# Max pooling over a 2D matrix.
import numpy as np
def max_pooling_2d(image, kernel_size, stride):
    """Apply 2D max pooling to a single-channel image.

    Args:
        image: 2D numpy array of shape (H, W).
        kernel_size: (kernel_height, kernel_width) tuple, or a single int
            for a square pooling window.
        stride: positive int step between successive windows.

    Returns:
        2D numpy array of shape ((H - kh) // stride + 1,
        (W - kw) // stride + 1) holding the window maxima.

    Raises:
        ValueError: if stride is not positive or the kernel does not fit
            inside the image.
    """
    input_height, input_width = image.shape
    # Accept a bare int as shorthand for a square window (backward compatible).
    if isinstance(kernel_size, int):
        kernel_height = kernel_width = kernel_size
    else:
        kernel_height, kernel_width = kernel_size

    if stride <= 0:
        raise ValueError("stride must be a positive integer")
    if kernel_height > input_height or kernel_width > input_width:
        raise ValueError("kernel size exceeds image dimensions")

    # Number of window positions along each axis.
    output_height = (input_height - kernel_height) // stride + 1
    output_width = (input_width - kernel_width) // stride + 1

    # Default float dtype kept for backward compatibility with callers
    # that expect float output regardless of input dtype.
    output = np.zeros((output_height, output_width))

    for i in range(output_height):
        for j in range(output_width):
            row_start = i * stride
            col_start = j * stride
            # Maximum of the current pooling window.
            output[i, j] = np.max(
                image[row_start:row_start + kernel_height,
                      col_start:col_start + kernel_width]
            )
    return output
# Example: 2x2 max pooling with stride 2 over a 4x4 matrix.
demo_image = np.array(
    [[1, 2, 3, 4],
     [5, 6, 7, 8],
     [9, 10, 11, 12],
     [13, 14, 15, 16]]
)
pooled = max_pooling_2d(demo_image, kernel_size=(2, 2), stride=2)
print(pooled)