I have recently been studying neural networks, mainly following the book 《深度學習入門(基於Python的理論與實現)》 (Deep Learning from Scratch). Below are my annotated and modified versions of the example programs from Chapter 5, "Error Backpropagation," kept here for future reference.
The code was written in the Eric7 IDE.
Training a neural network means searching for a minimum (or local minimum) of the loss function, i.e. a point where the partial derivatives (the gradient) of the loss are 0 (or close to 0), and recording the weight and bias matrices at that point for later prediction (predict). Error backpropagation provides a convenient way to compute those partial derivatives (gradients) and greatly reduces the amount of computation.
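To make the descent idea concrete before the full program, here is a minimal, self-contained sketch (the one-variable loss f(w) = (w - 3)**2 and its derivative 2*(w - 3) are invented for illustration): repeatedly stepping against the gradient drives w toward the point where the derivative vanishes, which is exactly what the training loop below does for W1, b1, W2, b2.

# Minimal gradient-descent sketch on the toy loss f(w) = (w - 3)**2,
# whose derivative is 2*(w - 3). The update w -= learning_rate * grad
# is the same rule the training loop applies to each parameter matrix.
w = 0.0
learning_rate = 0.1
for i in range(100):
    grad = 2 * (w - 3)        # analytic derivative of the toy loss
    w -= learning_rate * grad
print(w)  # ~3.0: the point where the gradient is (almost) 0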
1. Main training program
The annotated and modified train_neuralnet.py follows:
# coding: utf-8
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

# Load the data
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)  # normalize: /= np.float32(255.0) => 0.0~1.0

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)  # 28*28=784

iters_num = 10000  # number of iterations
train_size = x_train.shape[0]  # number of training samples
print("Train_size:", train_size)
batch_size = 100  # mini-batch size
learning_rate = 0.1  # learning rate

train_loss_list = []
train_acc_list = []
test_acc_list = []

iter_per_epoch = max(train_size / batch_size, 1)
print("Iters/epoch:", iter_per_epoch)
print("epochs:", int(iters_num/iter_per_epoch)+1)

epochs = 0
for i in range(iters_num):
    # the remainder is the mini-batch index within the current epoch
    batch_index = i % iter_per_epoch
    # at the start of each epoch, regenerate the shuffled index array shuffled_indices
    if batch_index == 0:
        shuffled_indices = np.random.permutation(train_size)
    # take the shuffled sample indices in order, batch_size of them at a time
    batch_indices = shuffled_indices[int(batch_index * batch_size) : int((batch_index + 1)*batch_size)]
    # batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_indices]
    t_batch = t_train[batch_indices]

    # Gradient
    # grad = network.numerical_gradient(x_batch, t_batch)
    grad = network.gradient(x_batch, t_batch)

    # Update
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print("epochs:", epochs)
        print("train/test accuracy:", train_acc, test_acc)
        epochs += 1
Where:
- batch_size is the number of samples the network uses in one iteration (the size of one mini-batch); the iterations search for a point where the gradient of the loss with respect to the weights is locally zero (a local minimum of the loss).
- One epoch is one full pass over all of the training data.
- iter_per_epoch = train_size / batch_size is the number of iterations needed to complete one epoch.
- Completing all iterations therefore takes epochs = iters_num / iter_per_epoch (see the worked numbers below).
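Plugging in the values from the program above gives the worked numbers:

# Worked numbers for the run above (MNIST)
train_size = 60000                                # x_train.shape[0]
batch_size = 100
iters_num = 10000

iter_per_epoch = max(train_size / batch_size, 1)  # 600.0 iterations per epoch
epochs = iters_num / iter_per_epoch               # 10000 / 600 ≈ 16.7 epochs
print(iter_per_epoch, epochs)                     # 600.0 16.666666666666668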
In the original program, np.random.choice(train_size, batch_size) cannot guarantee that one epoch visits every training sample, with no repeats and no omissions, because each draw is independent and with replacement (the default behaviour). An alternative: shuffle the data at the start of each epoch, then take the batches in order.
Core idea:
At the beginning of each epoch, shuffle the indices of the training data:
shuffled_indices = np.random.permutation(train_size)
then split them into consecutive batches. This preserves the randomness while ensuring that every sample is used exactly once per epoch.
# walk through all batches in order
for i in range(0, train_size, batch_size):
    batch_indices = shuffled_indices[i:i + batch_size]
    # use batch_indices to fetch the corresponding data and train on it
Several np.random methods for generating random data are used in the program; they are summarized below:
# generate 3 integers in [0, 10)
arr = np.random.randint(0, 10, size=3)
# generate a 2x3 array with elements in [0.0, 1.0)
arr = np.random.rand(2, 3)
# generate a 2x2 array with elements in [1.0, 5.0)
arr = np.random.uniform(1.0, 5.0, size=(2, 2))
# generate a 2x3 matrix of samples from the standard normal distribution (mean 0, variance 1)
normal_data = np.random.randn(2, 3)
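The sampling pitfall mentioned above can also be seen directly: np.random.choice draws with replacement by default, so indices may repeat even within a single batch; replace=False prevents repeats within one call, but separate calls can still overlap across an epoch, which is why the per-epoch permutation is preferable. A quick demonstration:

import numpy as np

with_replacement = np.random.choice(10, 8)                    # duplicates possible (default replace=True)
without_replacement = np.random.choice(10, 8, replace=False)  # no duplicates within this one call
permuted = np.random.permutation(10)                          # every index exactly once, in shuffled order
print(with_replacement)
print(without_replacement)
print(permuted)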
2. Handling the MNIST dataset
The training above uses the MNIST dataset. mnist.py, which downloads and preprocesses it, reads as follows:
# coding: utf-8
try:
    import urllib.request
except ImportError:
    raise ImportError('You should use Python 3.x')
import os.path
import gzip
import pickle
import os
import numpy as np

url_base = 'https://ossci-datasets.s3.amazonaws.com/mnist/' # mirror site

key_file = { # file dictionary: the image and label files needed for train and test
    'train_img':'train-images-idx3-ubyte.gz',
    'train_label':'train-labels-idx1-ubyte.gz',
    'test_img':'t10k-images-idx3-ubyte.gz',
    'test_label':'t10k-labels-idx1-ubyte.gz'
}

dataset_dir = os.path.dirname(os.path.abspath(__file__)) # absolute path of the current file
save_file = dataset_dir + "/mnist.pkl" # path of the saved pickle file

train_num = 60000
test_num = 10000
img_dim = (1, 28, 28)
img_size = 784


def _download(file_name):
    file_path = dataset_dir + "/" + file_name

    if os.path.exists(file_path):
        return

    print("Downloading " + file_name + " ... ")
    urllib.request.urlretrieve(url_base + file_name, file_path) # download by file name from the download site and save under the dataset directory
    print("Done")


def download_mnist():
    for v in key_file.values():
        _download(v)


def _load_label(file_name):
    file_path = dataset_dir + "/" + file_name

    print("Converting " + file_name + " to NumPy Array ...") # convert the file into a NumPy array
    with gzip.open(file_path, 'rb') as f:
        labels = np.frombuffer(f.read(), np.uint8, offset=8)
    print("Done")

    return labels


def _load_img(file_name):
    file_path = dataset_dir + "/" + file_name

    print("Converting " + file_name + " to NumPy Array ...")
    with gzip.open(file_path, 'rb') as f:
        data = np.frombuffer(f.read(), np.uint8, offset=16)
    data = data.reshape(-1, img_size)
    print("Done")

    return data


def _convert_numpy():
    dataset = {}
    dataset['train_img'] = _load_img(key_file['train_img'])
    dataset['train_label'] = _load_label(key_file['train_label'])
    dataset['test_img'] = _load_img(key_file['test_img'])
    dataset['test_label'] = _load_label(key_file['test_label'])

    return dataset


def init_mnist():
    download_mnist()
    dataset = _convert_numpy()
    print("Creating pickle file ...")
    with open(save_file, 'wb') as f:
        # Pickle the dataset using the highest protocol available.
        pickle.dump(dataset, f, -1)
    print("Done!")


def _change_one_hot_label(X):
    T = np.zeros((X.size, 10)) # zero matrix of shape (X.size, 10); X.size is the number of images
    for idx, row in enumerate(T): # idx is the index, row a 1-D array
        row[X[idx]] = 1 # whatever the image's label is, that position of the row becomes 1

    return T


def load_mnist(normalize=True, flatten=True, one_hot_label=False):
    if not os.path.exists(save_file):
        init_mnist()

    with open(save_file, 'rb') as f:
        dataset = pickle.load(f)

    if normalize:
        # Normalize the image data. For example:
        #   data = np.random.randint(0, 256, size=(4, 16), dtype=np.uint8)  # four single-channel 4x4 images
        #   data.astype(np.float32)
        for key in ('train_img', 'test_img'):
            # The files store np.uint8; convert to np.float32 before normalizing.
            # Dividing uint8 by 255.0 would default to float64 and increase memory usage.
            dataset[key] = dataset[key].astype(np.float32)
            dataset[key] /= np.float32(255.0)

    if one_hot_label:
        dataset['train_label'] = _change_one_hot_label(dataset['train_label'])
        dataset['test_label'] = _change_one_hot_label(dataset['test_label'])

    if not flatten:
        # If this parameter is False, each input image is a 1x28x28 3-D array;
        # if True, each input image is stored as a 1-D array of 784 elements.
        for key in ('train_img', 'test_img'):
            # reshape uses -1 to infer one dimension automatically; the second 1 means a single
            # channel (grayscale), whereas ordinary images have three RGB channels
            dataset[key] = dataset[key].reshape(-1, 1, 28, 28)

    return (dataset['train_img'], dataset['train_label']), (dataset['test_img'], dataset['test_label'])


if __name__ == '__main__':
    init_mnist()
Parameters of load_mnist:
- normalize: normalize the image pixel values to 0.0~1.0.
- one_hot_label: if True, the labels are returned as one-hot arrays; e.g. [0,0,1,0,0,0,0,0,0,0] has its 1 in the third position, so the label is the digit 2.
- flatten: whether to flatten each image into a one-dimensional array.
Return format:
(training images, training labels), (test images, test labels)
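A short usage sketch (assuming it is run from a directory where dataset/mnist.py is importable) showing how the flags change the returned shapes:

from dataset.mnist import load_mnist

# flatten defaults to True: each image is a 784-element vector
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
print(x_train.shape, t_train.shape)  # (60000, 784) (60000, 10)

# flatten=False keeps the 1x28x28 layout; without one_hot_label the labels stay as digits 0-9
(x_train, t_train), _ = load_mnist(normalize=False, flatten=False, one_hot_label=False)
print(x_train.shape, t_train.shape)  # (60000, 1, 28, 28) (60000,)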
In the program above, _load_label(file_name) reads the label data; offset=8 skips the first 8 bytes, which contain the magic number and the item count (4-byte magic number + 4-byte number of labels).
- Magic number: a 4-byte unsigned integer identifying the file type and version. For label data, the magic number is 0x00000801 for both the training and test sets.
- Number of items: a 4-byte unsigned integer giving the number of labels.
- Label data: every following byte is label data, one byte per image (a digit 0 to 9).
In the program above, _load_img(file_name) reads the image data; offset=16 skips the first 16 bytes, which contain the magic number and the image dimensions (4-byte magic number + 4-byte image count + 4-byte row count + 4-byte column count). The actual image data therefore starts at byte 17, from which point each image occupies 28x28 = 784 consecutive pixel bytes. The header layout (verified by the sketch after this list):
- Magic number: a 4-byte (32-bit) unsigned integer identifying the file type and version.
- For image data, the magic number is 0x00000803 for both the training and test sets (0x00000801 is the magic number of the label files).
- Number of images: a 4-byte unsigned integer giving the number of images in the file.
- Number of rows: a 4-byte unsigned integer giving the image height (28 here).
- Number of columns: a 4-byte unsigned integer giving the image width (28 here).
- Image data: all following bytes are image data, with each image's pixel values stored in row-major order. Each pixel is a grayscale value from 0 to 255 and occupies one byte.
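Both headers can be checked directly. The following sketch (assuming the .gz files have already been downloaded into the current directory) unpacks the big-endian header fields with struct; the '>' prefix matters because IDX integers are stored most-significant byte first:

import gzip
import struct

# 16-byte header of an image file: magic, image count, rows, cols (big-endian uint32 each)
with gzip.open('train-images-idx3-ubyte.gz', 'rb') as f:
    magic, num_images, rows, cols = struct.unpack('>IIII', f.read(16))
print(hex(magic), num_images, rows, cols)  # 0x803 60000 28 28

# 8-byte header of a label file: magic, label count
with gzip.open('train-labels-idx1-ubyte.gz', 'rb') as f:
    magic, num_labels = struct.unpack('>II', f.read(8))
print(hex(magic), num_labels)  # 0x801 60000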
In the program above, init_mnist() initializes the MNIST dataset: it downloads the files, converts the images and labels to NumPy arrays, and saves them as a pickle (.pkl) file. The .pkl format is Python's object-serialization format (pickle): it can turn almost any Python object into a byte stream that can be written to disk or sent over a network, and later read back and restored to the original Python object. This makes .pkl files very useful in Python programming, especially for saving and loading complex data structures or custom objects. The format is handled by the pickle module, whose two main functions are (see the round-trip example below):
- pickle.dump(obj, file, protocol): serializes a Python object and writes it to a file. If protocol is omitted, the module's default protocol is used; a negative value or HIGHEST_PROTOCOL selects the highest protocol available.
- pickle.load(file): reads a serialized object from a file and restores the original Python object. Files must be opened in binary mode: 'rb' for reading, 'wb' for writing.
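A minimal round-trip, mirroring how mnist.py saves and restores the dataset dictionary (the small dictionary here is just a stand-in for the real data):

import pickle

obj = {'train_img': [1, 2, 3], 'train_label': [5, 0, 4]}  # stand-in for the real dataset dict

with open('demo.pkl', 'wb') as f:  # 'wb': binary write mode
    pickle.dump(obj, f, -1)        # -1: use the highest protocol available

with open('demo.pkl', 'rb') as f:  # 'rb': binary read mode
    restored = pickle.load(f)
print(restored == obj)  # True: the object survives the round trip intact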
3. Neural network hidden layers
The network and its hidden layers are defined in two_layer_net.py, annotated as follows:
# coding: utf-8
import sys, os
sys.path.append(os.pardir) # setting that allows importing files from the parent directory
import numpy as np
from common.layers import *
from common.gradient import numerical_gradient
from collections import OrderedDict


class TwoLayerNet:

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size) # np.random.randn: normal distribution with mean 0 and standard deviation 1
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # Build the layers
        self.layers = OrderedDict()
        # affine layer 1, Affine1: a1 = x@W1 + b1
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        # activation layer 1, Relu1: z1 = Relu(a1)
        self.layers['Relu1'] = Relu()
        # affine layer 2, Affine2: a2 = z1@W2 + b2
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])
        # last layer, SoftmaxWithLoss: y = softmax(a2), loss = cross_entropy_error(y, t)
        self.lastLayer = SoftmaxWithLoss()

    def predict(self, x):
        # Forward propagation: each layer's output is the next layer's input.
        # self.layers holds the layers in order.
        for layer in self.layers.values(): # self.layers.values() are the values in insertion order, self.layers.keys() the keys
            x = layer.forward(x)
        return x

    # x: input data, t: target (teacher) data
    def loss(self, x, t):
        y = self.predict(x)
        return self.lastLayer.forward(y, t) # return the softmax-with-loss value

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1) # index of the largest output (highest probability) in each row; the 2-D output becomes a 1-D array
        if t.ndim != 1:
            t = np.argmax(t, axis=1) # if t is not a 1-D array, convert it from one-hot to plain label format

        # y == t returns a bool array: True wherever y and t agree elementwise, False elsewhere
        # np.sum(y == t) counts the Trues. x.shape[0] is the batch_size
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    # x: input data, t: target (teacher) data
    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads

    def gradient(self, x, t):
        # forward
        self.loss(x, t)

        # backward
        dout = 1
        dout = self.lastLayer.backward(dout)

        layers = list(self.layers.values())
        layers.reverse() # reverse the list
        for layer in layers:
            dout = layer.backward(dout) # each layer's backward output dout becomes the previous layer's input; the Affine layers also compute their dW and db

        # Collect the gradients
        grads = {}
        grads['W1'], grads['b1'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
        grads['W2'], grads['b2'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
        return grads
The gradient method is the backward-differentiation (gradient) routine: the output (gradient) of each downstream layer serves as the input to the layer above it.
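Since TwoLayerNet keeps both numerical_gradient and the backpropagation-based gradient, the two can be compared to verify the implementation, in the spirit of the book's gradient check (a sketch, assuming the same directory layout as the training script):

import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet

(x_train, t_train), _ = load_mnist(normalize=True, one_hot_label=True)
network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

x_batch, t_batch = x_train[:3], t_train[:3]  # a few samples keep the slow numerical pass fast
grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

for key in grad_numerical.keys():
    # average absolute difference per parameter; should be tiny (around 1e-10 or less)
    diff = np.average(np.abs(grad_backprop[key] - grad_numerical[key]))
    print(key + ":" + str(diff))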
4. Layers and the activation functions they call
The hidden layers of the network apply different activation functions to their outputs. layers.py, which defines the individual layers and activation functions, is annotated as follows:
# coding: utf-8
import numpy as np
from common.functions import *
from common.util import im2col, col2im


class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0) # bool array: True where the element is <= 0, False elsewhere
        out = x.copy() # out = x would only bind a reference to x's memory; modifying x would also change out
        out[self.mask] = 0 # zero the positions of out where x <= 0 (the True positions)

        return out

    def backward(self, dout):
        dout[self.mask] = 0 # dx = ∂L/∂x = ∂L/∂out * ∂out/∂x = dout * ∂out/∂x; where out is zero, ∂out/∂x is 0, so dx = 0 there
        dx = dout

        return dx


class Sigmoid: # sigmoid(x) = 1 / (1 + np.exp(-x))
    def __init__(self):
        self.out = None

    def forward(self, x):
        out = sigmoid(x)
        self.out = out # store the output on the instance

        return out

    def backward(self, dout):
        # dx = ∂L/∂x = ∂L/∂out * ∂out/∂x = dout * (1 - out) * out
        dx = dout * (1.0 - self.out) * self.out

        return dx


class Affine:
    '''
    Affine layer: out = X@W + b
    __init__(self, W, b): initialize with parameters W, b
    '''
    def __init__(self, W, b):
        self.W = W
        self.b = b

        self.x = None
        self.original_x_shape = None
        # gradients of the weight and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        '''
        forward(self, x): forward pass with input x.
        The input is first reshaped into a 2-D array:
        1. If the input is multi-dimensional (e.g. the output of a convolution layer,
           shape (batch, C, H, W)), it is flattened to (batch, C*H*W) so that each sample
           becomes a 1-D vector suitable for the matrix product np.dot(x, W).
        2. If the input is already 2-D (e.g. the output of an ordinary fully connected
           layer), it is left unchanged.
        '''
        self.original_x_shape = x.shape
        x = x.reshape(x.shape[0], -1)
        self.x = x

        out = np.dot(self.x, self.W) + self.b

        return out

    def backward(self, dout):
        '''
        out = X@W + b
        backward(self, dout): backward pass with input dout = ∂L/∂out, so:
        self.dW = ∂L/∂out * ∂out/∂W = (self.x.T)@dout
        self.db = ∂L/∂out * ∂out/∂b = np.sum(dout, axis=0)
        dx      = ∂L/∂out * ∂out/∂x = dout@(self.W.T)
        '''
        dx = np.dot(dout, self.W.T)
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)

        dx = dx.reshape(*self.original_x_shape) # restore the input's original shape (for tensor inputs)
        return dx


class SoftmaxWithLoss:
    '''
    y = softmax(x)
    loss = loss(y, t): cross-entropy loss
    '''
    def __init__(self):
        self.loss = None
        self.y = None # output of softmax
        self.t = None # target (teacher) data

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)

        return self.loss

    def backward(self, dout=1):
        batch_size = self.t.shape[0]
        if self.t.size == self.y.size: # t is a one-hot vector
            dx = (self.y - self.t) / batch_size
        else: # here self.t has shape (batch_size,)
            dx = self.y.copy()
            # dx = y - t; self.t is a 1-D array whose i-th element is the correct class of
            # the i-th sample, so it suffices to subtract 1 from the self.t[i]-th value of
            # row i of the output y, i.e. dx[i, self.t[i]] -= 1 for i = 0 .. batch_size-1
            # (the row indices are generated by np.arange(batch_size)).
            # If self.t were converted to one-hot format, it would be 1 exactly at those
            # positions and 0 everywhere else:
            #     temp_t = np.zeros_like(y)
            #     temp_t[np.arange(batch_size), self.t] = 1
            dx[np.arange(batch_size), self.t] -= 1
            dx = dx / batch_size

        return dx


class Dropout:
    """
    http://arxiv.org/abs/1207.0580
    """
    def __init__(self, dropout_ratio=0.5):
        self.dropout_ratio = dropout_ratio
        self.mask = None

    def forward(self, x, train_flg=True):
        if train_flg:
            self.mask = np.random.rand(*x.shape) > self.dropout_ratio
            return x * self.mask
        else:
            return x * (1.0 - self.dropout_ratio)

    def backward(self, dout):
        return dout * self.mask


class BatchNormalization:
    """
    http://arxiv.org/abs/1502.03167
    """
    def __init__(self, gamma, beta, momentum=0.9, running_mean=None, running_var=None):
        self.gamma = gamma
        self.beta = beta
        self.momentum = momentum
        self.input_shape = None # 4-D for Conv layers, 2-D for fully connected layers

        # mean and variance used at test time
        self.running_mean = running_mean
        self.running_var = running_var

        # intermediate data used by backward
        self.batch_size = None
        self.xc = None
        self.std = None
        self.dgamma = None
        self.dbeta = None

    def forward(self, x, train_flg=True):
        self.input_shape = x.shape
        if x.ndim != 2:
            N, C, H, W = x.shape
            x = x.reshape(N, -1)

        out = self.__forward(x, train_flg)

        return out.reshape(*self.input_shape)

    def __forward(self, x, train_flg):
        if self.running_mean is None:
            N, D = x.shape
            self.running_mean = np.zeros(D)
            self.running_var = np.zeros(D)

        if train_flg:
            mu = x.mean(axis=0)
            xc = x - mu
            var = np.mean(xc**2, axis=0)
            std = np.sqrt(var + 10e-7)
            xn = xc / std

            self.batch_size = x.shape[0]
            self.xc = xc
            self.xn = xn
            self.std = std
            self.running_mean = self.momentum * self.running_mean + (1-self.momentum) * mu
            self.running_var = self.momentum * self.running_var + (1-self.momentum) * var
        else:
            xc = x - self.running_mean
            xn = xc / ((np.sqrt(self.running_var + 10e-7)))

        out = self.gamma * xn + self.beta
        return out

    def backward(self, dout):
        if dout.ndim != 2:
            N, C, H, W = dout.shape
            dout = dout.reshape(N, -1)

        dx = self.__backward(dout)

        dx = dx.reshape(*self.input_shape)
        return dx

    def __backward(self, dout):
        dbeta = dout.sum(axis=0)
        dgamma = np.sum(self.xn * dout, axis=0)
        dxn = self.gamma * dout
        dxc = dxn / self.std
        dstd = -np.sum((dxn * self.xc) / (self.std * self.std), axis=0)
        dvar = 0.5 * dstd / self.std
        dxc += (2.0 / self.batch_size) * self.xc * dvar
        dmu = np.sum(dxc, axis=0)
        dx = dxc - dmu / self.batch_size

        self.dgamma = dgamma
        self.dbeta = dbeta

        return dx


class Convolution:
    def __init__(self, W, b, stride=1, pad=0):
        self.W = W # convolution filters, W.shape: (Filter_Num, Channel, Filter_Height, Filter_Width)
        self.b = b # convolution biases, b.shape: (Filter_Num,)
        self.stride = stride
        self.pad = pad

        # intermediate data (used by backward)
        self.x = None
        self.col = None
        self.col_W = None

        # gradients of the weight and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = 1 + int((H + 2*self.pad - FH) / self.stride) # number of vertical window positions: output height (Out_Height, OH)
        out_w = 1 + int((W + 2*self.pad - FW) / self.stride) # number of horizontal window positions: output width (Out_Width, OW)

        col = im2col(x, FH, FW, self.stride, self.pad) # col.shape: (N×OH×OW, C×FH×FW); each window is multiplied by the filter, then summed over all channels
        col_W = self.W.reshape(FN, -1).T # col_W.shape: (C×FH×FW, FN)

        out = np.dot(col, col_W) + self.b # np.dot(col, col_W).shape: (N×OH×OW, FN), self.b.shape: (FN,)
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2) # (N, OH, OW, FN) ==> (N, FN, OH, OW)

        self.x = x
        self.col = col
        self.col_W = col_W

        return out

    def backward(self, dout):
        FN, C, FH, FW = self.W.shape # filter shape
        dout = dout.transpose(0, 2, 3, 1).reshape(-1, FN) # dout.shape: (N, FN, OH, OW) ==> (N, OH, OW, FN) ==> (N×OH×OW, FN)

        self.db = np.sum(dout, axis=0)
        self.dW = np.dot(self.col.T, dout) # out = self.col @ self.col_W, so dW = self.col.T @ dout
        self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW) # dW.shape: (C×FH×FW, FN) ==> (FN, C×FH×FW) ==> (FN, C, FH, FW)

        dcol = np.dot(dout, self.col_W.T) # dcol = dout @ self.col_W.T: (N×OH×OW, FN) @ (FN, C×FH×FW) ==> (N×OH×OW, C×FH×FW)
        dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad) # dcol ==> dx: (N×OH×OW, C×FH×FW) ==> (N, C, H, W)

        return dx


class Pooling:
    def __init__(self, pool_h, pool_w, stride=1, pad=0):
        self.pool_h = pool_h
        self.pool_w = pool_w
        self.stride = stride
        self.pad = pad

        self.x = None
        self.arg_max = None

    def forward(self, x):
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        col = col.reshape(-1, self.pool_h*self.pool_w)

        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        self.x = x
        self.arg_max = arg_max

        return out

    def backward(self, dout):
        dout = dout.transpose(0, 2, 3, 1)

        pool_size = self.pool_h * self.pool_w
        dmax = np.zeros((dout.size, pool_size))
        dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()
        dmax = dmax.reshape(dout.shape + (pool_size,))

        dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
        dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad)

        return dx
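To see the (N×OH×OW, C×FH×FW) layout that Convolution.forward relies on, here is a quick im2col shape check (the example shapes follow the book's chapter 7 and assume common.util is importable):

import numpy as np
from common.util import im2col

x1 = np.random.rand(1, 3, 7, 7)           # N=1, C=3, H=W=7
col1 = im2col(x1, 5, 5, stride=1, pad=0)  # OH = OW = (7 - 5)/1 + 1 = 3
print(col1.shape)  # (9, 75): N×OH×OW = 9 rows, C×FH×FW = 75 columns per window

x2 = np.random.rand(10, 3, 7, 7)          # a batch of 10 inputs
col2 = im2col(x2, 5, 5, stride=1, pad=0)
print(col2.shape)  # (90, 75)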