KNN:
Here we are asked to implement a KNN classifier that assigns class labels to images using the k-nearest-neighbor algorithm.
k_nearest_neighbor.py
We need to complete four interfaces:
# X: test set
# Version with two explicit loops
def compute_distances_two_loops(self, X):
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in range(num_test):
        for j in range(num_train):
            # TODO:
            # Compute the L2 distance between the i-th test image and the j-th
            # training image, and store it in dists[i, j]
            # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
            dists[i, j] = np.sqrt(np.sum(np.square(X[i, :] - self.X_train[j, :])))
            # X[i, :] selects the i-th test image and self.X_train[j, :] selects the
            # j-th training image; subtracting gives the element-wise differences
            # np.square squares each element
            # np.sum adds up the squared differences
            # np.sqrt takes the square root of the sum
            # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return dists
# X: test set
# Version with one explicit loop
def compute_distances_one_loop(self, X):
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in range(num_test):
        # TODO: same as above
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        dists[i, :] = np.sqrt(np.sum(np.square(self.X_train - X[i, :]), axis=1))
        # Broadcasting subtracts X[i, :] from every row j of self.X_train
        # np.square squares each element
        # np.sum(axis=1) sums the elements of each row
        # np.sqrt takes the element-wise square root
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return dists
# X: test set
# Version with no explicit loops
def compute_distances_no_loops(self, X):
    num_test = X.shape[0]
    num_train = self.X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    dists = np.sqrt(np.sum(np.square(self.X_train - X[:, None, :]), axis=2))
    # X[:, None, :] reshapes X from (num_test, D) to (num_test, 1, D),
    # so broadcasting subtracts X from every row of X_train, giving a result of
    # shape (num_test, num_train, D)
    # np.square squares each element
    # np.sum(axis=2) sums over the D dimension
    # np.sqrt takes the element-wise square root
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return dists
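The broadcasted version above materializes a (num_test, num_train, D) intermediate, which can use a lot of memory. A common alternative (a minimal sketch, not the code used above) expands ||x − y||² = ||x||² − 2·x·y + ||y||², so only 2-D arrays are needed:

# Alternative no-loop version using the squared-distance expansion
test_sq = np.sum(np.square(X), axis=1, keepdims=True)   # (num_test, 1)
train_sq = np.sum(np.square(self.X_train), axis=1)      # (num_train,)
cross = X.dot(self.X_train.T)                           # (num_test, num_train)
dists = np.sqrt(test_sq - 2 * cross + train_sq)         # broadcasts to (num_test, num_train)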
# dists: distance matrix
# k: number of nearest neighbors
# Returns the predicted labels
def predict_labels(self, dists, k=1):
    num_test = dists.shape[0]
    y_pred = np.zeros(num_test)
    for i in range(num_test):
        closest_y = []
        # TODO: find the labels of the k nearest neighbors
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        knn = np.argsort(dists[i, :])[0:k]
        # np.argsort returns the indices that sort the distances in ascending order;
        # taking the first k gives the indices of the k nearest neighbors
        # dists[i, :] holds the distances from the i-th test sample to all training samples
        closest_y = self.y_train[knn]
        # look up the corresponding labels through the indices knn
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # TODO: pick the label that occurs most often among the k nearest neighbors
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        y_pred[i] = np.argmax(np.bincount(closest_y.astype(int)))
        # np.bincount counts how many times each label occurs
        # closest_y.astype(int) casts the labels to integers
        # np.argmax returns the most frequent label (ties go to the smaller label)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return y_pred
Comparing the three ways of computing the dists matrix, the timings are:
Two loop version took 14.840988 seconds
One loop version took 33.446717 seconds
No loop version took 0.102279 seconds
The fully vectorized version is by far the fastest, so vectorized operations should be preferred whenever possible.
Cross-validation and finding the best k
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]

X_train_folds = []
y_train_folds = []
# TODO: split X_train and y_train into num_folds folds
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
X_train_folds = np.array(np.split(X_train, num_folds))
y_train_folds = np.array(np.split(y_train, num_folds))
# np.split divides the input array into num_folds equal parts
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

# Store, for each k, the accuracy obtained on every fold when it is held out
k_to_accuracies = {}

# TODO: use cross-validation to compute the accuracy for each k
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Iterate over all candidate values of k
for k in k_choices:
    curr_acc = []
    for i in np.arange(num_folds):  # i is the index of the held-out fold
        idx = np.array([j for j in range(num_folds) if j != i])  # indices of the training folds
        X_test_n = X_train_folds[i]
        y_test_n = y_train_folds[i]
        X_train_n = np.concatenate(X_train_folds[idx], axis=0)     # concatenate the training folds
        y_train_n = np.concatenate(y_train_folds[idx], axis=None)  # concatenate their labels
        classifier = KNearestNeighbor()
        classifier.train(X_train_n, y_train_n)
        y_test_n_pred = classifier.predict_labels(
            classifier.compute_distances_no_loops(X_test_n), k)  # predicted labels
        num_correct = np.sum(y_test_n_pred == y_test_n)  # number of correct predictions
        acc = float(num_correct) / len(y_test_n)         # accuracy on this fold
        curr_acc.append(acc)
    k_to_accuracies[k] = curr_acc
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

# Print out the computed accuracies
for k in sorted(k_to_accuracies):
    for acc in k_to_accuracies[k]:
        print('k = %d, accuracy = %f' % (k, acc))
The results are visualized below:
The best accuracy is achieved at k = 10.
SVM:
Loss:
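For reference, the loss implemented in linear_svm.py below is the standard multiclass hinge loss with margin Δ = 1 plus L2 regularization:

$$
L = \frac{1}{N}\sum_{i=1}^{N}\sum_{j\neq y_i}\max\bigl(0,\; s_{ij}-s_{iy_i}+1\bigr)\;+\;\mathrm{reg}\sum_{k,l}W_{kl}^{2},
\qquad s_i = x_i W .
$$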
Gradient:
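Per sample, the gradient accumulated by the code (before averaging over N and adding the regularization term 2·reg·W) is:

$$
\nabla_{w_j} L_i = \mathbb{1}\bigl(s_{ij}-s_{iy_i}+1>0\bigr)\,x_i \;\;(j\neq y_i),
\qquad
\nabla_{w_{y_i}} L_i = -\Bigl(\sum_{j\neq y_i}\mathbb{1}\bigl(s_{ij}-s_{iy_i}+1>0\bigr)\Bigr)\,x_i ,
$$

where $w_j$ denotes the j-th column of W.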
linear_svm.py
# W: weight matrix (D, C)
# X: input data (N, D)
# y: labels (N,)
# reg: regularization strength
# loss: loss value
# dW: gradient (D, C)
def svm_loss_naive(W, X, y, reg):
    dW = np.zeros(W.shape)    # initialize the gradient to zero
    num_classes = W.shape[1]  # number of classes
    num_train = X.shape[0]    # number of training samples
    loss = 0.0
    for i in range(num_train):
        f = 0
        scores = X[i].dot(W)                # scores of the i-th sample
        correct_class_score = scores[y[i]]  # score of the correct class
        for j in range(num_classes):
            if j == y[i]:
                continue
            margin = scores[j] - correct_class_score + 1  # delta is set to 1 here
            if margin > 0:  # only positive margins contribute to the loss and gradient
                loss += margin
                dW[:, j] += X[i]  # accumulate the gradient for the violating class
                f += 1            # count how many classes violate the margin
        dW[:, y[i]] += -f * X[i]  # gradient contribution of the correct class
    # average the loss and gradient
    loss /= num_train
    dW /= num_train
    # add the regularization loss
    loss += reg * np.sum(W * W)
    # TODO: add the regularization gradient
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    dW += 2 * reg * W
    # the factor of 2 comes from differentiating the squared term W^2
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return loss, dW
def svm_loss_vectorized(W, X, y, reg):
    loss = 0.0
    dW = np.zeros(W.shape)
    # TODO: compute the loss in a vectorized way
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    num_test = len(X)  # number of samples
    scores = X.dot(W)  # scores of all samples, shape (N, C)
    correct_class_scores = scores[np.arange(num_test), y]  # correct-class scores, shape (N,)
    margin = np.clip(scores - correct_class_scores.reshape([-1, 1]) + 1, 0, None)
    # correct_class_scores.reshape([-1, 1]) turns the correct-class scores into a column vector
    # scores - correct_class_scores.reshape([-1, 1]) computes the margin of every class
    # np.clip(..., 0, None) sets negative margins to 0, giving an (N, C) margin matrix
    margin[np.arange(num_test), y] = 0  # the correct class contributes no margin
    loss = np.sum(margin) / num_test + reg * np.sum(np.square(W))  # average loss plus regularization
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    # TODO: compute the gradient in a vectorized way
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    m = (margin > 0).astype(int)
    # entries with a positive margin are set to 1 (they contribute to the gradient),
    # the rest to 0; shape (N, C)
    f = np.sum(m, axis=1)  # number of margin-violating classes per sample, shape (N,)
    m[np.arange(num_test), y] = -f
    # the correct class gets minus the number of violations, which handles its gradient
    dW = X.T.dot(m) / num_test + 2 * reg * W
    # X.T.dot(m) accumulates the gradient for every class, shape (D, C);
    # dW[i, j] is the gradient w.r.t. the i-th feature weight of class j, i.e. X[:, i].dot(m[:, j])
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return loss, dW
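A quick way to check the two implementations against each other (a sketch; it assumes a weight matrix W and a small development split X_dev, y_dev are already defined in the notebook):

loss_naive, grad_naive = svm_loss_naive(W, X_dev, y_dev, 0.000005)
loss_vec, grad_vec = svm_loss_vectorized(W, X_dev, y_dev, 0.000005)
print('loss difference: %f' % abs(loss_naive - loss_vec))                            # should be ~0
print('gradient difference: %f' % np.linalg.norm(grad_naive - grad_vec, ord='fro'))  # should be ~0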
linear_classifier.py
# X: training data (N, D)
# y: labels (N,)
# learning_rate: learning rate
# reg: regularization strength
# num_iters: number of iterations
# batch_size: batch size
# verbose: whether to print training progress
# Returns: the loss of every iteration
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
          batch_size=200, verbose=False):
    num_train, dim = X.shape
    num_classes = np.max(y) + 1  # number of classes
    if self.W is None:
        self.W = 0.001 * np.random.randn(dim, num_classes)  # randomly initialize W
    loss_history = []
    for it in range(num_iters):
        X_batch = None
        y_batch = None
        # TODO: sample a random batch
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        idx = np.random.choice(num_train, batch_size)  # sample batch_size indices out of num_train
        X_batch = X[idx, :]
        y_batch = y[idx]  # shape (batch_size,)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        loss, grad = self.loss(X_batch, y_batch, reg)
        loss_history.append(loss)
        # TODO: update the weights W
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        self.W -= learning_rate * grad
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        if verbose and it % 100 == 0:
            print('iteration %d / %d: loss %f' % (it, num_iters, loss))
    return loss_history
# X: test data (N, D)
# Returns the predicted labels (N,)
def predict(self, X):
    y_pred = np.zeros(X.shape[0])
    # TODO: compute the predicted labels
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    y_pred = np.argmax(X.dot(self.W), axis=1)
    # for every sample, take the index of the class with the highest score; shape (N,)
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return y_pred
The loss curve is shown below:
Grid search for the best hyperparameters:
# Use the validation set to tune the hyperparameters (regularization strength and learning rate).
learning_rates = np.linspace(5e-8, 5e-7, num=5)
regularization_strengths = np.linspace(2e4, 3e4, num=5)

# Dictionary of {(learning rate, regularization strength): (train acc, val acc)}
results = {}
best_val = -1    # best validation accuracy seen so far
best_svm = None  # the SVM object that achieved the best validation accuracy

# TODO: use the validation set to tune the hyperparameters (learning rate and regularization strength).
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
for lr in learning_rates:
    for reg in regularization_strengths:
        svm = LinearSVM()
        loss_hist = svm.train(X_train, y_train, learning_rate=lr, reg=reg,
                              num_iters=1500, verbose=True)
        y_train_pred = svm.predict(X_train)
        y_val_pred = svm.predict(X_val)
        train_acc = np.mean(y_train == y_train_pred)
        val_acc = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = (train_acc, val_acc)
        if val_acc > best_val:
            best_svm = svm
            best_val = val_acc
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
Within the search range, both the training and validation accuracy increase as the learning rate and regularization strength decrease, so the search boundaries should probably be widened, and searching over a logarithmic range would work better (see the sketch below).
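A minimal sketch of what a log-scale grid could look like (the exact bounds here are only an illustrative assumption):

learning_rates = np.logspace(-9, -6, num=7)          # 1e-9 ... 1e-6, evenly spaced in log space
regularization_strengths = np.logspace(3, 5, num=5)  # 1e3 ... 1e5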
Visualization of the weight matrix for each class:
Softmax:
Loss:
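For reference, the loss implemented in softmax.py below is the softmax cross-entropy with L2 regularization:

$$
L = -\frac{1}{N}\sum_{i=1}^{N}\log\frac{e^{s_{iy_i}}}{\sum_{j}e^{s_{ij}}}\;+\;\mathrm{reg}\sum_{k,l}W_{kl}^{2},
\qquad s_i = x_i W .
$$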
Gradient:
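Writing $p_{ij}=e^{s_{ij}}/\sum_k e^{s_{ik}}$ for the class probabilities, the per-sample gradient used by the code is:

$$
\nabla_{w_j} L_i = \bigl(p_{ij}-\mathbb{1}(j=y_i)\bigr)\,x_i ,
$$

averaged over the N samples, with the regularization gradient $2\,\mathrm{reg}\,W$ added.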
softmax.py
def softmax_loss_naive(W, X, y, reg):
    loss = 0.0
    dW = np.zeros_like(W)
    # TODO: compute the loss and gradient with explicit loops
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    num_train = X.shape[0]
    num_classes = W.shape[1]
    for i in range(num_train):  # i-th sample
        scores = X[i].dot(W)  # score vector of the i-th sample, shape (C,)
        exp_scores = np.exp(scores - np.max(scores))  # shift by the max for numerical stability
        probs = exp_scores / np.sum(exp_scores)  # class probabilities, shape (C,)
        loss += -np.log(probs[y[i]])  # cross-entropy loss of the i-th sample
        for j in range(num_classes):  # j-th class
            # X[i] contributes to the weight gradient of class j through dW[:, j]
            if j == y[i]:  # contribution of the correct class
                dW[:, j] += (probs[j] - 1) * X[i]  # i.e. (exp_scores[j] / np.sum(exp_scores) - 1) * X[i]
            else:          # contribution of an incorrect class
                dW[:, j] += probs[j] * X[i]        # i.e. exp_scores[j] / np.sum(exp_scores) * X[i]
    loss = loss / num_train + reg * np.sum(np.square(W))
    dW = dW / num_train + 2 * reg * W
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
    loss = 0.0
    dW = np.zeros_like(W)
    # TODO: compute the loss and gradient in a vectorized way
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    num_train = X.shape[0]  # number of samples N
    scores = X.dot(W)  # (N, C)
    exp_scores = np.exp(scores - np.max(scores))  # (N, C); subtracting the per-row max
    # (np.max(scores, axis=1, keepdims=True)) would be slightly more robust numerically
    total = np.sum(exp_scores, axis=1, keepdims=True)  # (N, 1)
    probs = exp_scores / total  # (N, C)
    loss = -np.sum(np.log(probs[np.arange(num_train), y])) / num_train + reg * np.sum(np.square(W))
    probs[np.arange(num_train), y] -= 1  # subtract 1 at the correct class
    dW = X.T.dot(probs) / num_train + 2 * reg * W  # (D, C)
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return loss, dW
Hyperparameter search
# Use the validation set to search for hyperparameters
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = np.linspace(8e-8, 2e-7, 10)
regularization_strengths = np.linspace(1e4, 5e4, 3)

# TODO: same as for the SVM
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
for lr in learning_rates:
    for reg in regularization_strengths:
        clf = Softmax()
        loss_hist = clf.train(X_train, y_train, learning_rate=lr, reg=reg,
                              num_iters=1500, verbose=True)
        y_train_pred = clf.predict(X_train)
        y_val_pred = clf.predict(X_val)
        train_acc = np.mean(y_train == y_train_pred)
        val_acc = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = (train_acc, val_acc)
        if val_acc > best_val:
            best_softmax = clf
            best_val = val_acc
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
Weight matrix visualization:
two_layer_net:
A two-layer neural network.
The input passes through the first affine layer, then a ReLU, then the second affine layer, and finally a softmax layer that produces the output.
Forward pass:
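In the notation of the code in neural_net.py below:

$$
z_1 = XW_1 + b_1,\qquad a_1=\max(0,\,z_1),\qquad s = a_1 W_2 + b_2 .
$$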
Loss:
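The loss is the softmax cross-entropy over the scores plus L2 regularization of both weight matrices:

$$
L = -\frac{1}{N}\sum_{i=1}^{N}\log\frac{e^{s_{iy_i}}}{\sum_j e^{s_{ij}}}
    \;+\;\mathrm{reg}\,\Bigl(\sum_{k,l} W_{1,kl}^{2}+\sum_{k,l} W_{2,kl}^{2}\Bigr).
$$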
Gradient:
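Backpropagating through the two layers (with $p$ the matrix of softmax probabilities and $Y$ the one-hot labels), the gradients computed in the code are:

$$
\frac{\partial L}{\partial s}=\frac{1}{N}(p-Y),\qquad
\frac{\partial L}{\partial W_2}=a_1^{\top}\frac{\partial L}{\partial s}+2\,\mathrm{reg}\,W_2,\qquad
\frac{\partial L}{\partial b_2}=\sum_i\frac{\partial L}{\partial s_i},
$$

$$
\frac{\partial L}{\partial a_1}=\frac{\partial L}{\partial s}\,W_2^{\top},\qquad
\frac{\partial L}{\partial z_1}=\frac{\partial L}{\partial a_1}\odot\mathbb{1}(z_1>0),\qquad
\frac{\partial L}{\partial W_1}=X^{\top}\frac{\partial L}{\partial z_1}+2\,\mathrm{reg}\,W_1,\qquad
\frac{\partial L}{\partial b_1}=\sum_i\frac{\partial L}{\partial z_{1,i}},
$$

where $\odot$ denotes element-wise multiplication.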
neural_net.py
# X: input data (N, D)
# y: labels (N,)
# reg: regularization strength
# Returns: loss, grads
def loss(self, X, y=None, reg=0.0):
    # Unpack variables from the params dictionary
    W1, b1 = self.params['W1'], self.params['b1']
    W2, b2 = self.params['W2'], self.params['b2']
    N, D = X.shape

    # Compute the forward pass
    scores = None
    # TODO: compute the forward pass
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    z1 = X.dot(W1) + b1        # linear transform of the first layer
    a1 = np.maximum(0, z1)     # ReLU activation
    scores = a1.dot(W2) + b2   # linear transform of the second layer
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    # If the targets are not given then jump out, we're done
    if y is None:
        return scores

    # Compute the loss
    loss = None
    # TODO: compute the loss, including the data loss and the L2 regularization
    # loss for W1 and W2, using the softmax loss function
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    exp_scores = np.exp(scores - np.max(scores, axis=1, keepdims=True))  # shifted exponential scores
    probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)       # class probabilities
    loss = -np.sum(np.log(probs[np.arange(N), y])) / N \
           + reg * (np.sum(np.square(W1)) + np.sum(np.square(W2)))       # data loss + L2 regularization loss
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

    # Backward pass: compute gradients
    grads = {}
    # TODO: compute the gradients
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    probs[np.arange(N), y] -= 1  # subtract 1 at the correct class
    grads['W2'] = a1.T.dot(probs) / N + 2 * reg * W2
    grads['b2'] = np.sum(probs, axis=0) / N
    da1 = probs.dot(W2.T)
    da1[z1 <= 0] = 0  # ReLU backward pass: the gradient is zero where z1 <= 0
    grads['W1'] = X.T.dot(da1) / N + 2 * reg * W1
    grads['b1'] = np.sum(da1, axis=0) / N
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return loss, grads
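A minimal finite-difference check of these analytic gradients (a sketch; it assumes a small TwoLayerNet instance net and tiny arrays X_dev, y_dev are available, and defines its own rel_error helper):

def rel_error(x, y):
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))

loss, grads = net.loss(X_dev, y_dev, reg=0.05)  # analytic gradients
h = 1e-5
for name in ['W1', 'b1', 'W2', 'b2']:
    p = net.params[name]
    num_grad = np.zeros_like(p)
    it = np.nditer(p, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = p[ix]
        p[ix] = old + h
        fxph, _ = net.loss(X_dev, y_dev, reg=0.05)
        p[ix] = old - h
        fxmh, _ = net.loss(X_dev, y_dev, reg=0.05)
        p[ix] = old                             # restore the parameter
        num_grad[ix] = (fxph - fxmh) / (2 * h)  # centered difference
        it.iternext()
    print('%s max relative error: %e' % (name, rel_error(grads[name], num_grad)))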
def train(self, X, y, X_val, y_val,
          learning_rate=1e-3, learning_rate_decay=0.95,
          reg=5e-6, num_iters=100,
          batch_size=200, verbose=False):
    num_train = X.shape[0]
    iterations_per_epoch = max(num_train / batch_size, 1)

    # Use SGD to optimize the parameters in self.model
    loss_history = []
    train_acc_history = []
    val_acc_history = []

    for it in range(num_iters):
        X_batch = None
        y_batch = None
        # TODO: create a random minibatch
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        idx = np.random.choice(num_train, batch_size)
        X_batch = X[idx, :]
        y_batch = y[idx]
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        # Compute loss and gradients using the current minibatch
        loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
        loss_history.append(loss)

        # TODO: gradient descent step
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        self.params['W1'] -= learning_rate * grads['W1']
        self.params['b1'] -= learning_rate * grads['b1']
        self.params['W2'] -= learning_rate * grads['W2']
        self.params['b2'] -= learning_rate * grads['b2']
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        #if verbose and it % 100 == 0:
        #    print('iteration %d / %d: loss %f' % (it, num_iters, loss))

        # Every epoch, check train and val accuracy and decay learning rate.
        if it % iterations_per_epoch == 0:
            # Check accuracy
            train_acc = (self.predict(X_batch) == y_batch).mean()
            val_acc = (self.predict(X_val) == y_val).mean()
            train_acc_history.append(train_acc)
            val_acc_history.append(val_acc)
            # Decay learning rate
            learning_rate *= learning_rate_decay

    return {
        'loss_history': loss_history,
        'train_acc_history': train_acc_history,
        'val_acc_history': val_acc_history,
    }
def predict(self, X):
    y_pred = None
    # TODO: predict
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    z1 = np.dot(X, self.params['W1']) + self.params['b1']
    a1 = np.maximum(0, z1)
    scores = np.dot(a1, self.params['W2']) + self.params['b2']
    y_pred = np.argmax(scores, axis=1)
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    return y_pred
The training loss and accuracy curves are shown below.
Grid search over hyperparameters
best_net = None  # store the best model into this
best_val = -1
best_hs = None
best_lr = None
best_reg = None

input_size = 32 * 32 * 3
hidden_size = [50, 100]
num_classes = 10
learning_rate = np.linspace(0.75e-3, 1.25e-3, 5)
reg_ = np.linspace(0.2, 0.4, 5)

# TODO: grid search over the hyperparameters
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
for hs in hidden_size:
    for lr in learning_rate:
        for reg in reg_:
            net = TwoLayerNet(input_size, hs, num_classes)
            stats = net.train(X_train, y_train, X_val, y_val,
                              num_iters=1000, batch_size=200,
                              learning_rate=lr, learning_rate_decay=0.95,
                              reg=reg, verbose=True)
            train_acc = stats['train_acc_history'][-1]
            val_acc = stats['val_acc_history'][-1]
            loss = stats['loss_history'][-1]
            print('hs %d | lr %0.3e | reg %0.3e | loss= %0.3e | train_acc %f | val_acc %f'
                  % (hs, lr, reg, loss, train_acc, val_acc))
            if val_acc > best_val:
                best_val = val_acc
                best_net = net
                best_hs = hs
                best_lr = lr
                best_reg = reg
print('best val_acc = %f for hs %d | lr %e | reg %e' % (best_val, best_hs, best_lr, best_reg))
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
Image feature extraction:
HOG features:
These characterize an image by the distribution of gradient orientations over local regions, capturing shape and edge information.
Color histogram (HSV) features:
A descriptor that counts the color distribution in the HSV color space, characterizing the image's color content.
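A minimal sketch of how these two feature types are typically extracted in this assignment (assuming the cs231n.features helpers hog_feature, color_histogram_hsv and extract_features are available and X_train / X_val / X_test hold the raw images):

from cs231n.features import hog_feature, color_histogram_hsv, extract_features

num_color_bins = 10  # number of bins in the HSV hue histogram
feature_fns = [hog_feature, lambda img: color_histogram_hsv(img, nbin=num_color_bins)]
X_train_feats = extract_features(X_train, feature_fns, verbose=True)
X_val_feats = extract_features(X_val, feature_fns)
X_test_feats = extract_features(X_test, feature_fns)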
Grid search over SVM hyperparameters on the extracted features
# Use the validation set to tune the learning rate and regularization strength
from cs231n.classifiers.linear_classifier import LinearSVM

learning_rates = [1e-9, 1e-8, 1e-7]
regularization_strengths = [5e4, 5e5, 5e6]

results = {}
best_val = -1
best_svm = None

# TODO: train SVM classifiers on the extracted features with different learning rates
# and regularization strengths, and evaluate them on the validation set.
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
for lr in learning_rates:
    for reg in regularization_strengths:
        svm = LinearSVM()
        loss_hist = svm.train(X_train_feats, y_train, learning_rate=lr, reg=reg,
                              num_iters=1500, verbose=False)
        # compute the training and validation accuracy
        y_train_pred = svm.predict(X_train_feats)
        train_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = svm.predict(X_val_feats)
        val_accuracy = np.mean(y_val == y_val_pred)
        results[(lr, reg)] = (train_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_svm = svm
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
Hyperparameter search for the two-layer network on features:
Initially I used a very small learning-rate range, a large regularization strength, and too few training iterations (1500), so the validation accuracy stayed around 0.13. After widening the learning-rate range, reducing the regularization strength, and increasing the number of iterations, the best result was a validation accuracy of 0.59 with learning rate 0.1, regularization strength 0, and 5000 iterations. This model also reached a test accuracy of 0.578, indicating good generalization.
best_net_simple = TwoLayerNet(input_dim=154, hidden_size=500, num_classes=10)

stats = best_net_simple.train(X_train_feats, y_train, X_val_feats, y_val,
                              learning_rate=0.1, reg=0,
                              num_iters=5000, batch_size=200,
                              learning_rate_decay=0.95, verbose=True)

train_acc = (best_net_simple.predict(X_train_feats) == y_train).mean()
val_acc = (best_net_simple.predict(X_val_feats) == y_val).mean()
test_acc = (best_net_simple.predict(X_test_feats) == y_test).mean()

print("\nFinal results:")
print(f"Train accuracy: {train_acc:.4f}")
print(f"Validation accuracy: {val_acc:.4f}")
print(f"Test accuracy: {test_acc:.4f}")