python打卡訓練營打卡記錄day37

知識點回顧:
  1. 過擬合的判斷:測試集和訓練集同步打印指標
  2. 模型的保存和加載
    1. 僅保存權重
    2. 保存權重和模型
    3. 保存全部信息checkpoint,還包含訓練狀態
  3. 早停策略

作業:對信貸數據集訓練后保存權重,加載權重后繼續訓練50輪,并采取早停策略

import pandas as pd    # tabular data loading / manipulation
import numpy as np     # numerical arrays
import matplotlib.pyplot as plt    # plotting
import seaborn as sns  # statistical plots built on matplotlib
import warnings

warnings.filterwarnings("ignore")

import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
from tqdm import tqdm  # progress-bar display during training
import os
from sklearn.metrics import accuracy_score

# Matplotlib configuration so Chinese labels render correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']  # common Windows CJK font
plt.rcParams['axes.unicode_minus'] = False    # render the minus sign properly
# ---------- Data loading & device selection ----------
data = pd.read_csv('data.csv')  # credit-default dataset

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用設備: {device}")

# Columns with string (object) dtype are the categorical features.
discrete_features = data.select_dtypes(include=['object']).columns.tolist()

# Label-encode 'Home Ownership'.
home_ownership_mapping = {
    'Own Home': 1,
    'Rent': 2,
    'Have Mortgage': 3,
    'Home Mortgage': 4,
}
data['Home Ownership'] = data['Home Ownership'].map(home_ownership_mapping)

# Label-encode 'Years in current job' (ordinal: tenure increases with code).
years_in_job_mapping = {
    '< 1 year': 1, '1 year': 2, '2 years': 3, '3 years': 4,
    '4 years': 5, '5 years': 6, '6 years': 7, '7 years': 8,
    '8 years': 9, '9 years': 10, '10+ years': 11,
}
data['Years in current job'] = data['Years in current job'].map(years_in_job_mapping)

# One-hot encode 'Purpose'; the resulting bool dummy columns are cast to int.
data = pd.get_dummies(data, columns=['Purpose'])
data2 = pd.read_csv("data.csv")  # reread the raw file to diff column names
list_final = [col for col in data.columns if col not in data2.columns]
for col in list_final:
    data[col] = data[col].astype(int)

# Map 'Term' to 0/1 and rename the column.
term_mapping = {'Short Term': 0, 'Long Term': 1}
data['Term'] = data['Term'].map(term_mapping)
data.rename(columns={'Term': 'Long Term'}, inplace=True)

# Fill missing numeric values.
# NOTE(review): the original comment said "median", but the code fills with
# the column mode; behavior kept as-is.
continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist()
for feature in continuous_features:
    mode_value = data[feature].mode()[0]
    data[feature].fillna(mode_value, inplace=True)

# Single 80/20 train/test split. (The original called train_test_split twice
# with the same random_state — the duplicate call was redundant and removed.)
X = data.drop(['Credit Default'], axis=1)  # features
y = data['Credit Default']                 # label
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Min-max scale (fit on train only), then move tensors to the device.
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train.values).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test.values).to(device)
class MLP(nn.Module):
    """Two-layer perceptron: input -> 64 hidden units (ReLU) -> 2 class logits."""

    def __init__(self, input_size):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(input_size, 64)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(64, 2)

    def forward(self, x):
        # Returns raw logits; CrossEntropyLoss applies softmax internally.
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x
# -------------------- Initial training phase (20000 epochs) --------------------
def initial_training():
    """Train an MLP on the module-level train/test tensors with periodic
    evaluation, best-checkpoint saving, and early stopping.

    Uses module globals: X_train, y_train, X_test, y_test, device.
    Saves the best checkpoint (model + optimizer state) to 'best_model.pth'.
    """
    model = MLP(input_size=X_train.shape[1]).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    # Metric history, recorded only at evaluation epochs.
    train_losses = []
    test_losses = []
    test_accuracies = []
    eval_epochs = []

    # Training hyperparameters.
    num_epochs = 20000
    best_test_loss = float('inf')
    patience = 50  # counted in evaluations (one per 200 epochs), not epochs
    counter = 0

    with tqdm(total=num_epochs, desc="初始訓練進度") as pbar:
        for epoch in range(num_epochs):
            # Full-batch training step.
            model.train()
            outputs = model(X_train)
            train_loss = criterion(outputs, y_train)
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

            # Evaluate every 200 epochs.
            if (epoch + 1) % 200 == 0:
                model.eval()
                with torch.no_grad():
                    test_outputs = model(X_test)
                    test_loss = criterion(test_outputs, y_test)
                    _, predicted = torch.max(test_outputs, 1)
                    accuracy = accuracy_score(y_test.cpu(), predicted.cpu())
                model.train()

                print(f"Epoch [{epoch+1}/{num_epochs}], Test Accuracy: {accuracy:.4f}")

                train_losses.append(train_loss.item())
                test_losses.append(test_loss.item())
                test_accuracies.append(accuracy)
                eval_epochs.append(epoch + 1)

                # Save checkpoint whenever the test loss improves.
                if test_loss < best_test_loss:
                    best_test_loss = test_loss.item()
                    torch.save({
                        'model_state_dict': model.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                        'best_test_loss': best_test_loss,
                        'epoch': epoch,
                    }, 'best_model.pth')
                    counter = 0
                else:
                    counter += 1
                    if counter >= patience:
                        print(f"\n初始訓練早停于第{epoch+1}輪")
                        break

                pbar.set_postfix({'Train Loss': f'{train_loss.item():.4f}',
                                  'Test Loss': f'{test_loss.item():.4f}'})

            # Advance the progress bar in coarse steps.
            if (epoch + 1) % 1000 == 0:
                pbar.update(1000)

    print("初始訓練完成,最佳模型已保存")
    plot_metrics(train_losses, test_losses, test_accuracies, eval_epochs,
                 title_prefix="Initial Training")
# -------------------- Plotting helper --------------------
def plot_metrics(train_losses, test_losses, test_accuracies, eval_epochs, title_prefix=""):
    """Plot the loss curves (train vs. test) and the test-accuracy curve
    side by side for the recorded evaluation epochs."""
    plt.figure(figsize=(12, 5))

    # Left panel: loss curves.
    plt.subplot(1, 2, 1)
    plt.plot(eval_epochs, train_losses, label='Train Loss')
    plt.plot(eval_epochs, test_losses, label='Test Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title(f'{title_prefix} Loss Curve')
    plt.legend()

    # Right panel: accuracy curve.
    plt.subplot(1, 2, 2)
    plt.plot(eval_epochs, test_accuracies, label='Test Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.title(f'{title_prefix} Accuracy Curve')
    plt.legend()

    plt.tight_layout()
    plt.show()
# -------------------- Continued training phase (50 epochs) --------------------
def continue_training():
    """Load the saved checkpoint and train for up to 50 more epochs with a
    fresh early-stopping budget (patience=10, evaluated every epoch).

    Uses module globals: X_train, y_train, X_test, y_test, device.
    Raises FileNotFoundError if 'best_model.pth' does not exist.
    """
    model = MLP(input_size=X_train.shape[1]).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01)

    train_losses = []
    test_losses = []
    test_accuracies = []
    eval_epochs = []

    if not os.path.exists('best_model.pth'):
        raise FileNotFoundError("找不到檢查點文件,請先運行初始訓練")
    # map_location lets a GPU-saved checkpoint load on a CPU-only machine (fix:
    # the original torch.load without map_location fails in that case).
    checkpoint = torch.load('best_model.pth', map_location=device)
    try:
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    except KeyError:
        # Backward compatibility: old checkpoints stored the raw state_dict.
        model.load_state_dict(torch.load('best_model.pth', map_location=device))

    # Independent early-stopping parameters for this phase.
    num_epochs = 50
    best_test_loss = float('inf')
    patience = 10
    counter = 0

    with tqdm(total=num_epochs, desc="繼續訓練進度") as pbar:
        for epoch in range(num_epochs):
            # Full-batch training step.
            model.train()
            outputs = model(X_train)
            train_loss = criterion(outputs, y_train)
            optimizer.zero_grad()
            train_loss.backward()
            optimizer.step()

            # Evaluate every epoch in this short phase.
            model.eval()
            with torch.no_grad():
                test_outputs = model(X_test)
                test_loss = criterion(test_outputs, y_test)
                _, predicted = torch.max(test_outputs, 1)
                accuracy = accuracy_score(y_test.cpu(), predicted.cpu())

            print(f"Epoch [{epoch+1}/{num_epochs}], Test Accuracy: {accuracy:.4f}")

            train_losses.append(train_loss.item())
            test_losses.append(test_loss.item())
            test_accuracies.append(accuracy)
            eval_epochs.append(epoch + 1)
            model.train()

            # Early stopping with the fresh budget.
            if test_loss < best_test_loss:
                best_test_loss = test_loss.item()
                counter = 0
            else:
                counter += 1
                if counter >= patience:
                    print(f"\n繼續訓練早停于第{epoch+1}輪")
                    break

            pbar.set_postfix({'Train Loss': f'{train_loss.item():.4f}',
                              'Test Loss': f'{test_loss.item():.4f}',
                              'Test Acc': f'{accuracy:.4f}'})
            pbar.update(1)

    print("繼續訓練完成")
    plot_metrics(train_losses, test_losses, test_accuracies, eval_epochs,
                 title_prefix="Continue Training")


if __name__ == "__main__":
    # NOTE(review): continue_training() is invoked separately after the
    # initial run (as in the original notebook session).
    initial_training()
初始訓練進度:   0%|          | 0/20000 [00:00<?, ?it/s, Train Loss=0.5753, Test Loss=0.5885]
Epoch [200/20000], Test Accuracy: 0.7060
Epoch [400/20000], Test Accuracy: 0.7060
初始訓練進度:   0%|          | 0/20000 [00:00<?, ?it/s, Train Loss=0.5611, Test Loss=0.5740]
Epoch [600/20000], Test Accuracy: 0.7060
Epoch [800/20000], Test Accuracy: 0.7060
初始訓練進度:   5%|▌         | 1000/20000 [00:01<00:16, 1168.82it/s, Train Loss=0.5458, Test Loss=0.5581]
Epoch [1000/20000], Test Accuracy: 0.7060
Epoch [1200/20000], Test Accuracy: 0.7060
初始訓練進度:   5%|▌         | 1000/20000 [00:01<00:16, 1168.82it/s, Train Loss=0.5312, Test Loss=0.5427]
Epoch [1400/20000], Test Accuracy: 0.7140
Epoch [1600/20000], Test Accuracy: 0.7207
初始訓練進度:  10%|█         | 2000/20000 [00:01<00:13, 1303.08it/s, Train Loss=0.5185, Test Loss=0.5291]
Epoch [1800/20000], Test Accuracy: 0.7267
Epoch [2000/20000], Test Accuracy: 0.7407
初始訓練進度:  10%|█         | 2000/20000 [00:01<00:13, 1303.08it/s, Train Loss=0.5080, Test Loss=0.5179]
Epoch [2200/20000], Test Accuracy: 0.7473
Epoch [2400/20000], Test Accuracy: 0.7593
初始訓練進度:  10%|█         | 2000/20000 [00:02<00:13, 1303.08it/s, Train Loss=0.4996, Test Loss=0.5090]
Epoch [2600/20000], Test Accuracy: 0.7640
Epoch [2800/20000], Test Accuracy: 0.7640
初始訓練進度:  15%|█▌        | 3000/20000 [00:02<00:12, 1378.59it/s, Train Loss=0.4930, Test Loss=0.5021]
Epoch [3000/20000], Test Accuracy: 0.7653
Epoch [3200/20000], Test Accuracy: 0.7647
初始訓練進度:  15%|█▌        | 3000/20000 [00:02<00:12, 1378.59it/s, Train Loss=0.4879, Test Loss=0.4968]
Epoch [3400/20000], Test Accuracy: 0.7653
Epoch [3600/20000], Test Accuracy: 0.7660
初始訓練進度:  20%|██        | 4000/20000 [00:02<00:11, 1411.22it/s, Train Loss=0.4840, Test Loss=0.4927]
Epoch [3800/20000], Test Accuracy: 0.7667
Epoch [4000/20000], Test Accuracy: 0.7667
初始訓練進度:  20%|██        | 4000/20000 [00:03<00:11, 1411.22it/s, Train Loss=0.4810, Test Loss=0.4895]
Epoch [4200/20000], Test Accuracy: 0.7667
Epoch [4400/20000], Test Accuracy: 0.7667
初始訓練進度:  20%|██        | 4000/20000 [00:03<00:11, 1411.22it/s, Train Loss=0.4787, Test Loss=0.4871]
Epoch [4600/20000], Test Accuracy: 0.7667
Epoch [4800/20000], Test Accuracy: 0.7667
初始訓練進度:  25%|██▌       | 5000/20000 [00:03<00:10, 1454.26it/s, Train Loss=0.4768, Test Loss=0.4852]
Epoch [5000/20000], Test Accuracy: 0.7673
Epoch [5200/20000], Test Accuracy: 0.7673
初始訓練進度:  25%|██▌       | 5000/20000 [00:03<00:10, 1454.26it/s, Train Loss=0.4753, Test Loss=0.4836]
Epoch [5400/20000], Test Accuracy: 0.7673
Epoch [5600/20000], Test Accuracy: 0.7673
初始訓練進度:  30%|███       | 6000/20000 [00:04<00:09, 1504.37it/s, Train Loss=0.4740, Test Loss=0.4822]
Epoch [5800/20000], Test Accuracy: 0.7673
Epoch [6000/20000], Test Accuracy: 0.7673
初始訓練進度:  30%|███       | 6000/20000 [00:04<00:09, 1504.37it/s, Train Loss=0.4730, Test Loss=0.4811]
Epoch [6200/20000], Test Accuracy: 0.7673
Epoch [6400/20000], Test Accuracy: 0.7673
初始訓練進度:  30%|███       | 6000/20000 [00:04<00:09, 1504.37it/s, Train Loss=0.4720, Test Loss=0.4802]
Epoch [6600/20000], Test Accuracy: 0.7673
Epoch [6800/20000], Test Accuracy: 0.7660
初始訓練進度:  35%|███▌      | 7000/20000 [00:05<00:08, 1451.45it/s, Train Loss=0.4712, Test Loss=0.4794]
Epoch [7000/20000], Test Accuracy: 0.7660
Epoch [7200/20000], Test Accuracy: 0.7660
初始訓練進度:  35%|███▌      | 7000/20000 [00:05<00:08, 1451.45it/s, Train Loss=0.4705, Test Loss=0.4787]
Epoch [7400/20000], Test Accuracy: 0.7660
Epoch [7600/20000], Test Accuracy: 0.7660
初始訓練進度:  40%|████      | 8000/20000 [00:05<00:08, 1448.11it/s, Train Loss=0.4699, Test Loss=0.4780]
Epoch [7800/20000], Test Accuracy: 0.7660
Epoch [8000/20000], Test Accuracy: 0.7660
初始訓練進度:  40%|████      | 8000/20000 [00:05<00:08, 1448.11it/s, Train Loss=0.4693, Test Loss=0.4775]
Epoch [8200/20000], Test Accuracy: 0.7660
Epoch [8400/20000], Test Accuracy: 0.7660
初始訓練進度:  40%|████      | 8000/20000 [00:06<00:08, 1448.11it/s, Train Loss=0.4688, Test Loss=0.4769]
Epoch [8600/20000], Test Accuracy: 0.7660
Epoch [8800/20000], Test Accuracy: 0.7667
初始訓練進度:  45%|████▌     | 9000/20000 [00:06<00:07, 1468.30it/s, Train Loss=0.4683, Test Loss=0.4765]
Epoch [9000/20000], Test Accuracy: 0.7667
Epoch [9200/20000], Test Accuracy: 0.7667
初始訓練進度:  45%|████▌     | 9000/20000 [00:06<00:07, 1468.30it/s, Train Loss=0.4679, Test Loss=0.4761]
Epoch [9400/20000], Test Accuracy: 0.7667
Epoch [9600/20000], Test Accuracy: 0.7667
初始訓練進度:  50%|█████     | 10000/20000 [00:06<00:06, 1505.67it/s, Train Loss=0.4675, Test Loss=0.4757]
Epoch [9800/20000], Test Accuracy: 0.7660
Epoch [10000/20000], Test Accuracy: 0.7667
初始訓練進度:  50%|█████     | 10000/20000 [00:07<00:06, 1505.67it/s, Train Loss=0.4671, Test Loss=0.4753]
Epoch [10200/20000], Test Accuracy: 0.7667
Epoch [10400/20000], Test Accuracy: 0.7667
初始訓練進度:  50%|█████     | 10000/20000 [00:07<00:06, 1505.67it/s, Train Loss=0.4667, Test Loss=0.4750]
Epoch [10600/20000], Test Accuracy: 0.7673
Epoch [10800/20000], Test Accuracy: 0.7680
初始訓練進度:  55%|█████▌    | 11000/20000 [00:07<00:05, 1539.84it/s, Train Loss=0.4664, Test Loss=0.4747]
Epoch [11000/20000], Test Accuracy: 0.7680
Epoch [11200/20000], Test Accuracy: 0.7680
初始訓練進度:  55%|█████▌    | 11000/20000 [00:07<00:05, 1539.84it/s, Train Loss=0.4661, Test Loss=0.4744]
Epoch [11400/20000], Test Accuracy: 0.7680
Epoch [11600/20000], Test Accuracy: 0.7680
初始訓練進度:  60%|██████    | 12000/20000 [00:08<00:05, 1584.28it/s, Train Loss=0.4658, Test Loss=0.4742]
Epoch [11800/20000], Test Accuracy: 0.7687
Epoch [12000/20000], Test Accuracy: 0.7680
初始訓練進度:  60%|██████    | 12000/20000 [00:08<00:05, 1584.28it/s, Train Loss=0.4655, Test Loss=0.4739]
Epoch [12200/20000], Test Accuracy: 0.7680
Epoch [12400/20000], Test Accuracy: 0.7680
初始訓練進度:  60%|██████    | 12000/20000 [00:08<00:05, 1584.28it/s, Train Loss=0.4652, Test Loss=0.4737]
Epoch [12600/20000], Test Accuracy: 0.7680
Epoch [12800/20000], Test Accuracy: 0.7687
初始訓練進度:  65%|██████▌   | 13000/20000 [00:08<00:04, 1618.84it/s, Train Loss=0.4650, Test Loss=0.4735]
Epoch [13000/20000], Test Accuracy: 0.7687
Epoch [13200/20000], Test Accuracy: 0.7687
初始訓練進度:  65%|██████▌   | 13000/20000 [00:09<00:04, 1618.84it/s, Train Loss=0.4647, Test Loss=0.4733]
Epoch [13400/20000], Test Accuracy: 0.7687
Epoch [13600/20000], Test Accuracy: 0.7687
初始訓練進度:  70%|███████   | 14000/20000 [00:09<00:03, 1635.55it/s, Train Loss=0.4645, Test Loss=0.4731]
Epoch [13800/20000], Test Accuracy: 0.7687
Epoch [14000/20000], Test Accuracy: 0.7693
初始訓練進度:  70%|███████   | 14000/20000 [00:09<00:03, 1635.55it/s, Train Loss=0.4643, Test Loss=0.4730]
Epoch [14200/20000], Test Accuracy: 0.7687
Epoch [14400/20000], Test Accuracy: 0.7687
初始訓練進度:  70%|███████   | 14000/20000 [00:09<00:03, 1635.55it/s, Train Loss=0.4641, Test Loss=0.4728]
Epoch [14600/20000], Test Accuracy: 0.7687
Epoch [14800/20000], Test Accuracy: 0.7687
初始訓練進度:  75%|███████▌  | 15000/20000 [00:10<00:03, 1641.97it/s, Train Loss=0.4639, Test Loss=0.4727]
Epoch [15000/20000], Test Accuracy: 0.7687
Epoch [15200/20000], Test Accuracy: 0.7687
初始訓練進度:  75%|███████▌  | 15000/20000 [00:10<00:03, 1641.97it/s, Train Loss=0.4637, Test Loss=0.4725]
Epoch [15400/20000], Test Accuracy: 0.7700
Epoch [15600/20000], Test Accuracy: 0.7707
初始訓練進度:  80%|████████  | 16000/20000 [00:10<00:02, 1630.00it/s, Train Loss=0.4635, Test Loss=0.4724]
Epoch [15800/20000], Test Accuracy: 0.7707
Epoch [16000/20000], Test Accuracy: 0.7707
初始訓練進度:  80%|████████  | 16000/20000 [00:10<00:02, 1630.00it/s, Train Loss=0.4633, Test Loss=0.4723]
Epoch [16200/20000], Test Accuracy: 0.7707
Epoch [16400/20000], Test Accuracy: 0.7707
初始訓練進度:  80%|████████  | 16000/20000 [00:11<00:02, 1630.00it/s, Train Loss=0.4631, Test Loss=0.4722]
Epoch [16600/20000], Test Accuracy: 0.7707
Epoch [16800/20000], Test Accuracy: 0.7707
初始訓練進度:  85%|████████▌ | 17000/20000 [00:11<00:01, 1629.38it/s, Train Loss=0.4629, Test Loss=0.4721]
Epoch [17000/20000], Test Accuracy: 0.7707
Epoch [17200/20000], Test Accuracy: 0.7707
初始訓練進度:  85%|████████▌ | 17000/20000 [00:11<00:01, 1629.38it/s, Train Loss=0.4628, Test Loss=0.4720]
Epoch [17400/20000], Test Accuracy: 0.7707
Epoch [17600/20000], Test Accuracy: 0.7707
初始訓練進度:  90%|█████████ | 18000/20000 [00:11<00:01, 1594.18it/s, Train Loss=0.4626, Test Loss=0.4719]
Epoch [17800/20000], Test Accuracy: 0.7707
Epoch [18000/20000], Test Accuracy: 0.7700
初始訓練進度:  90%|█████████ | 18000/20000 [00:12<00:01, 1594.18it/s, Train Loss=0.4624, Test Loss=0.4718]
Epoch [18200/20000], Test Accuracy: 0.7700
Epoch [18400/20000], Test Accuracy: 0.7700
初始訓練進度:  90%|█████████ | 18000/20000 [00:12<00:01, 1594.18it/s, Train Loss=0.4623, Test Loss=0.4717]
Epoch [18600/20000], Test Accuracy: 0.7693
Epoch [18800/20000], Test Accuracy: 0.7693
初始訓練進度:  95%|█████████▌| 19000/20000 [00:12<00:00, 1597.16it/s, Train Loss=0.4621, Test Loss=0.4717]
Epoch [19000/20000], Test Accuracy: 0.7693
Epoch [19200/20000], Test Accuracy: 0.7693
初始訓練進度:  95%|█████████▌| 19000/20000 [00:12<00:00, 1597.16it/s, Train Loss=0.4620, Test Loss=0.4716]
Epoch [19400/20000], Test Accuracy: 0.7693
Epoch [19600/20000], Test Accuracy: 0.7693
初始訓練進度: 100%|██████████| 20000/20000 [00:13<00:00, 1534.45it/s, Train Loss=0.4619, Test Loss=0.4715]
Epoch [19800/20000], Test Accuracy: 0.7693
Epoch [20000/20000], Test Accuracy: 0.7693
初始訓練完成,最佳模型已保存

continue_training()
Epoch [29/50], Test Accuracy: 0.7693
Epoch [30/50], Test Accuracy: 0.7693
Epoch [31/50], Test Accuracy: 0.7693
Epoch [32/50], Test Accuracy: 0.7693
Epoch [33/50], Test Accuracy: 0.7693
Epoch [34/50], Test Accuracy: 0.7693
Epoch [35/50], Test Accuracy: 0.7693
Epoch [36/50], Test Accuracy: 0.7693
Epoch [37/50], Test Accuracy: 0.7693
Epoch [38/50], Test Accuracy: 0.7693
Epoch [39/50], Test Accuracy: 0.7693
Epoch [40/50], Test Accuracy: 0.7693
Epoch [41/50], Test Accuracy: 0.7693
Epoch [42/50], Test Accuracy: 0.7693
Epoch [43/50], Test Accuracy: 0.7693
Epoch [44/50], Test Accuracy: 0.7693
Epoch [45/50], Test Accuracy: 0.7693
Epoch [46/50], Test Accuracy: 0.7693
Epoch [47/50], Test Accuracy: 0.7693
Epoch [48/50], Test Accuracy: 0.7693
Epoch [49/50], Test Accuracy: 0.7693
Epoch [50/50], Test Accuracy: 0.7693
繼續訓練完成

總結

一、初始訓練階段分析(20000輪)

1. 損失曲線表現

  • 訓練損失:從約0.60穩步下降至0.46,說明模型在訓練集上持續優化。

  • 測試損失:從0.58下降至0.47,但下降幅度較小,且在訓練后期趨于穩定。

  • 關鍵問題

    • 訓練損失和測試損失的差距較小(0.46 vs 0.47),表明模型未明顯過擬合,但學習能力可能不足

    • 測試損失未進一步下降,可能是模型復雜度過低或數據特征難以捕捉。

2. 準確率曲線表現

  • 測試準確率:最終穩定在 76.93%,未突破77%。

  • 關鍵問題

    • 準確率曲線在訓練后期完全平坦,表明模型已收斂到局部最優,但性能瓶頸明顯

    • 可能原因:模型結構簡單(僅兩層全連接)、特征工程不足或類別不均衡。


二、繼續訓練階段分析(50輪)

1. 損失曲線表現

  • 訓練/測試損失:完全無變化(保持0.4619和0.4715)。

  • 關鍵問題

    • 加載初始訓練的最佳模型后,繼續訓練未帶來任何優化,說明模型參數已完全收斂,優化器無法找到更優方向。

2. 準確率曲線表現

  • 測試準確率:全程保持在76.93%,與初始訓練結果一致。

  • 關鍵問題

    • 繼續訓練未能提升性能,表明模型能力已達上限,或需要調整訓練策略。


本文來自互聯網用戶投稿,該文觀點僅代表作者本人,不代表本站立場。本站僅提供信息存儲空間服務,不擁有所有權,不承擔相關法律責任。
如若轉載,請注明出處:http://www.pswp.cn/bicheng/82517.shtml
繁體地址,請注明出處:http://hk.pswp.cn/bicheng/82517.shtml
英文地址,請注明出處:http://en.pswp.cn/bicheng/82517.shtml

如若內容造成侵權/違法違規/事實不符,請聯系多彩編程網進行投訴反饋email:809451989@qq.com,一經查實,立即刪除!

相關文章

卷積神經網絡(CNN)深度講解

卷積神經網絡&#xff08;CNN&#xff09; 本篇博客參考自大佬的開源書籍&#xff0c;幫助大家從頭開始學習卷積神經網絡&#xff0c;謝謝各位的支持了&#xff0c;在此期待各位能與我共同進步? 卷積神經網絡&#xff08;CNN&#xff09;是一種特殊的深度學習網絡結構&#x…

深度體驗:海螺 AI,開啟智能創作新時代

人工智能 AI 工具如雨后春筍般涌現&#xff0c;而海螺 AI 以其獨特的魅力與卓越的性能&#xff0c;迅速在眾多產品中嶄露頭角&#xff0c;成為了無數創作者、辦公族以及各行業人士的得力助手。近期&#xff0c;我對海螺 AI 進行了深入的使用體驗&#xff0c;接下來就為大家詳細…

哈希表day5

242 有效的字母異位詞 思路就是轉為ASCII碼&#xff0c;然后用一個數組記錄26位字母出現的次數 #include <string> class Solution{ public:bool isAnagram(string s,string t){int record[26]{0};for (int i0;i<s.size();i){record[s[i]-a];}for (int i0;i<t.si…

【Python數據庫全棧指南】從SQL到ORM深度實踐

目錄 &#x1f31f; 前言&#x1f3d7;? 技術背景與價值&#x1fa79; 當前技術痛點&#x1f6e0;? 解決方案概述&#x1f465; 目標讀者說明 &#x1f9e0; 一、技術原理剖析&#x1f4ca; 核心概念圖解&#x1f4a1; 核心作用講解&#x1f527; 關鍵技術模塊說明?? 技術選…

Android磁盤占用優化全解析:從監控到治理的存儲效率革命

引言 隨著移動應用功能的復雜化&#xff0c;磁盤占用問題日益突出。據統計&#xff0c;國內頭部應用的平均安裝包大小已超100MB&#xff0c;運行時緩存、日志、圖片等數據更可能使磁盤占用突破GB級。過度的磁盤消耗不僅影響用戶設備空間&#xff0c;還可能觸發系統的“應用數據…

AJAX-讓數據活起來(一):入門

目錄 一、AJAX概念和axios使用 1.1 什么是AJAX ? 1.2 怎么用AJAX ? 1.3 axios使用 二、認識URL 2.1 什么是URL? 2.2 URL的組成 組成 協議 域名 資源路徑 獲取-新聞列表 三、URL查詢參數 URL查詢參數 axios - 查詢參數 四、常用請求方法和數據提交 常用請求…

【C++篇】list模擬實現

實現接口&#xff1a; list的無參構造、n個val構造、拷貝構造 operator重載 實現迭代器 push_back() push_front() erase() insert() 頭尾刪 #pragma once #include<iostream> #include<assert.h> using namespace std;namespace liu {//定義list節點temp…

Go 語言范圍循環變量重用問題與 VSCode 調試解決方法

文章目錄 問題描述問題原因1. Go 1.21 及更早版本的范圍循環行為2. Go 1.22 的改進3. VSCode 調試中的問題4. 命令行 dlv debug 的正確輸出 三種解決方法1. 啟用 Go 模塊2. 優化 VSCode 調試配置3. 修改代碼以確保兼容性4. 清理緩存5. 驗證環境 驗證結果結論 在 Go 編程中&…

快速創建 Vue 3 項目

安裝 Node.js 和 Vue CL 安裝 Node.js&#xff1a;訪問 https://nodejs.org/ 下載并安裝 LTS 版本。 安裝完后&#xff0c;在終端檢查版本&#xff1a; node -v npm -v安裝 Vue CLI&#xff08;全局&#xff09;&#xff1a; npm install -g vue/cli創建 Vue 3 項目 vue cr…

java學習日志——Spring Security介紹

使用Spring Security要重寫UserDetailsService的loadUserByUsername方法&#xff08;相當于自定了認證邏輯&#xff09;

【C++進階篇】初識哈希

哈希表深度剖析&#xff1a;原理、沖突解決與C容器實戰 一. 哈希1.1 哈希概念1.2 哈希思想1.3 常見的哈希函數1.3.1 直接定址法1.3.2 除留余數法1.3.3 乘法散列法&#xff08;了解&#xff09;1.3.4 平方取中法&#xff08;了解&#xff09; 1.4 哈希沖突1.4.1 沖突原因1.4.2 解…

單機Kafka配置ssl并在springboot使用

目錄 SSL證書生成根證書生成服務端和客戶端證書生成keystore.jks和truststore.jks輔助腳本單獨生成truststore.jks 環境配置hosts文件kafka server.properties配置ssl 啟動kafkakafka基礎操作springboot集成準備工作需要配置的文件開始消費 SSL證書 證書主要包含兩大類&#x…

PCB設計教程【入門篇】——電路分析基礎-元件數據手冊

前言 本教程基于B站Expert電子實驗室的PCB設計教學的整理&#xff0c;為個人學習記錄&#xff0c;旨在幫助PCB設計新手入門。所有內容僅作學習交流使用&#xff0c;無任何商業目的。若涉及侵權&#xff0c;請隨時聯系&#xff0c;將會立即處理 目錄 前言 一、數據手冊的重要…

Vue2實現Office文檔(docx、xlsx、pdf)在線預覽

&#x1f31f; 前言 歡迎來到我的技術小宇宙&#xff01;&#x1f30c; 這里不僅是我記錄技術點滴的后花園&#xff0c;也是我分享學習心得和項目經驗的樂園。&#x1f4da; 無論你是技術小白還是資深大牛&#xff0c;這里總有一些內容能觸動你的好奇心。&#x1f50d; &#x…

【辰輝創聚生物】JAK-STAT信號通路相關蛋白:細胞信號傳導的核心樞紐

在細胞間復雜的信號傳遞網絡中&#xff0c;Janus 激酶 - 信號轉導和轉錄激活因子&#xff08;JAK-STAT&#xff09;信號通路猶如一條高速信息公路&#xff0c;承擔著傳遞細胞外信號、調控基因表達的重要使命。JAK-STAT 信號通路相關蛋白作為這條信息公路上的 “關鍵節點” 和 “…

OceanBase數據庫從入門到精通(運維監控篇)

文章目錄 一、OceanBase 運維監控體系概述二、OceanBase 系統表與元數據查詢2.1 元數據查詢基礎2.2 核心系統表詳解2.3 分區元數據查詢實戰三、OceanBase 性能監控SQL詳解3.1 關鍵性能指標監控3.2 SQL性能分析實戰四、OceanBase 空間使用監控4.1 表空間監控體系4.2 空間使用趨勢…

linux 進程間通信_共享內存

目錄 一、什么是共享內存&#xff1f; 二、共享內存的特點 優點 缺點 三、使用共享內存的基本函數 1、創建共享內存shmget() 2、掛接共享內存shmat 3、脫離掛接shmdt 4、共享內存控制shmctl 5.查看和刪除共享內存 comm.hpp server.cc Client.cc Makefile 一、什么…

Spring Boot 登錄實現:JWT 與 Session 全面對比與實戰講解

Spring Boot 登錄實現&#xff1a;JWT 與 Session 全面對比與實戰講解 2025.5.21-23:11今天在學習黑馬點評時突然發現用的是與蒼穹外賣jwt不一樣的登錄方式-Session&#xff0c;于是就想記錄一下這兩種方式有什么不同 在實際開發中&#xff0c;登錄認證是后端最基礎也是最重要…

Vue中的 VueComponent

VueComponent 組件的本質 Vue 組件是一個可復用的 Vue 實例。每個組件本質上就是通過 Vue.extend() 創建的構造函數&#xff0c;或者在 Vue 3 中是由函數式 API&#xff08;Composition API&#xff09;創建的。 // Vue 2 const MyComponent Vue.extend({template: <div…

使用 FFmpeg 將視頻轉換為高質量 GIF(保留原始尺寸和幀率)

在制作教程動圖、產品展示、前端 UI 演示等場景中,我們經常需要將視頻轉換為體積合適且清晰的 GIF 動圖。本文將詳細介紹如何使用 FFmpeg 工具將視頻轉為高質量 GIF,包括: ? 保留原視頻尺寸或自定義縮放? 保留原始幀率或自定義幀率? 使用調色板優化色彩質量? 降低體積同…