- 🍨 本文為🔗365天深度學習訓練營中的學習記錄博客
- 🍖 原作者:K同學啊
一、數據預處理
1.設置GPU
import torch.nn.functional as F
import torch.nn as nn
import torch
import torchvision

# Select the GPU when available, otherwise fall back to the CPU.
# (The original had the import and this assignment fused onto one line.)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
device(type='cuda')
2.數據導入
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import warnings

# Figure resolution and Chinese font support.
plt.rcParams['savefig.dpi'] = 500
plt.rcParams['figure.dpi'] = 500
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly

warnings.filterwarnings("ignore")

# NOTE(review): absolute Windows path — only works on the author's machine.
DataFrame = pd.read_excel('F:/jupyter lab/DL-100-days/datasets/diabetes_pre/dia.xls')
DataFrame.head()
?
?
# Inspect dataset dimensions (rows, columns).
DataFrame.shape
(1006, 16)
3.數據檢查
# Check every column for missing values.
print("數據缺失值------------------")
print(DataFrame.isnull().sum())
數據缺失值------------------ 卡號 0 性別 0 年齡 0 高密度脂蛋白膽固醇 0 低密度脂蛋白膽固醇 0 極低密度脂蛋白膽固醇 0 甘油三酯 0 總膽固醇 0 脈搏 0 舒張壓 0 高血壓史 0 尿素氮 0 尿酸 0 肌酐 0 體重檢查結果 0 是否糖尿病 0 dtype: int64
# Count fully duplicated rows in the dataset.
print("數據重復值------------------")
print('數據的重復值為:'f'{DataFrame.duplicated().sum()}')
數據重復值------------------ 數據的重復值為:0
二、數據分析
1.數據分布分析
# Feature columns to visualise; each maps to its display label
# (labels are intentionally identical to the column names here).
feature_map = {
    '年齡': '年齡',
    '高密度脂蛋白膽固醇': '高密度脂蛋白膽固醇',
    '低密度脂蛋白膽固醇': '低密度脂蛋白膽固醇',
    '極低密度脂蛋白膽固醇': '極低密度脂蛋白膽固醇',
    '甘油三酯': '甘油三酯',
    '總膽固醇': '總膽固醇',
    '脈搏': '脈搏',
    '舒張壓': '舒張壓',
    '高血壓史': '高血壓史',
    '尿素氮': '尿素氮',
    '尿酸': '尿酸',
    '肌酐': '肌酐',
    '體重檢查結果': '體重檢查結果',
}

# One boxplot per feature, grouped by the diabetes label, on a 3x5 grid.
plt.figure(figsize=(15, 10))
for idx, (col, col_name) in enumerate(feature_map.items(), 1):
    plt.subplot(3, 5, idx)
    sns.boxplot(x=DataFrame['是否糖尿病'], y=DataFrame[col])
    plt.title(f'{col_name}的箱線圖', fontsize=14)
    plt.ylabel('數值', fontsize=12)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
plt.tight_layout()
plt.show()
?
2. 相關性分析
import plotly
import plotly.express as px

# Drop the ID column ('卡號') — it carries no predictive information.
DataFrame.drop(columns=['卡號'], inplace=True)

# Pairwise correlation matrix of the remaining numeric columns.
df_corr = DataFrame.corr()


def corr_generate(df):
    """Render an interactive correlation heatmap for the given matrix."""
    fig = px.imshow(
        df,
        text_auto=True,
        aspect="auto",
        color_continuous_scale='RdBu_r',
    )
    fig.show()


# Display the correlation heatmap.
corr_generate(df_corr)
??
三、LSTM模型
1.劃分數據集
from sklearn.preprocessing import StandardScaler

# '高密度脂蛋白膽固醇' is negatively correlated with diabetes, so it is
# excluded from the feature matrix X. '卡號' (ID) was already dropped
# in-place during the correlation step, so listing it here would raise
# KeyError — errors='ignore' keeps this cell runnable either way.
X = DataFrame.drop(columns=['卡號', '是否糖尿病', '高密度脂蛋白膽固醇'], errors='ignore')
y = DataFrame['是否糖尿病']

# BUG FIX: StandardScaler must be instantiated. The original
# `sc_X = StandardScaler` bound the class itself, so the following
# `fit_transform` call would fail at runtime.
sc_X = StandardScaler()
X = sc_X.fit_transform(X)

# Convert to tensors: float features, int64 class labels (CrossEntropyLoss).
X = torch.tensor(np.array(X), dtype=torch.float32)
y = torch.tensor(np.array(y), dtype=torch.int64)

# 80/20 train/test split with a fixed seed for reproducibility.
train_X, test_X, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=1)
train_X.shape, train_y.shape
(torch.Size([804, 13]), torch.Size([804]))
from torch.utils.data import TensorDataset, DataLoader

# BUG FIX: the training loader should shuffle each epoch so batches are not
# always seen in the same order (the original used shuffle=False for both).
train_dl = DataLoader(TensorDataset(train_X, train_y), batch_size=64, shuffle=True)
# Evaluation order does not matter; keep it deterministic.
test_dl = DataLoader(TensorDataset(test_X, test_y), batch_size=64, shuffle=False)
2.定義模型
class model_lstm(nn.Module):
    """Two stacked single-layer LSTMs followed by a linear head (2 classes)."""

    def __init__(self):
        super(model_lstm, self).__init__()
        self.lstm0 = nn.LSTM(input_size=13, hidden_size=200,
                             num_layers=1, batch_first=True)
        self.lstm1 = nn.LSTM(input_size=200, hidden_size=200,
                             num_layers=1, batch_first=True)
        self.fc0 = nn.Linear(200, 2)  # 2 output classes

    def forward(self, x):
        # Promote 2-D input to 3-D, treating it as a length-1 sequence:
        # [batch_size, input_size] -> [batch_size, 1, input_size].
        if x.dim() == 2:
            x = x.unsqueeze(1)
        # First LSTM over the sequence.
        out, (h_n, c_n) = self.lstm0(x)
        # Second LSTM, seeded with the first one's final hidden/cell state.
        out, (h_n, c_n) = self.lstm1(out, (h_n, c_n))
        # Keep only the last time step, then classify: [batch_size, 2].
        last_step = out[:, -1, :]
        return self.fc0(last_step)


model = model_lstm().to(device)
print(model)
model_lstm((lstm0): LSTM(13, 200, batch_first=True)(lstm1): LSTM(200, 200, batch_first=True)(fc0): Linear(in_features=200, out_features=2, bias=True) )
四、訓練模型
1.定義訓練函數
def train(dataloader, model, loss_fn, optimizer):
    """Run one training epoch; return (mean accuracy, mean batch loss)."""
    size = len(dataloader.dataset)      # number of training samples
    num_batches = len(dataloader)       # number of batches
    train_loss, train_acc = 0, 0
    model.train()
    for X, y in dataloader:
        # Promote 2-D batches to 3-D (seq_len=1) before moving to device.
        if X.dim() == 2:
            X = X.unsqueeze(1)  # [batch_size, 1, input_size]
        X, y = X.to(device), y.to(device)

        # Forward pass and loss.
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Accumulate correct predictions and batch loss.
        train_acc += (pred.argmax(1) == y).type(torch.float).sum().item()
        train_loss += loss.item()

    train_acc /= size           # fraction of correct predictions
    train_loss /= num_batches   # mean loss per batch
    return train_acc, train_loss
2.定義測試函數
def test(dataloader, model, loss_fn):
    """Evaluate the model on a dataloader; return (accuracy, mean batch loss)."""
    size = len(dataloader.dataset)   # number of evaluation samples
    num_batches = len(dataloader)    # number of batches
    test_loss, test_acc = 0, 0
    # No gradient tracking during evaluation — saves memory and compute.
    with torch.no_grad():
        for imgs, target in dataloader:
            imgs, target = imgs.to(device), target.to(device)
            target_pred = model(imgs)
            loss = loss_fn(target_pred, target)
            test_loss += loss.item()
            test_acc += (target_pred.argmax(1) == target).type(torch.float).sum().item()
    test_acc /= size
    test_loss /= num_batches
    return test_acc, test_loss
3.訓練模型
loss_fn = nn.CrossEntropyLoss()  # classification loss
learn_rate = 1e-4                # learning rate
opt = torch.optim.Adam(model.parameters(), lr=learn_rate)
epochs = 30

# Per-epoch history for plotting.
train_loss = []
train_acc = []
test_loss = []
test_acc = []

for epoch in range(epochs):
    model.train()
    epoch_train_acc, epoch_train_loss = train(train_dl, model, loss_fn, opt)

    model.eval()
    epoch_test_acc, epoch_test_loss = test(test_dl, model, loss_fn)

    train_acc.append(epoch_train_acc)
    train_loss.append(epoch_train_loss)
    test_acc.append(epoch_test_acc)
    test_loss.append(epoch_test_loss)

    # Current learning rate (constant here; Adam has no scheduler attached).
    lr = opt.state_dict()['param_groups'][0]['lr']
    template = ('Epoch:{:2d}, Train_acc:{:.1f}%, Train_loss:{:.3f}, Test_acc:{:.1f}%, Test_loss:{:.3f},Lr:{:.2E}')
    print(template.format(epoch + 1, epoch_train_acc * 100, epoch_train_loss, epoch_test_acc * 100, epoch_test_loss, lr))

print("=" * 20, 'Done', "=" * 20)
Epoch: 1, Train_acc:56.5%, Train_loss:0.688, Test_acc:53.0%, Test_loss:0.704,Lr:1.00E-04 Epoch: 2, Train_acc:56.3%, Train_loss:0.681, Test_acc:53.0%, Test_loss:0.704,Lr:1.00E-04 Epoch: 3, Train_acc:56.3%, Train_loss:0.676, Test_acc:53.0%, Test_loss:0.697,Lr:1.00E-04 Epoch: 4, Train_acc:56.3%, Train_loss:0.670, Test_acc:53.0%, Test_loss:0.690,Lr:1.00E-04 Epoch: 5, Train_acc:56.2%, Train_loss:0.663, Test_acc:54.5%, Test_loss:0.684,Lr:1.00E-04 ..........
Epoch:26, Train_acc:76.6%, Train_loss:0.481, Test_acc:71.3%, Test_loss:0.546,Lr:1.00E-04 Epoch:27, Train_acc:76.9%, Train_loss:0.475, Test_acc:71.8%, Test_loss:0.541,Lr:1.00E-04 Epoch:28, Train_acc:77.5%, Train_loss:0.470, Test_acc:71.3%, Test_loss:0.537,Lr:1.00E-04 Epoch:29, Train_acc:77.2%, Train_loss:0.465, Test_acc:71.8%, Test_loss:0.533,Lr:1.00E-04 Epoch:30, Train_acc:77.4%, Train_loss:0.460, Test_acc:70.8%, Test_loss:0.529,Lr:1.00E-04 ==================== Done ====================
五、模型評估
1.Loss和Accuracy圖
import matplotlib.pyplot as plt
import warnings
from datetime import datetime

warnings.filterwarnings("ignore")               # suppress warnings
plt.rcParams['font.sans-serif'] = ['SimHei']    # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False      # render minus signs correctly
plt.rcParams['figure.dpi'] = 100                # figure resolution

current_time = datetime.now()   # timestamp shown on the x-axis label
epochs_range = range(epochs)

plt.figure(figsize=(12, 3))

# Left panel: accuracy curves.
plt.subplot(1, 2, 1)
plt.plot(epochs_range, train_acc, label='Training Accuracy')
plt.plot(epochs_range, test_acc, label='Test Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.xlabel(current_time)

# Right panel: loss curves.
plt.subplot(1, 2, 2)
plt.plot(epochs_range, train_loss, label='Training Loss')
plt.plot(epochs_range, test_loss, label='Test Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
?
六、學習心得
1.本周延續上周的工作,開展了糖尿病預測模型優化探索。加入了相關性分析這個新模塊,更加直觀地實現了各種因素之間的相關性。
2.從訓練結果中可以發現,test_acc有所增長。
3.相較于R6而言,主要修改的地方在于數據集那部分,取消注釋了sc_X= StandardScaler()和X= sc_X.fit_transform(X)兩行代碼。