Table of Contents
- Preface
- I. Library Imports and Configuration
- Explanation
- II. Hyperparameter Configuration
- Overview
- III. Model Definition
- 1. Improved Residual Block
- 2. Full CNN Model
- IV. Dataset Class
- V. Data Loading Function
- VI. Training Function
- VII. Validation Function
- VIII. Checkpoint Management
- IX. Main Function
- X. Entry Point
- XI. Summary of Key Design Highlights
- 1. Dimension Management
- 2. Data Standardization
- 3. Dynamic Learning Rate
- 4. Gradient Clipping
- 5. Checkpoint System
- 6. Traceable Results
- 7. Industrial-Grade Robustness
- 8. Efficient Data Loading
Preface
Building on the network structure from Part (1), this article completes the pipeline: data reading, data augmentation, data processing, model training, and checkpoint-resumable training.
I. Library Imports and Configuration
import torch
import torch.nn as nn                                    # PyTorch core neural-network module
import pandas as pd                                      # data handling
import numpy as np                                       # numerical computing
from torch.utils.data import Dataset, DataLoader         # data loading utilities
from sklearn.preprocessing import StandardScaler         # data standardization
from sklearn.model_selection import train_test_split     # train/validation split
from torch.optim.lr_scheduler import ReduceLROnPlateau   # dynamic learning-rate adjustment
from collections import Counter                          # class-distribution statistics
import csv                                               # results logging
import time                                              # timestamp generation
import joblib                                            # model/parameter persistence
Explanation
This section imports the PyTorch core neural-network module along with the libraries used for data handling, numerical computing, data standardization, data splitting, dynamic learning-rate adjustment, class-distribution statistics, results logging, timestamp generation, and model/parameter persistence.
II. Hyperparameter Configuration
config = {"batch_size": 256, # 每批數據量"num_workers": 128, # 數據加載并行進程數"lr": 1e-3, # 初始學習率"weight_decay": 1e-4, # L2正則化強度"epochs": 200, # 最大訓練輪數"patience": 15, # 早停等待輪數"min_delta": 0.001, # 視為改進的最小精度提升"grad_clip": 5.0, # 梯度裁剪閾值"num_classes": None # 自動計算類別數
}
Overview
Sets the batch size, the number of parallel data-loading worker processes, the initial learning rate, the L2 regularization strength, the maximum number of training epochs, the early-stopping patience, the minimum accuracy gain counted as an improvement, the gradient-clipping threshold, and the number of classes (computed automatically when the data is loaded).
III. Model Definition
1. Improved Residual Block
class ImprovedResBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()  # initialize the parent class
        # First convolution: kernel size 5, given stride, padding 2 (preserves length when stride=1)
        self.conv1 = nn.Conv1d(in_channels, out_channels, 5, stride, 2)
        self.bn1 = nn.BatchNorm1d(out_channels)  # batch normalization
        # Second convolution: kernel size 3, stride 1, padding 1 (preserves length)
        self.conv2 = nn.Conv1d(out_channels, out_channels, 3, 1, 1)
        self.bn2 = nn.BatchNorm1d(out_channels)
        self.relu = nn.ReLU()  # activation
        # Downsampling path, enabled when the channel count or stride changes the dimensions
        self.downsample = nn.Sequential(
            nn.Conv1d(in_channels, out_channels, 1, stride),  # 1x1 convolution to match dimensions
            nn.BatchNorm1d(out_channels)
        ) if in_channels != out_channels or stride != 1 else None

    def forward(self, x):
        identity = x  # keep the original input as the residual
        # Main path
        x = self.relu(self.bn1(self.conv1(x)))  # Conv1 -> BN1 -> ReLU
        x = self.bn2(self.conv2(x))             # Conv2 -> BN2 (no activation yet)
        # Match the residual path's dimensions if needed
        if self.downsample is not None:
            identity = self.downsample(identity)
        x += identity        # residual connection
        return self.relu(x)  # final activation
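To make the shape bookkeeping concrete, here is a quick sanity check (a minimal sketch, not part of the training script; the batch size and sequence length are arbitrary):

# Hypothetical shape check: batch of 8 sequences, 64 channels, length 100
block = ImprovedResBlock(64, 128, stride=2)
x = torch.randn(8, 64, 100)
print(block(x).shape)  # torch.Size([8, 128, 50]) -- channels doubled, length halved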
2. Full CNN Model
class EnhancedCNN(nn.Module):
    def __init__(self, input_channels, seq_len, num_classes):
        super().__init__()
        # seq_len is unused: adaptive pooling makes the model length-agnostic
        # Initial feature-extraction stage
        self.initial = nn.Sequential(
            nn.Conv1d(input_channels, 64, 7, stride=2, padding=3),  # fast downsampling
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.MaxPool1d(3, 2, 1)  # kernel 3, stride 2, padding 1; after this stage the length is roughly 1/4 of the input
        )
        # Stacked residual blocks
        self.blocks = nn.Sequential(
            ImprovedResBlock(64, 128, stride=2),   # double the channels, halve the length
            ImprovedResBlock(128, 256, stride=2),
            ImprovedResBlock(256, 512, stride=2),
            nn.AdaptiveAvgPool1d(1)  # adaptive global average pooling down to length 1
        )
        # Classifier head
        self.classifier = nn.Sequential(
            nn.Linear(512, 256),  # fully connected layer
            nn.Dropout(0.5),      # strong regularization against overfitting
            nn.ReLU(),
            nn.Linear(256, num_classes)  # final classification layer
        )

    def forward(self, x):
        x = self.initial(x)        # initial feature extraction
        x = self.blocks(x)         # residual blocks
        x = x.view(x.size(0), -1)  # flatten to (batch, 512)
        return self.classifier(x)  # class logits
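An end-to-end shape check (again a sketch; the class count of 5 is an illustrative assumption, and seq_len may be None because the model pools adaptively):

# Hypothetical forward-pass check with 3 input channels and 5 classes
model = EnhancedCNN(input_channels=3, seq_len=None, num_classes=5)
x = torch.randn(8, 3, 200)  # (batch, features, time steps)
print(model(x).shape)       # torch.Size([8, 5]) -- one logit per class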
IV. Dataset Class
class SequenceDataset(Dataset):
    def __init__(self, sequences, labels, scaler=None, augment=True):
        self.sequences = sequences  # raw sequence data
        self.labels = labels        # corresponding labels
        self.scaler = scaler or StandardScaler()  # standardizer
        self.augment = augment      # random augmentation; disable for validation data
        # If no scaler was provided, fit a new one on this data
        if scaler is None:
            flattened = np.concatenate(sequences)  # stack all time steps: (total_steps, features)
            self.scaler.fit(flattened)             # compute per-feature mean and variance
        # Standardize every sequence
        self.normalized = [self.scaler.transform(seq) for seq in sequences]

    def __len__(self):
        return len(self.sequences)  # dataset size

    def __getitem__(self, idx):
        # Fetch a single sample
        seq = torch.tensor(self.normalized[idx], dtype=torch.float32).permute(1, 0)
        # permute turns (seq_len, features) into (features, seq_len), as Conv1d expects
        label = torch.tensor(self.labels[idx], dtype=torch.long)
        # Data augmentation (training only)
        if self.augment:
            if np.random.rand() > 0.5:  # 50% chance: flip along the time axis
                seq = seq.flip(-1)
            if np.random.rand() > 0.3:  # 70% chance: add Gaussian noise (mean 0, std 0.01)
                seq += torch.randn_like(seq) * 0.01
        return seq, label
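A minimal usage sketch with synthetic data (the array sizes and class count are illustrative assumptions; the augment flag gates the random augmentation added above so validation samples stay untouched):

# Hypothetical usage: 100 sequences of length 120 with 3 features each
seqs = [np.random.randn(120, 3) for _ in range(100)]
lbls = list(np.random.randint(0, 4, size=100))
train_set = SequenceDataset(seqs, lbls)  # fits its own scaler
val_set = SequenceDataset(seqs[:20], lbls[:20],
                          scaler=train_set.scaler, augment=False)  # reuse the training scaler
seq, label = train_set[0]
print(seq.shape)  # torch.Size([3, 120]) -- (features, seq_len), ready for Conv1d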
V. Data Loading Function
def load_data(excel_path):
    df = pd.read_excel(excel_path)  # read the Excel data
    sequences = []
    labels = []
    for idx, row in df.iterrows():  # iterate over the rows
        try:
            # Parse comma-separated strings, tolerating formatting irregularities
            loads = list(map(float, str(row['載荷']).split(',')))
            displacements = list(map(float, str(row['位移']).split(',')))
            powers = list(map(float, str(row['功率']).split(',')))
            # Align the three columns to the same length
            min_len = min(len(loads), len(displacements), len(powers))
            # Combine into a (time steps, 3 features) array
            combined = np.array([loads[:min_len],
                                 displacements[:min_len],
                                 powers[:min_len]]).T  # transpose to (min_len, 3)
            label = int(float(row['工況結果']))  # convert the label
            sequences.append(combined)
            labels.append(label)
        except Exception as e:
            print(f"Error processing row {idx}: {e}")  # error handling
    # Class-distribution statistics
    label_counts = Counter(labels)
    print("Class distribution:", label_counts)
    # Build a label map (convert arbitrary labels to 0..N-1 indices)
    unique_labels = sorted(set(labels))
    label_map = {l: i for i, l in enumerate(unique_labels)}
    config["num_classes"] = len(unique_labels)  # update the config
    labels = [label_map[l] for l in labels]     # remap all labels
    # Stratified train/validation split (preserves class proportions)
    return train_test_split(sequences, labels, test_size=0.2, stratify=labels)
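For reference, a sketch of the spreadsheet layout this function expects: each 載荷, 位移, and 功率 cell holds a comma-separated series, and 工況結果 holds the class label. The values and demo filename below are made up for illustration:

# Hypothetical example of the expected Excel layout, built in memory
demo = pd.DataFrame({
    '載荷': ["1.2,1.3,1.5", "0.9,1.0,1.1"],  # load series, comma-separated
    '位移': ["0.1,0.2,0.3", "0.2,0.2,0.4"],  # displacement series
    '功率': ["5.0,5.2,5.1", "4.8,4.9,5.0"],  # power series
    '工況結果': [1, 2],                       # operating-condition label
})
demo.to_excel("./dcgt_demo.xlsx", index=False)  # same format load_data() parses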
VI. Training Function
def train_epoch(model, loader, optimizer, criterion, device):
    model.train()  # training mode
    total_loss = 0
    for x, y in loader:                    # iterate over the data loader
        x, y = x.to(device), y.to(device)  # move data to the device
        optimizer.zero_grad()              # clear gradients
        outputs = model(x)                 # forward pass
        loss = criterion(outputs, y)       # compute the loss
        loss.backward()                    # backward pass
        # Gradient clipping to prevent exploding gradients
        nn.utils.clip_grad_norm_(model.parameters(), config["grad_clip"])
        optimizer.step()  # parameter update
        total_loss += loss.item() * x.size(0)  # accumulate loss, weighted by batch size
    return total_loss / len(loader.dataset)    # mean loss
VII. Validation Function
def validate(model, loader, criterion, device):
    model.eval()  # evaluation mode
    total_loss = 0
    correct = 0
    with torch.no_grad():  # disable gradient tracking
        for x, y in loader:
            x, y = x.to(device), y.to(device)
            outputs = model(x)
            loss = criterion(outputs, y)
            total_loss += loss.item() * x.size(0)
            # Accuracy
            preds = outputs.argmax(dim=1)        # class with the highest logit
            correct += preds.eq(y).sum().item()  # count correct predictions
    return (total_loss / len(loader.dataset),  # mean loss
            correct / len(loader.dataset))     # accuracy
VIII. Checkpoint Management
def save_checkpoint(epoch, model, optimizer, scheduler, best_acc, scaler,
                    filename="checkpoint.pth"):
    torch.save({
        'epoch': epoch,                                  # current epoch
        'model_state_dict': model.state_dict(),          # model parameters
        'optimizer_state_dict': optimizer.state_dict(),  # optimizer state
        'scheduler_state_dict': scheduler.state_dict(),  # LR-scheduler state
        'best_acc': best_acc,                            # best accuracy so far
        'scaler': scaler                                 # data-standardization parameters
    }, filename)

def load_checkpoint(filename, model, optimizer, scheduler):
    checkpoint = torch.load(filename)
    model.load_state_dict(checkpoint['model_state_dict'])  # restore the model
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
    return checkpoint['epoch'], checkpoint['best_acc'], checkpoint['scaler']
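A minimal resume sketch using these helpers (it assumes a checkpoint.pth written by an earlier run and an illustrative class count; main() below inlines the same logic):

# Hypothetical resume: rebuild the objects, then restore their state
model = EnhancedCNN(input_channels=3, seq_len=None, num_classes=5)
optimizer = torch.optim.AdamW(model.parameters(), lr=config["lr"],
                              weight_decay=config["weight_decay"])
scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=5)
start_epoch, best_acc, scaler = load_checkpoint("checkpoint.pth", model,
                                                optimizer, scheduler)
print(f"Resuming from epoch {start_epoch}, best accuracy {best_acc:.4f}")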
IX. Main Function
def main(resume=False):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # pick the device
    # Timestamped results file name
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    results_file = f"training_results_{timestamp}.csv"
    # Load and split the data
    train_seq, val_seq, train_lb, val_lb = load_data("./dcgt.xls")
    # Initialize the model (sequence length; unused by EnhancedCNN, which pools adaptively)
    sample_seq = train_seq[0].shape[0] if resume else None
    model = EnhancedCNN(input_channels=3, seq_len=sample_seq,
                        num_classes=config["num_classes"]).to(device)
    # Loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=config["lr"],
                                  weight_decay=config["weight_decay"])
    # Learning-rate scheduler (adjusts on validation-loss plateaus)
    scheduler = ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=5)
    # Resume logic
    start_epoch = 0
    best_acc = 0
    if resume:
        checkpoint = torch.load("checkpoint.pth")
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        start_epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        train_set = SequenceDataset(train_seq, train_lb, scaler=checkpoint['scaler'])
    else:
        train_set = SequenceDataset(train_seq, train_lb)
    # The validation set reuses the training scaler (and skips augmentation)
    val_set = SequenceDataset(val_seq, val_lb, scaler=train_set.scaler, augment=False)
    # Persist the standardization parameters
    joblib.dump(train_set.scaler, 'scaler.save')
    # Data loaders
    train_loader = DataLoader(train_set, batch_size=config["batch_size"], shuffle=True,
                              num_workers=config["num_workers"])  # multi-process loading
    val_loader = DataLoader(val_set, batch_size=config["batch_size"],
                            num_workers=config["num_workers"])
    # Training loop
    patience_counter = 0  # early-stopping counter
    with open(results_file, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['epoch', 'train_loss', 'val_loss', 'val_acc', 'learning_rate'])
        for epoch in range(start_epoch, config["epochs"]):
            # Train for one epoch
            train_loss = train_epoch(model, train_loader, optimizer, criterion, device)
            # Validate
            val_loss, val_acc = validate(model, val_loader, criterion, device)
            current_lr = optimizer.param_groups[0]['lr']  # current learning rate
            # Update the learning rate
            scheduler.step(val_loss)
            # Save a checkpoint
            save_checkpoint(epoch + 1, model, optimizer, scheduler, best_acc,
                            train_set.scaler)
            # Log the results
            writer.writerow([epoch + 1, f"{train_loss:.4f}", f"{val_loss:.4f}",
                             f"{val_acc:.4f}", f"{current_lr:.6f}"])
            print(f"\nEpoch {epoch+1}/{config['epochs']}")
            print(f"Train Loss: {train_loss:.4f} | Val Loss: {val_loss:.4f}")
            print(f"Val Acc: {val_acc*100:.2f}% | Learning Rate: {current_lr:.6f}")
            # Early stopping
            if val_acc > best_acc + config["min_delta"]:
                best_acc = val_acc
                patience_counter = 0
            else:
                patience_counter += 1
                if patience_counter >= config["patience"]:
                    print(f"Early stopping triggered at epoch {epoch+1}")
                    break
    # Save the final weights (note: despite the filename, these are the last epoch's weights)
    torch.save(model.state_dict(), "best_model.pth")
X. Entry Point
if __name__ == "__main__":
    main(resume=False)    # first training run
    # main(resume=True)   # resume training from checkpoint.pth
XI. Summary of Key Design Highlights
1. Dimension Management
Dimension management: permute reshapes each sample into the (features, seq_len) layout Conv1d expects, as the snippet below illustrates.
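A quick illustration of that layout change (the sizes are arbitrary):

# Hypothetical shape demo: stored samples are (seq_len, features); Conv1d wants (channels, length)
seq = torch.randn(120, 3)       # one sample: 120 time steps, 3 features
print(seq.permute(1, 0).shape)  # torch.Size([3, 120]) -- ready for Conv1d once batched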
2. Data Standardization
Data standardization: the mean and variance are computed from the training data alone and reused for validation, avoiding data leakage; see the sketch below.
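The same pattern in isolation, with synthetic arrays standing in for the real sequences:

# Hypothetical leak-free standardization: fit on training data only, reuse elsewhere
train_points = np.random.randn(1000, 3)      # stand-in for the concatenated training sequences
val_points = np.random.randn(200, 3)         # stand-in for validation data
scaler = StandardScaler().fit(train_points)  # statistics come from the training data only
val_scaled = scaler.transform(val_points)    # validation reuses those statistics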
3. Dynamic Learning Rate
Dynamic learning rate: ReduceLROnPlateau automatically adjusts the rate based on the validation loss.
4. Gradient Clipping
Gradient clipping: prevents exploding gradients and stabilizes training.
5. Checkpoint System
Checkpoint system: the complete training state is saved, so interrupted runs can be resumed.
6. Traceable Results
Traceable results: timestamped CSV logs and saved model files.
7. Industrial-Grade Robustness
Industrial-grade robustness: exception handling during parsing, persisted standardization parameters, and automatic label mapping.
8. Efficient Data Loading
Efficient data loading: multi-process workers parallelize data preprocessing.
This implementation covers the complete pipeline from data preprocessing to model training. It is well suited to industrial time-series classification tasks and remains easy to extend and maintain.