🏆 作者簡介:席萬里
? 個人網站:https://dahua.bloggo.chat/
?? 一名后端開發小趴菜,同時略懂Vue與React前端技術,也了解一點微信小程序開發。
🍻 對計算機充滿興趣,愿意并且希望學習更多的技術,接觸更多的大神,提高自己的編程思維和解決問題的能力。
文章目錄
- 作品演示
- 代碼
- 1.train_and_test.py
- 2、view.py(可視化界面)
作品演示
代碼
采用模型VGG16、ALEXNet、Resnet18,訓練測試。python版本3.10.11 。
數據集:和鯨社區貓狗圖像數據集。https://www.heywhale.com/mw/project/631aedb893f47b16cb062b2a
1.train_and_test.py
# 導入 PyTorch 庫和相關模塊
import torch # PyTorch 的核心庫,提供張量計算和自動求導功能
import torchvision.transforms as transforms # 提供圖像數據增強和預處理的功能
from torch.utils.data import Dataset # 用于自定義數據集
from torch import nn, optim # nn 用于構建神經網絡,optim 用于優化算法
from PIL import Image # 用于加載和處理圖像文件
import time # 用于記錄訓練時長和其他時間相關操作
import torchvision.models as models # 包含一些預訓練模型,如 AlexNet、ResNet 等
import os # 用于與操作系統交互,如文件路徑處理、創建目錄等
import matplotlib.pyplot as plt # 用于繪制圖表,如準確率曲線、損失曲線等
from tqdm import tqdm # 用于顯示訓練過程中的進度條
from sklearn.metrics import confusion_matrix # 用于計算混淆矩陣,評估分類性能
import seaborn as sns # 用于繪制混淆矩陣的熱圖,提供美觀的圖表風格device = torch.device('cpu')# 數據預處理:縮放到224x224大小,并轉換為Tensor
transformer = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])# 加載訓練數據集
# ---- Build the training set -------------------------------------------------
# Label convention used throughout this script: dog = 1, cat = 0.
DogTrainImageList = os.listdir(r"./catsdogs/train/Dog")  # dog image filenames
CatTrainImageList = os.listdir(r"./catsdogs/train/Cat")  # cat image filenames
train_label = []  # integer class labels, aligned with train_data
train_data = []   # preprocessed image tensors
dog_train_data_dir = r"./catsdogs/train/Dog/"
cat_train_data_dir = r"./catsdogs/train/Cat/"

# Load every dog image, convert to RGB, preprocess, and record label 1.
for fname in DogTrainImageList:
    train_label.append(1)
    img = Image.open(dog_train_data_dir + fname).convert('RGB')
    train_data.append(transformer(img))

# Load every cat image, convert to RGB, preprocess, and record label 0.
for fname in CatTrainImageList:
    train_label.append(0)
    img = Image.open(cat_train_data_dir + fname).convert('RGB')
    train_data.append(transformer(img))
# ---- Build the test set -----------------------------------------------------
# BUG FIX: the original listed ./catsdogs/train/* here, so the "test" set was
# the training set itself and the reported test accuracy was meaningless
# (evaluation on already-seen samples). Point the test split at the held-out
# ./catsdogs/test/* directories instead.
dog_test_data_dir = r"./catsdogs/test/Dog/"  # dog test image directory
cat_test_data_dir = r"./catsdogs/test/Cat/"  # cat test image directory
DogTestImageList = os.listdir(dog_test_data_dir)
CatTestImageList = os.listdir(cat_test_data_dir)
test_label = []  # integer class labels, aligned with test_data
test_data = []   # preprocessed image tensors

# Dogs are labelled 1, mirroring the training set.
for fname in DogTestImageList:
    test_label.append(1)
    img = Image.open(dog_test_data_dir + fname).convert('RGB')
    test_data.append(transformer(img))

# Cats are labelled 0.
for fname in CatTestImageList:
    test_label.append(0)
    img = Image.open(cat_test_data_dir + fname).convert('RGB')
    test_data.append(transformer(img))
class DealDataset(Dataset):def __init__(self, data, label, transform=None):self.data = data # 圖像數據self.label = label # 圖像標簽self.transform = transform # 圖像預處理def __getitem__(self, index):data, label = self.data[index], int(self.label[index]) # 獲取指定索引的數據和標簽return data, label # 返回數據和標簽def __len__(self):return len(self.data) # 返回數據集的大小# 將訓練數據集和測試數據集包裝為DealDataset對象
# Wrap the pre-loaded tensors/labels in Dataset objects so DataLoader can batch them.
TrainDataSet = DealDataset(train_data, train_label, transform=transformer)
TestDataSet = DealDataset(test_data, test_label, transform=transformer)
class AlexNet(nn.Module):def __init__(self):super(AlexNet, self).__init__()# 定義卷積層部分self.conv = nn.Sequential(nn.Conv2d(3, 64, kernel_size=11, stride=4),nn.ReLU(),nn.MaxPool2d(kernel_size=3, stride=2),nn.BatchNorm2d(64),nn.Conv2d(64, 192, kernel_size=5, padding=2),nn.ReLU(),nn.MaxPool2d(kernel_size=3, stride=2),nn.BatchNorm2d(192),nn.Conv2d(192, 384, kernel_size=3, padding=1),nn.ReLU(),nn.Conv2d(384, 256, kernel_size=3, padding=1),nn.ReLU(),nn.Conv2d(256, 256, kernel_size=3, padding=1),nn.ReLU(),nn.MaxPool2d(kernel_size=3, stride=2),nn.BatchNorm2d(256))# 定義全連接層部分self.fc = nn.Sequential(nn.Linear(256 * 5 * 5, 4096),nn.ReLU(),nn.Dropout(0.5),nn.Linear(4096, 4096),nn.ReLU(),nn.Dropout(0.5),nn.Linear(4096, 2) # 輸出2個類別:貓或狗)def forward(self, img):feature = self.conv(img) # 通過卷積層提取特征output = self.fc(feature.view(img.shape[0], -1)) # 展開特征并通過全連接層進行分類return output# 使用預訓練的VGG16模型,并修改最后的全連接層以適應2個輸出類別
class VGG16(nn.Module):def __init__(self, num_classes=2):super(VGG16, self).__init__()self.model = models.vgg16(pretrained=True) # 加載預訓練的VGG16模型self.model.classifier[-1] = nn.Linear(self.model.classifier[-1].in_features, num_classes) # 修改輸出層def forward(self, x):return self.model(x) # 返回模型的輸出# 使用ResNet18模型,并修改最后的全連接層以適應2個輸出類別
class ResNet18(nn.Module):def __init__(self):super(ResNet18, self).__init__()self.model = models.resnet18(pretrained=False) # 加載ResNet18模型self.model.fc = nn.Linear(self.model.fc.in_features, 2) # 修改輸出層為2個類別def forward(self, x):return self.model(x) # 返回模型的輸出# 繪制混淆矩陣的函數
def plot_combined_confusion_matrix(true_labels_dict, predicted_labels_dict, classes,save_path='combined_confusion_matrix.png'):# 創建一個子圖,用來顯示多個模型的混淆矩陣fig, axes = plt.subplots(1, len(true_labels_dict), figsize=(15, 5))# 遍歷每個模型并繪制其混淆矩陣for i, (model_name, true_labels) in enumerate(true_labels_dict.items()):predicted_labels = predicted_labels_dict[model_name]cm = confusion_matrix(true_labels, predicted_labels) # 計算混淆矩陣# 使用Seaborn繪制熱圖sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", xticklabels=classes, yticklabels=classes,ax=axes[i], cbar=False, annot_kws={"size": 14})axes[i].set_xlabel('Predicted labels', fontsize=12)axes[i].set_ylabel('True labels', fontsize=12)axes[i].set_title(f'{model_name} Confusion Matrix', fontsize=14)# 調整布局并保存圖像plt.tight_layout()plt.savefig(save_path)plt.show()# 計算模型在測試集上的準確率
def evaluate_accuracy(data_iter, net, device=None):if device is None and isinstance(net, torch.nn.Module):device = list(net.parameters())[0].device # 獲取模型的設備acc_sum, n = 0.0, 0predicted_labels = []true_labels = []with torch.no_grad(): # 在測試時不需要計算梯度for X, y in tqdm(data_iter, desc="加載中:", leave=True):net.eval() # 將模型設置為評估模式outputs = net(X.to(device)) # 獲取模型輸出predicted = outputs.argmax(dim=1) # 獲取預測的標簽true_labels.extend(y.cpu().numpy()) # 存儲真實標簽predicted_labels.extend(predicted.cpu().numpy()) # 存儲預測標簽acc_sum += (predicted == y.to(device)).float().sum().cpu().item() # 累加準確的樣本數n += y.shape[0] # 累加樣本總數return acc_sum / n, true_labels, predicted_labels # 返回準確率,真實標簽和預測標簽# 訓練和評估模型
def train_and_evaluate_models(models, model_names, train_iter, test_iter, batch_size, optimizer_dict, device,
                              num_epochs, save_model_paths, plot_path):
    """Train every model for ``num_epochs``, evaluating and checkpointing after
    each epoch, then plot a combined confusion matrix.

    Args:
        models: list of nn.Module instances to train.
        model_names: names parallel to ``models``, used as dict keys everywhere.
        train_iter / test_iter: DataLoaders over the train/test splits.
        batch_size: unused here; kept for interface compatibility with callers.
        optimizer_dict: model name -> optimizer for that model.
        device: device the batches are moved to.
        num_epochs: number of passes over the training data.
        save_model_paths: model name -> checkpoint path.
        plot_path: directory the confusion-matrix figure is saved into.

    Returns:
        ``(train_acc_history, test_acc_history, train_loss_history)``, each a
        dict mapping model name to a per-epoch list.
    """
    train_acc_history = {name: [] for name in model_names}
    test_acc_history = {name: [] for name in model_names}
    train_loss_history = {name: [] for name in model_names}
    # Confusion-matrix inputs, filled with the LAST epoch's evaluation results.
    true_labels_dict = {name: [] for name in model_names}
    predicted_labels_dict = {name: [] for name in model_names}

    loss_fn = torch.nn.CrossEntropyLoss()  # stateless; shared across models/epochs
    # BUG FIX: the original rebuilt the StepLR scheduler inside the epoch loop,
    # resetting its internal step counter every epoch — with step_size=3 the
    # learning rate therefore never actually decayed. Build one scheduler per
    # model, once, before training starts.
    scheduler_dict = {
        name: torch.optim.lr_scheduler.StepLR(optimizer_dict[name], step_size=3, gamma=0.7)
        for name in model_names
    }

    for epoch in range(num_epochs):
        for model, model_name in zip(models, model_names):
            model.train()
            optimizer = optimizer_dict[model_name]
            scheduler = scheduler_dict[model_name]
            train_l_sum, train_acc_sum, n, batch_count, start = 0.0, 0.0, 0, 0, time.time()

            # One full pass over the training data for this model.
            for X, y in train_iter:
                X, y = X.to(device), y.to(device)
                y_hat = model(X)
                loss = loss_fn(y_hat, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                train_l_sum += loss.item()
                train_acc_sum += (y_hat.argmax(dim=1) == y).sum().cpu().item()
                n += y.shape[0]
                batch_count += 1
            scheduler.step()  # decay learning rate once per epoch

            train_acc = train_acc_sum / n
            test_acc, true_labels, predicted_labels = evaluate_accuracy(test_iter, model, device)
            # BUG FIX: the original extend()-ed these lists every epoch, so the
            # final confusion matrix aggregated predictions from ALL epochs.
            # Overwrite instead, keeping only the most recent epoch's labels.
            true_labels_dict[model_name] = true_labels
            predicted_labels_dict[model_name] = predicted_labels

            train_acc_history[model_name].append(train_acc)
            test_acc_history[model_name].append(test_acc)
            train_loss_history[model_name].append(train_l_sum / batch_count)
            print(f'{model_name} epoch {epoch + 1}, loss {train_l_sum / batch_count:.4f}, '
                  f'train acc {train_acc:.3f}, test acc {test_acc:.3f}, time {time.time() - start:.1f} sec')

            # Checkpoint the model weights after every epoch.
            torch.save(model.state_dict(), save_model_paths[model_name])
            print(f"{model_name} Model saved to {save_model_paths[model_name]} after epoch {epoch + 1}")

    # After all training, render the combined confusion-matrix figure.
    plot_combined_confusion_matrix(true_labels_dict, predicted_labels_dict, ['Cat', 'Dog'],
                                   save_path=os.path.join(plot_path, 'combined_confusion_matrix.png'))
    return train_acc_history, test_acc_history, train_loss_history
def plot_and_save_results(train_acc_history, test_acc_history, train_loss_history, num_epochs, save_plots_path):
    """Plot accuracy and loss curves for all models and save them as PNGs.

    Writes ``accuracy_plot.png`` (train/test accuracy per model) and
    ``loss_plot.png`` (training loss per model) into ``save_plots_path``.
    """
    epochs = range(num_epochs)

    # --- accuracy curves -----------------------------------------------------
    plt.figure(figsize=(10, 5))
    for model_name in ['AlexNet', 'ResNet18', 'VGG16']:
        if model_name in train_acc_history and model_name in test_acc_history:
            plt.plot(epochs, train_acc_history[model_name], label=f'{model_name} Train Accuracy')
            plt.plot(epochs, test_acc_history[model_name], label=f'{model_name} Test Accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.title('AlexNet, ResNet18, VGG16 - Training and Test Accuracy Comparison')
    plt.legend()
    plt.grid(True)
    plt.savefig(os.path.join(save_plots_path, 'accuracy_plot.png'))
    plt.show()

    # --- training loss curves ------------------------------------------------
    plt.figure(figsize=(10, 5))
    for model_name in ['AlexNet', 'ResNet18', 'VGG16']:
        if model_name in train_loss_history:
            plt.plot(epochs, train_loss_history[model_name], label=f'{model_name} Train Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.title('Training Loss Comparison')
    plt.legend()
    plt.grid(True)
    plt.savefig(os.path.join(save_plots_path, 'loss_plot.png'))
    plt.show()


if __name__ == '__main__':
    # Hyper-parameters for the training run.
    num_epochs = 25
    batch_size = 16
    learning_rate = 0.009
    save_model_paths = {
        'AlexNet': 'AlexNet.pth',
        'ResNet18': 'ResNet18.pth',
        'VGG16': 'VGG16.pth'
    }
    save_plots_path = './python'
    os.makedirs(save_plots_path, exist_ok=True)  # ensure the output directory exists

    # Instantiate the three competing architectures on the target device.
    alexnet_model = AlexNet().to(device)
    resnet_model = ResNet18().to(device)
    vgg_model = VGG16().to(device)

    # Batched loaders over the pre-built datasets.
    train_iter = torch.utils.data.DataLoader(TrainDataSet, batch_size=batch_size, shuffle=True, num_workers=2)
    test_iter = torch.utils.data.DataLoader(TestDataSet, batch_size=batch_size, shuffle=False, num_workers=2)

    # One SGD optimizer per model, all with the same learning rate.
    optimizer_dict = {
        'AlexNet': torch.optim.SGD(alexnet_model.parameters(), lr=learning_rate),
        'ResNet18': torch.optim.SGD(resnet_model.parameters(), lr=learning_rate),
        'VGG16': torch.optim.SGD(vgg_model.parameters(), lr=learning_rate)
    }

    # FIX: renamed this local list from `models` to `model_list` so it no
    # longer shadows the imported `torchvision.models` module.
    model_list = [alexnet_model, resnet_model, vgg_model]
    model_names = ['AlexNet', 'ResNet18', 'VGG16']
    train_acc_history, test_acc_history, train_loss_history = train_and_evaluate_models(
        model_list, model_names, train_iter, test_iter, batch_size,
        optimizer_dict, device, num_epochs, save_model_paths, save_plots_path)

    # Persist the accuracy and loss comparison figures.
    plot_and_save_results(train_acc_history, test_acc_history, train_loss_history, num_epochs, save_plots_path)
2、view.py(可視化界面)
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QFileDialog, QVBoxLayout, QGridLayout, \QTextEdit, QComboBox, QSpacerItem, QSizePolicy
from PyQt5.QtGui import QPixmap, QFont, QTextCursor
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from PIL import Image
import torchvision.models as models


class AnimalClassifierApp(QWidget):
    """PyQt5 window that lets the user upload an image and classify it as
    cat or dog with a selectable CNN backbone.

    NOTE(review): the three ``load_*_model`` helpers build ImageNet-pretrained
    torchvision models with a freshly re-initialised 2-way output layer — they
    never load the fine-tuned weights that train_and_test.py saves
    (AlexNet.pth etc.), so predictions come from an untrained head.
    NOTE(review): the preprocessing here (148x148 resize + normalisation)
    differs from the 224x224 un-normalised pipeline used at training time.
    Both look like defects — confirm with the author before shipping.
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        # Build the window, widgets and layout; also set up the device and
        # the preprocessing pipeline used at inference time.
        self.setWindowTitle('貓狗識別系統')
        self.resize(600, 400)  # compact window size

        # Grid layout hosting all widgets.
        grid = QGridLayout()
        grid.setContentsMargins(10, 10, 10, 10)
        grid.setSpacing(5)

        # Label that displays the uploaded image.
        self.image_label = QLabel(self)
        self.image_label.setFixedSize(250, 250)
        self.image_label.setAlignment(Qt.AlignCenter)
        grid.addWidget(self.image_label, 1, 0, 2, 1)

        # Read-only text box showing the classification result.
        self.result_label = QTextEdit(self)
        self.result_label.setFixedSize(250, 80)
        self.result_label.setReadOnly(True)
        self.result_label.setStyleSheet("color: red; font-size: 14px;")
        self.result_label.setAlignment(Qt.AlignCenter)
        grid.addWidget(self.result_label, 1, 1, 1, 2)

        # Drop-down to pick which model performs the classification.
        self.model_selector = QComboBox(self)
        self.model_selector.addItem("AlexNet")
        self.model_selector.addItem("VGG16")
        self.model_selector.addItem("ResNet18")
        grid.addWidget(self.model_selector, 2, 0, 1, 2)

        # Vertical stack of action buttons.
        button_layout = QVBoxLayout()
        button_layout.setSpacing(5)

        # "Upload" button: choose an image file from disk.
        upload_btn = QPushButton('上傳', self)
        upload_btn.clicked.connect(self.load_image)
        button_layout.addWidget(upload_btn)

        # "Recognize" button: run the selected model on the uploaded image.
        recognize_btn = QPushButton('識別', self)
        recognize_btn.clicked.connect(self.classify_image)
        button_layout.addWidget(recognize_btn)

        # Spacer pushes the buttons to the top of their column.
        button_layout.addSpacerItem(QSpacerItem(10, 10, QSizePolicy.Minimum, QSizePolicy.Expanding))
        grid.addLayout(button_layout, 3, 1, 1, 2)
        self.setLayout(grid)

        # Inference runs on the CPU.
        self.device = torch.device('cpu')
        # Preprocessing for uploaded images (see class NOTE: inconsistent
        # with the training pipeline).
        self.transform = transforms.Compose([
            transforms.Resize((148, 148)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4, 0.4, 0.4], std=[0.2, 0.2, 0.2])
        ])
        self.image_path = ''
        self.model = None  # lazily created when the user clicks "識別"

    def load_image(self):
        # Open a file dialog and preview the chosen image in the image label.
        options = QFileDialog.Options()
        options |= QFileDialog.ReadOnly
        file_name, _ = QFileDialog.getOpenFileName(self, "上傳圖片", "", "圖片文件 (*.jpg *.jpeg *.png)",
                                                   options=options)
        if file_name:
            self.image_path = file_name
            pixmap = QPixmap(file_name)
            # Scale the preview to fit the label while keeping aspect ratio.
            pixmap = pixmap.scaled(self.image_label.width(), self.image_label.height(), Qt.KeepAspectRatio)
            self.image_label.setPixmap(pixmap)
            self.result_label.setText('識別結果: ')

    def classify_image(self):
        # Run the currently selected model on the uploaded image and display
        # the predicted label plus its softmax confidence.
        if self.image_path:
            # Instantiate the chosen architecture (re-created on every click).
            selected_model = self.model_selector.currentText()
            if selected_model == "AlexNet":
                self.model = self.load_alexnet_model()
            elif selected_model == "VGG16":
                self.model = self.load_vgg16_model()
            elif selected_model == "ResNet18":
                self.model = self.load_resnet18_model()
            image = Image.open(self.image_path).convert('RGB')
            image_tensor = self.transform(image).unsqueeze(0).to(self.device)
            with torch.no_grad():
                output = self.model(image_tensor)
                # Softmax turns logits into probabilities; index 0 = cat, 1 = dog.
                probabilities = torch.nn.functional.softmax(output, dim=1)
                confidence, predicted = torch.max(probabilities, 1)
            label = 'cat' if predicted.item() == 0 else 'dog'
            confidence = confidence.item()
            # Re-display the image scaled to the preview label.
            pixmap = QPixmap(self.image_path)
            pixmap = pixmap.scaled(self.image_label.width(), self.image_label.height(), Qt.KeepAspectRatio)
            self.image_label.setPixmap(pixmap)
            # Show the result, centred, in the result box.
            self.result_label.setText(f'識別結果: {label} \n\n置信度: {confidence:.2f}')
            self.result_label.setAlignment(Qt.AlignCenter)
            cursor = self.result_label.textCursor()
            cursor.select(QTextCursor.Document)
            self.result_label.setTextCursor(cursor)

    def load_alexnet_model(self):
        # ImageNet-pretrained AlexNet with a re-initialised 2-way output layer
        # (see class NOTE about missing fine-tuned weights).
        model = models.alexnet(pretrained=True)
        model.classifier[6] = nn.Linear(model.classifier[6].in_features, 2)
        model = model.to(self.device)
        model.eval()
        return model

    def load_vgg16_model(self):
        # Same pattern as AlexNet, using VGG16.
        model = models.vgg16(pretrained=True)
        model.classifier[6] = nn.Linear(model.classifier[6].in_features, 2)
        model = model.to(self.device)
        model.eval()
        return model

    def load_resnet18_model(self):
        # Same pattern, using ResNet-18 (final fc layer replaced).
        model = models.resnet18(pretrained=True)
        model.fc = nn.Linear(model.fc.in_features, 2)
        model = model.to(self.device)
        model.eval()
        return model


if __name__ == '__main__':
    # Standard PyQt bootstrap: create the app, show the window, enter the loop.
    app = QApplication(sys.argv)
    ex = AnimalClassifierApp()
    ex.show()
    sys.exit(app.exec_())