Assignment:
Find an image dataset on Kaggle, train a CNN on it, and visualize the results with Grad-CAM.
Advanced:
Split the code into multiple files.
import os
import copy  # for deep-copying the best model weights during training
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms, models
from PIL import Image  # added missing import
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm

# Set random seeds so the results are reproducible
torch.manual_seed(42)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(42)

class CustomDataset(Dataset):
    def __init__(self, image_dir, label_dir, transform=None, debug=False):
        self.image_dir = image_dir
        self.label_dir = label_dir
        self.transform = transform
        self.debug = debug
        # Collect the image and label file lists
        self.image_files = sorted([f for f in os.listdir(image_dir) if f.lower().endswith(('.jpg', '.jpeg', '.png'))])
        self.label_files = sorted([f for f in os.listdir(label_dir) if f.lower().endswith('.txt')])
        # Print debug information
        if self.debug:
            print(f"Image directory: {image_dir}")
            print(f"Label directory: {label_dir}")
            print(f"Found {len(self.image_files)} image files")
            print(f"Found {len(self.label_files)} label files")
            # Print the first 10 files to check that the sort orders match
            print("\nFirst 10 image files:")
            for f in self.image_files[:10]:
                print(f"  {f}")
            print("\nFirst 10 label files:")
            for f in self.label_files[:10]:
                print(f"  {f}")
        # Make sure the image and label counts match
        assert len(self.image_files) == len(self.label_files), \
            f"Number of images ({len(self.image_files)}) does not match number of labels ({len(self.label_files)})"
        # Build a file mapping (assumes file names match once the extension is stripped)
        self.image_to_label = {}
        for img_file in self.image_files:
            # Image file name without extension
            img_base = os.path.splitext(img_file)[0]
            # Look for the matching label file
            found = False
            for lbl_file in self.label_files:
                lbl_base = os.path.splitext(lbl_file)[0]
                if img_base == lbl_base:
                    self.image_to_label[img_file] = lbl_file
                    found = True
                    break
            if not found and self.debug:
                print(f"Warning: no label file found for image '{img_file}'")
        # Confirm again that every image has a matching label
        assert len(self.image_to_label) == len(self.image_files), \
            f"Only {len(self.image_to_label)} images have matching labels; expected {len(self.image_files)}"
        if self.debug:
            print(f"Built {len(self.image_to_label)} image-label mappings")

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, idx):
        img_file = self.image_files[idx]
        lbl_file = self.image_to_label[img_file]
        image_path = os.path.join(self.image_dir, img_file)
        label_path = os.path.join(self.label_dir, lbl_file)
        image = Image.open(image_path).convert('RGB')
        with open(label_path, 'r') as f:
            label = int(f.read().strip())
        if self.transform:
            image = self.transform(image)
        return image, label
# Dataset path configuration
data_dir = r"C:\Users\許蘭\Desktop\打卡文件\mix20230204"  # replace with your dataset path
train_image_dir = os.path.join(data_dir, 'train/images')
train_label_dir = os.path.join(data_dir, 'train/labels')
val_image_dir = os.path.join(data_dir, 'validation/images')
val_label_dir = os.path.join(data_dir, 'validation/labels')
test_image_dir = os.path.join(data_dir, 'test/images')
test_label_dir = os.path.join(data_dir, 'test/labels')
# Data preprocessing and augmentation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(10),
        transforms.ColorJitter(brightness=0.2, contrast=0.2),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
# Load the data with the custom dataset class
image_datasets = {
    'train': CustomDataset(train_image_dir, train_label_dir, data_transforms['train']),
    'val': CustomDataset(val_image_dir, val_label_dir, data_transforms['val']),
    'test': CustomDataset(test_image_dir, test_label_dir, data_transforms['test'])
}

# Create the data loaders
batch_size = 32
dataloaders = {
    'train': DataLoader(image_datasets['train'], batch_size=batch_size, shuffle=True, num_workers=4),
    'val': DataLoader(image_datasets['val'], batch_size=batch_size, shuffle=False, num_workers=4),
    'test': DataLoader(image_datasets['test'], batch_size=batch_size, shuffle=False, num_workers=4)
}
# Note: on Windows, num_workers > 0 requires the training code to run under an
# if __name__ == '__main__' guard; set num_workers=0 if that is not practical.

# Number of classes (assumes class labels are consecutive integers starting at 0;
# this pass loads every training image once just to read the labels)
num_classes = len(set(label for _, label in image_datasets['train']))
print(f"The dataset contains {num_classes} classes")

# Check whether a GPU is available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"使用設備: {device}")# 定義CNN模型
class CNNModel(nn.Module):
    def __init__(self, num_classes):
        super(CNNModel, self).__init__()
        # Feature extraction
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        # With 224x224 inputs the four max-pool stages produce 14x14 feature maps,
        # so pool adaptively to 7x7 to match the classifier's expected input size
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # Classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes)
        )
        # Initialize the weights
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
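The 512 * 7 * 7 input size of the first linear layer is easy to get wrong, so a quick shape check with a dummy batch (an illustrative addition, not part of the original script) confirms that the feature extractor, adaptive pooling, and classifier agree:

# Illustrative shape check: four 2x2 max-pools turn 224x224 inputs into 14x14 maps,
# which the adaptive pooling reduces to the 7x7 the classifier expects
_dummy = torch.randn(2, 3, 224, 224)
_check_model = CNNModel(num_classes=2)
print(_check_model.features(_dummy).shape)  # torch.Size([2, 512, 14, 14])
print(_check_model(_dummy).shape)           # torch.Size([2, 2])
del _dummy, _check_model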
# Initialize the model
model = CNNModel(num_classes)
model = model.to(device)

# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

# Model training
def train_model(model, dataloaders, criterion, optimizer, scheduler, num_epochs=25):
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    # Record the training history
    history = {
        'train_loss': [], 'train_acc': [],
        'val_loss': [], 'val_acc': []
    }

    for epoch in range(num_epochs):
        print(f'Epoch {epoch+1}/{num_epochs}')
        print('-' * 10)

        # Each epoch has a training phase and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # training mode
            else:
                model.eval()   # evaluation mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over the data
            for inputs, labels in tqdm(dataloaders[phase]):
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Zero the gradients
                optimizer.zero_grad()

                # Forward pass; track gradients only in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    # Backward pass + optimizer step only in the training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            if phase == 'train' and scheduler is not None:
                scheduler.step()

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            print(f'{phase} Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

            # Record the history
            history[f'{phase}_loss'].append(epoch_loss)
            history[f'{phase}_acc'].append(epoch_acc.item())

            # Keep a copy of the best model so far
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
                print(f'Saved new best model, accuracy: {best_acc:.4f}')

        print()

    # Load the best model weights
    model.load_state_dict(best_model_wts)
    return model, history

# Train the model
num_epochs = 25
model, history = train_model(model, dataloaders, criterion, optimizer, scheduler, num_epochs)

# Evaluate the model on the test set
def evaluate_model(model, dataloader):
    model.eval()
    running_corrects = 0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)

            running_corrects += torch.sum(preds == labels.data)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

    accuracy = running_corrects.double() / len(dataloader.dataset)
    print(f'Test accuracy: {accuracy:.4f}')
    return accuracy.item(), all_preds, all_labels

# Evaluate the model
test_accuracy, predictions, true_labels = evaluate_model(model, dataloaders['test'])
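Since evaluate_model already returns the per-sample predictions and labels, a simple confusion matrix can be built from them with numpy alone (a sketch, not part of the original script):

# Optional: confusion matrix from the test-set predictions
conf_mat = np.zeros((num_classes, num_classes), dtype=int)
for t, p in zip(true_labels, predictions):
    conf_mat[t, p] += 1
print("Confusion matrix (rows = true class, columns = predicted class):")
print(conf_mat)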
# Visualize the training history
plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
plt.plot(history['train_loss'], label='Training Loss')
plt.plot(history['val_loss'], label='Validation Loss')
plt.title('Loss Over Time')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(history['train_acc'], label='Training Accuracy')
plt.plot(history['val_acc'], label='Validation Accuracy')
plt.title('Accuracy Over Time')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()

plt.tight_layout()
plt.savefig('training_history.png')
plt.show()

# Save the model
torch.save(model.state_dict(), 'cnn_image_classifier.pth')
print("模型已保存為 'cnn_image_classifier.pth'")# 可選:使用預訓練模型進行遷移學習
# Optional: transfer learning with a pretrained model
def use_pretrained_model(num_classes):
    # Load a ResNet18 pretrained on ImageNet
    # (pretrained=True is deprecated in newer torchvision; weights=models.ResNet18_Weights.DEFAULT is the current equivalent)
    model_ft = models.resnet18(pretrained=True)
    # Freeze all but the last few parameter tensors
    for param in list(model_ft.parameters())[:-4]:
        param.requires_grad = False
    # Replace the final fully connected layer
    num_ftrs = model_ft.fc.in_features
    model_ft.fc = nn.Linear(num_ftrs, num_classes)
    return model_ft.to(device)
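The assignment also asks for a Grad-CAM visualization, which the script above does not yet include. Below is a minimal sketch that hooks the last convolutional layer of CNNModel; the layer choice, helper names, and the overlay code are assumptions rather than part of the original. For the "split into multiple files" part of the assignment, this function is a natural candidate for its own module alongside the dataset class, the model, and the training loop.

# Minimal Grad-CAM sketch for CNNModel (assumed implementation, not from the original script)
def grad_cam(model, input_tensor, target_class=None, target_layer=None):
    """Return a (H, W) heatmap in [0, 1] for a single (3, H, W) input tensor."""
    model.eval()
    activations, gradients = [], []

    def forward_hook(module, inputs, output):
        activations.append(output.detach())

    def backward_hook(module, grad_input, grad_output):
        gradients.append(grad_output[0].detach())

    # Default to the last convolutional layer of the feature extractor
    if target_layer is None:
        target_layer = [m for m in model.features if isinstance(m, nn.Conv2d)][-1]
    fwd_handle = target_layer.register_forward_hook(forward_hook)
    bwd_handle = target_layer.register_full_backward_hook(backward_hook)

    output = model(input_tensor.unsqueeze(0).to(device))
    if target_class is None:
        target_class = output.argmax(dim=1).item()
    model.zero_grad()
    output[0, target_class].backward()
    fwd_handle.remove()
    bwd_handle.remove()

    # Weight each feature map by the spatial mean of its gradient, then combine
    weights = gradients[0].mean(dim=(2, 3), keepdim=True)      # (1, C, 1, 1)
    cam = torch.relu((weights * activations[0]).sum(dim=1))    # (1, h, w)
    cam = cam / (cam.max() + 1e-8)
    # Upsample the heatmap to the input resolution
    cam = nn.functional.interpolate(cam.unsqueeze(0), size=input_tensor.shape[1:],
                                    mode='bilinear', align_corners=False)
    return cam.squeeze().cpu().numpy(), target_class

# Example overlay on one test image (denormalizing with the ImageNet mean/std used above)
img_tensor, true_label = image_datasets['test'][0]
heatmap, pred_class = grad_cam(model, img_tensor)
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = np.clip(img_tensor.permute(1, 2, 0).numpy() * std + mean, 0, 1)
plt.figure(figsize=(5, 5))
plt.imshow(img)
plt.imshow(heatmap, cmap='jet', alpha=0.4)
plt.title(f'Grad-CAM: true {true_label}, predicted {pred_class}')
plt.axis('off')
plt.savefig('grad_cam_example.png')
plt.show()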