Find an image dataset on Kaggle, train a CNN on it, and visualize the results with Grad-CAM.
Advanced: split the code into multiple files.
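The files below assume the imported packages are already installed; something like the following should cover them (standard PyPI package names, pin versions as needed for your environment):

pip install tensorflow kaggle opencv-python matplotlib numpy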
config.py
import os


# Base configuration class
class Config:
    def __init__(self):
        # Kaggle credentials
        self.kaggle_username = ""  # Kaggle username
        self.kaggle_key = ""       # Kaggle API key

        # Dataset configuration
        # The Kaggle API expects the "owner/dataset" slug, e.g. the chest X-ray dataset below.
        self.dataset_name = "paultimothymooney/chest-xray-pneumonia"
        self.data_dir = "data"
        # Adjust these paths to match the extracted folder structure of your dataset
        # (the chest X-ray dataset extracts to data/chest_xray/{train,val,test}).
        self.train_dir = os.path.join(self.data_dir, "train")
        self.val_dir = os.path.join(self.data_dir, "val")
        self.test_dir = os.path.join(self.data_dir, "test")

        # Model configuration
        self.model_save_path = "models/cnn_model.h5"
        self.img_width, self.img_height = 224, 224
        self.batch_size = 32
        self.epochs = 10
        self.learning_rate = 0.001

        # Grad-CAM configuration
        self.gradcam_output_dir = "gradcam_output"
        self.target_layer = "block5_conv3"  # last conv layer of VGG16; adjust for other models
data_loader.py
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from config import Config


class DataLoader:
    def __init__(self, config: Config):
        self.config = config
        self.train_generator = None
        self.val_generator = None
        self.test_generator = None
        self.class_indices = None

    def setup_data_generators(self):
        # Data augmentation for the training set
        train_datagen = ImageDataGenerator(
            rescale=1. / 255,
            rotation_range=20,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest'
        )
        test_datagen = ImageDataGenerator(rescale=1. / 255)

        # Create the data generators
        self.train_generator = train_datagen.flow_from_directory(
            self.config.train_dir,
            target_size=(self.config.img_width, self.config.img_height),
            batch_size=self.config.batch_size,
            class_mode='categorical'
        )
        self.val_generator = test_datagen.flow_from_directory(
            self.config.val_dir,
            target_size=(self.config.img_width, self.config.img_height),
            batch_size=self.config.batch_size,
            class_mode='categorical'
        )
        self.test_generator = test_datagen.flow_from_directory(
            self.config.test_dir,
            target_size=(self.config.img_width, self.config.img_height),
            batch_size=self.config.batch_size,
            class_mode='categorical',
            shuffle=False
        )
        self.class_indices = self.train_generator.class_indices
        return self.train_generator, self.val_generator, self.test_generator

    def get_class_names(self):
        if self.class_indices is None:
            self.setup_data_generators()
        return list(self.class_indices.keys())
grad_cam.py
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
from tensorflow.keras.models import Model
from config import Config


class GradCAM:
    def __init__(self, model, class_names, config: Config):
        self.model = model
        self.class_names = class_names
        self.config = config
        os.makedirs(self.config.gradcam_output_dir, exist_ok=True)

    def generate_heatmap(self, img_array, layer_name=None):
        if layer_name is None:
            layer_name = self.config.target_layer

        # Model mapping the input to the target layer's activations and the predictions
        grad_model = Model(
            inputs=self.model.inputs,
            outputs=[self.model.get_layer(layer_name).output, self.model.output]
        )

        # Compute the score of the predicted class under the gradient tape
        with tf.GradientTape() as tape:
            conv_outputs, predictions = grad_model(img_array)
            class_idx = np.argmax(predictions[0])
            class_name = self.class_names[class_idx]
            loss = predictions[:, class_idx]

        # Gradients of the class score w.r.t. the conv feature maps
        grads = tape.gradient(loss, conv_outputs)

        # Pool the gradients to one weight per channel
        pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

        # Weight the feature maps by the pooled gradients
        conv_outputs = conv_outputs[0]
        heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_outputs), axis=-1)

        # Normalize the heatmap to [0, 1] (guard against an all-zero map)
        heatmap = np.maximum(heatmap, 0)
        max_val = np.max(heatmap)
        if max_val > 0:
            heatmap = heatmap / max_val

        return heatmap, class_name, float(predictions[0][class_idx])

    def overlay_heatmap(self, heatmap, img_path, alpha=0.4):
        # Load the original image
        img = cv2.imread(img_path)
        img = cv2.resize(img, (self.config.img_width, self.config.img_height))

        # Resize the heatmap to the image size
        heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))

        # Convert the heatmap to a JET color map
        heatmap = np.uint8(255 * heatmap)
        heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)

        # Overlay the heatmap on the original image
        superimposed_img = heatmap * alpha + img
        superimposed_img = np.uint8(np.clip(superimposed_img, 0, 255))

        return img, heatmap, superimposed_img

    def process_image(self, img_path, layer_name=None):
        # Load and preprocess the image
        img = tf.keras.preprocessing.image.load_img(
            img_path, target_size=(self.config.img_width, self.config.img_height))
        img_array = tf.keras.preprocessing.image.img_to_array(img)
        img_array = np.expand_dims(img_array, axis=0)
        img_array = img_array / 255.0

        # Generate the heatmap
        heatmap, class_name, confidence = self.generate_heatmap(img_array, layer_name)

        # Overlay the heatmap on the original image
        original_img, heatmap_img, superimposed_img = self.overlay_heatmap(heatmap, img_path)

        # Save the result
        filename = os.path.basename(img_path)
        output_path = os.path.join(self.config.gradcam_output_dir, f"gradcam_{filename}")

        # Build the visualization
        fig, axes = plt.subplots(1, 3, figsize=(15, 5))
        axes[0].imshow(cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB))
        axes[0].set_title('Original image')
        axes[0].axis('off')
        axes[1].imshow(cv2.cvtColor(heatmap_img, cv2.COLOR_BGR2RGB))
        axes[1].set_title('Grad-CAM heatmap')
        axes[1].axis('off')
        axes[2].imshow(cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB))
        axes[2].set_title(f'Overlay - {class_name} ({confidence:.2%})')
        axes[2].axis('off')
        plt.tight_layout()
        plt.savefig(output_path)
        plt.close()

        return output_path, class_name, confidence
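The target layer defaults to VGG16's block5_conv3 (see config.py); for the simple CNN or any other backbone the name will differ. A small helper sketch, assuming the Grad-CAM target should simply be the model's last Conv2D layer (find_last_conv_layer is our own name, not part of the class above):

from tensorflow.keras.layers import Conv2D


def find_last_conv_layer(model):
    """Return the name of the last Conv2D layer, to use as the Grad-CAM target layer."""
    for layer in reversed(model.layers):
        if isinstance(layer, Conv2D):
            return layer.name
    raise ValueError("No Conv2D layer found in the model")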
kaggle_downloader.py
import os
import json
from kaggle.api.kaggle_api_extended import KaggleApi
from config import Config
import zipfile


class KaggleDownloader:
    def __init__(self, config: Config):
        self.config = config
        self.api = None

    def authenticate(self):
        # Set the Kaggle API credentials
        os.environ['KAGGLE_USERNAME'] = self.config.kaggle_username
        os.environ['KAGGLE_KEY'] = self.config.kaggle_key

        # Initialize the API client
        self.api = KaggleApi()
        self.api.authenticate()

    def download_dataset(self):
        if not self.api:
            self.authenticate()

        # Create the data directory
        os.makedirs(self.config.data_dir, exist_ok=True)

        # Download the dataset
        print(f"Downloading dataset: {self.config.dataset_name}")
        self.api.dataset_download_files(
            self.config.dataset_name,
            path=self.config.data_dir,
            unzip=True
        )
        print(f"Download complete, saved to: {self.config.data_dir}")

        # Extract any remaining zip files (if needed)
        for file in os.listdir(self.config.data_dir):
            if file.endswith('.zip'):
                zip_path = os.path.join(self.config.data_dir, file)
                with zipfile.ZipFile(zip_path, 'r') as zip_ref:
                    zip_ref.extractall(self.config.data_dir)
                os.remove(zip_path)
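Hard-coding credentials in config.py is optional: KaggleApi.authenticate() also reads ~/.kaggle/kaggle.json or the KAGGLE_USERNAME/KAGGLE_KEY environment variables. A minimal sketch for checking that some form of credentials exists before downloading (kaggle_credentials_available is a hypothetical helper):

import os


def kaggle_credentials_available() -> bool:
    """Return True if a kaggle.json file or the KAGGLE_* environment variables are present."""
    kaggle_json = os.path.expanduser("~/.kaggle/kaggle.json")
    has_json = os.path.isfile(kaggle_json)
    has_env = bool(os.environ.get("KAGGLE_USERNAME")) and bool(os.environ.get("KAGGLE_KEY"))
    return has_json or has_env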
main.py
import argparse
from config import Config
from kaggle_downloader import KaggleDownloader
from data_loader import DataLoader
from model_builder import ModelBuilder
from trainer import Trainer
from grad_cam import GradCAM
import tensorflow as tf
import os


def main():
    # Parse command-line arguments
    parser = argparse.ArgumentParser(description='Train a CNN on a Kaggle image dataset with Grad-CAM visualization')
    parser.add_argument('--download', action='store_true', help='Download the Kaggle dataset')
    parser.add_argument('--train', action='store_true', help='Train the model')
    parser.add_argument('--evaluate', action='store_true', help='Evaluate the model')
    parser.add_argument('--visualize', action='store_true', help='Run Grad-CAM visualization')
    parser.add_argument('--dataset', type=str, help='Kaggle dataset name')
    parser.add_argument('--model_type', type=str, default='vgg16', choices=['simple', 'vgg16'], help='Model type')
    parser.add_argument('--img_path', type=str, help='Image path for Grad-CAM visualization')
    args = parser.parse_args()

    # Configuration
    config = Config()
    if args.dataset:
        config.dataset_name = args.dataset

    # 1. Download the Kaggle dataset
    if args.download:
        downloader = KaggleDownloader(config)
        downloader.download_dataset()

    # 2. Load the data
    data_loader = DataLoader(config)
    train_generator, val_generator, test_generator = data_loader.setup_data_generators()
    class_names = data_loader.get_class_names()
    print(f"Classes: {class_names}")

    # 3. Build the model
    model_builder = ModelBuilder(config, len(class_names))
    if args.model_type == 'simple':
        model = model_builder.build_simple_cnn()
    else:
        model = model_builder.build_vgg16_model()

    # 4. Train the model
    if args.train:
        trainer = Trainer(config)
        history = trainer.train(model, train_generator, val_generator)
        print("Training finished")

    # 5. Evaluate the model
    if args.evaluate:
        if os.path.exists(config.model_save_path):
            model = tf.keras.models.load_model(config.model_save_path)
            print("Loaded saved model")
        test_loss, test_acc = model.evaluate(test_generator)
        print(f"Test accuracy: {test_acc:.2%}")

    # 6. Grad-CAM visualization
    if args.visualize:
        if os.path.exists(config.model_save_path):
            model = tf.keras.models.load_model(config.model_save_path)
            print("Loaded saved model for visualization")
        if args.img_path and os.path.exists(args.img_path):
            grad_cam = GradCAM(model, class_names, config)
            output_path, class_name, confidence = grad_cam.process_image(args.img_path)
            print(f"Visualization saved to: {output_path}")
            print(f"Predicted class: {class_name}, confidence: {confidence:.2%}")
        else:
            print("Please provide a valid image path")


if __name__ == "__main__":
    main()
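With the flags defined above, a typical end-to-end run could look like the following (the dataset slug and image path are placeholders; substitute your own):

python main.py --download --dataset paultimothymooney/chest-xray-pneumonia
python main.py --train --model_type vgg16
python main.py --evaluate
python main.py --visualize --img_path path/to/test_image.jpeg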
model_builder.py
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from tensorflow.keras.applications import VGG16
from tensorflow.keras.optimizers import Adam
from config import Config


class ModelBuilder:
    def __init__(self, config: Config, num_classes: int):
        self.config = config
        self.num_classes = num_classes

    def build_simple_cnn(self):
        # Build a simple CNN model
        model = Sequential([
            Conv2D(32, (3, 3), activation='relu',
                   input_shape=(self.config.img_width, self.config.img_height, 3)),
            MaxPooling2D((2, 2)),
            Conv2D(64, (3, 3), activation='relu'),
            MaxPooling2D((2, 2)),
            Conv2D(128, (3, 3), activation='relu'),
            MaxPooling2D((2, 2)),
            Flatten(),
            Dense(128, activation='relu'),
            Dropout(0.5),
            Dense(self.num_classes, activation='softmax')
        ])
        model.compile(
            optimizer=Adam(learning_rate=self.config.learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy']
        )
        return model

    def build_vgg16_model(self, fine_tune=False):
        # Build a model on top of pretrained VGG16
        base_model = VGG16(
            weights='imagenet',
            include_top=False,
            input_shape=(self.config.img_width, self.config.img_height, 3)
        )

        # Freeze the pretrained layers unless fine-tuning
        if not fine_tune:
            for layer in base_model.layers:
                layer.trainable = False

        # Add a custom classification head
        x = base_model.output
        x = Flatten()(x)
        x = Dense(256, activation='relu')(x)
        x = Dropout(0.5)(x)
        predictions = Dense(self.num_classes, activation='softmax')(x)

        model = Model(inputs=base_model.input, outputs=predictions)
        model.compile(
            optimizer=Adam(learning_rate=self.config.learning_rate),
            loss='categorical_crossentropy',
            metrics=['accuracy']
        )
        return model
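build_vgg16_model only toggles between a fully frozen and a fully trainable backbone. If you want to fine-tune just the last convolutional block, a sketch along these lines should work, assuming the standard VGG16 layer names (block1_* through block5_*); unfreeze_block5 is a hypothetical helper, not part of the class above:

from tensorflow.keras.optimizers import Adam


def unfreeze_block5(model, learning_rate=1e-5):
    """Unfreeze only VGG16's block5 layers (plus the custom head) and recompile."""
    for layer in model.layers:
        if layer.name.startswith("block") and not layer.name.startswith("block5"):
            layer.trainable = False   # keep blocks 1-4 frozen
        else:
            layer.trainable = True    # fine-tune block5 and the custom classification head
    model.compile(optimizer=Adam(learning_rate=learning_rate),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model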
trainer.py
import os
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from config import Config


class Trainer:
    def __init__(self, config: Config):
        self.config = config

    def train(self, model, train_generator, val_generator):
        # Create the model save directory
        os.makedirs(os.path.dirname(self.config.model_save_path), exist_ok=True)

        # Define callbacks
        callbacks = [
            ModelCheckpoint(
                self.config.model_save_path,
                monitor='val_accuracy',
                save_best_only=True,
                mode='max',
                verbose=1
            ),
            EarlyStopping(
                monitor='val_loss',
                patience=5,
                restore_best_weights=True,
                verbose=1
            ),
            ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.2,
                patience=3,
                min_lr=0.00001,
                verbose=1
            )
        ]

        # Train the model (ensure at least one validation step for small validation sets)
        history = model.fit(
            train_generator,
            steps_per_epoch=train_generator.samples // self.config.batch_size,
            validation_data=val_generator,
            validation_steps=max(1, val_generator.samples // self.config.batch_size),
            epochs=self.config.epochs,
            callbacks=callbacks
        )
        return history
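The History object returned by train() can be plotted to inspect convergence. A minimal sketch, assuming the 'accuracy' metric configured in model_builder.py (plot_history is a name of our own):

import matplotlib.pyplot as plt


def plot_history(history, output_path="training_curves.png"):
    """Save accuracy and loss curves for the training and validation sets."""
    fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(12, 4))
    ax_acc.plot(history.history["accuracy"], label="train")
    ax_acc.plot(history.history["val_accuracy"], label="val")
    ax_acc.set_title("Accuracy")
    ax_acc.legend()
    ax_loss.plot(history.history["loss"], label="train")
    ax_loss.plot(history.history["val_loss"], label="val")
    ax_loss.set_title("Loss")
    ax_loss.legend()
    plt.tight_layout()
    plt.savefig(output_path)
    plt.close(fig)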
@浙大疏錦行