● Language environment: Python 3.8.8
● Editor: Jupyter Lab
● Deep learning framework: TensorFlow 2.4.1
Cat vs. Dog Recognition
- I. Preliminary Work
- 1. Set up the GPU
- 2. Import the data
- II. Data Preprocessing
- 1. Load the data
- 2. Check the data again
- 3. Configure the dataset
- III. Build the VGG-16 Network
- IV. Compile
- V. Train the Model
- VI. Model Evaluation
- VII. Prediction
- VIII. Summary
I. Preliminary Work
1. Set up the GPU
import tensorflow as tf

gpus = tf.config.list_physical_devices("GPU")

if gpus:
    tf.config.experimental.set_memory_growth(gpus[0], True)  # allocate GPU memory on demand
    tf.config.set_visible_devices([gpus[0]], "GPU")

# print the GPU info to confirm the GPU is available
print(gpus)
2. Import the data
import matplotlib.pyplot as plt
import os, PIL, pathlib
import warnings

# support Chinese characters in plots
plt.rcParams['font.sans-serif'] = ['SimHei']  # render Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly

# hide warnings
warnings.filterwarnings('ignore')

data_dir = "./365-7-data"
data_dir = pathlib.Path(data_dir)

image_count = len(list(data_dir.glob('*/*')))
print("Total number of images:", image_count)
II. Data Preprocessing
1. Load the data
Use the image_dataset_from_directory method to load the data from disk into a tf.data.Dataset.
batch_size = 8
img_height = 224
img_width = 224
"""
關于image_dataset_from_directory()的詳細介紹可以參考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(data_dir,validation_split=0.2,subset="training",seed=12,image_size=(img_height, img_width),batch_size=batch_size)
"""
關于image_dataset_from_directory()的詳細介紹可以參考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
We can output the dataset's labels via class_names. The labels correspond to the directory names in alphabetical order.
class_names = train_ds.class_names
print(class_names)
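Assuming the data directory contains one subfolder per class (here, the cat and dog images), the output would look like:

['cat', 'dog']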
2. Check the data again
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
● image_batch is a tensor of shape (8, 224, 224, 3): a batch of 8 images of shape 224x224x3 (the last dimension refers to the RGB color channels).
● labels_batch is a tensor of shape (8,); these are the labels corresponding to the 8 images.
3. Configure the dataset
● shuffle(): shuffles the data; for a detailed introduction see https://zhuanlan.zhihu.com/p/42417456
● prefetch(): prefetches data to speed up execution; see my previous two articles for a detailed explanation.
● cache(): caches the dataset in memory to speed up execution.
AUTOTUNE = tf.data.AUTOTUNE

def preprocess_image(image, label):
    # normalize pixel values to [0, 1]
    return (image / 255.0, label)

train_ds = train_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)
val_ds = val_ds.map(preprocess_image, num_parallel_calls=AUTOTUNE)

train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
III. Build the VGG-16 Network
from tensorflow.keras import Input
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten

def VGG16(nb_classes, input_shape):
    input_tensor = Input(shape=input_shape)
    # 1st block
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_tensor)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)
    # 2nd block
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)
    # 3rd block
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)
    # 4th block
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)
    # 5th block
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)
    # fully connected layers
    x = Flatten()(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    output_tensor = Dense(nb_classes, activation='softmax', name='predictions')(x)

    model = Model(input_tensor, output_tensor)
    return model

# the number of output classes should match the dataset (cat and dog, i.e. 2),
# so we use len(class_names) rather than the 1000 classes of the ImageNet setup
model = VGG16(len(class_names), (img_width, img_height, 3))
model.summary()
IV. Compile
model.compile(optimizer="adam",
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
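A note on the loss choice: image_dataset_from_directory returns integer labels by default (label_mode='int'), which is exactly what sparse_categorical_crossentropy expects; one-hot encoded labels would pair with categorical_crossentropy instead. A minimal illustration of the two variants:

# sparse: integer class indices, e.g. [0, 1, 1, 0]
loss_sparse = tf.keras.losses.SparseCategoricalCrossentropy()
# non-sparse: one-hot vectors, e.g. [[1, 0], [0, 1]] (would require label_mode='categorical')
loss_onehot = tf.keras.losses.CategoricalCrossentropy()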
V. Train the Model
from tqdm import tqdm
import tensorflow.keras.backend as K

epochs = 10
lr = 1e-4

# record the training metrics for later analysis
history_train_loss = []
history_train_accuracy = []
history_val_loss = []
history_val_accuracy = []

for epoch in range(epochs):
    train_total = len(train_ds)
    val_total = len(val_ds)

    """
    total: expected number of iterations
    ncols: width of the progress bar
    mininterval: minimum progress-update interval in seconds (default: 0.1)
    """
    with tqdm(total=train_total, desc=f'Epoch {epoch + 1}/{epochs}', mininterval=1, ncols=100) as pbar:
        lr = lr * 0.92
        K.set_value(model.optimizer.lr, lr)

        for image, label in train_ds:
            """
            Train the model. Simply put, train_on_batch is a finer-grained
            alternative to model.fit(). For a detailed look at train_on_batch, see:
            https://www.yuque.com/mingtian-fkmxf/hv4lcq/ztt4gy
            """
            history = model.train_on_batch(image, label)

            train_loss = history[0]
            train_accuracy = history[1]

            pbar.set_postfix({"loss": "%.4f" % train_loss,
                              "accuracy": "%.4f" % train_accuracy,
                              "lr": K.get_value(model.optimizer.lr)})
            pbar.update(1)

        history_train_loss.append(train_loss)
        history_train_accuracy.append(train_accuracy)

    print('Starting validation!')

    with tqdm(total=val_total, desc=f'Epoch {epoch + 1}/{epochs}', mininterval=0.3, ncols=100) as pbar:
        for image, label in val_ds:
            history = model.test_on_batch(image, label)

            val_loss = history[0]
            val_accuracy = history[1]

            pbar.set_postfix({"loss": "%.4f" % val_loss,
                              "accuracy": "%.4f" % val_accuracy})
            pbar.update(1)

        history_val_loss.append(val_loss)
        history_val_accuracy.append(val_accuracy)

    print('Validation finished!')
    print("Validation loss: %.4f" % val_loss)
    print("Validation accuracy: %.4f" % val_accuracy)
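Optionally, the trained model can be saved and reloaded later for inference. A minimal sketch (the file name vgg16_cats_dogs.h5 is just an example):

# save the trained model (example file name)
model.save('vgg16_cats_dogs.h5')
# reload it later for inference
new_model = tf.keras.models.load_model('vgg16_cats_dogs.h5')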
VI. Model Evaluation
from datetime import datetime
current_time = datetime.now()  # get the current time

epochs_range = range(epochs)

plt.figure(figsize=(12, 4))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, history_train_accuracy, label='Training Accuracy')
plt.plot(epochs_range, history_val_accuracy, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.xlabel(current_time)  # include the timestamp when checking in, otherwise the code screenshot is invalid

plt.subplot(1, 2, 2)
plt.plot(epochs_range, history_train_loss, label='Training Loss')
plt.plot(epochs_range, history_val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
VII. Prediction
import numpy as np

# use the trained model to view the prediction results
plt.figure(figsize=(18, 3))  # the figure is 18 wide and 3 high
plt.suptitle("Prediction results")

for images, labels in val_ds.take(1):
    for i in range(8):
        ax = plt.subplot(1, 8, i + 1)

        # display the image
        plt.imshow(images[i].numpy())

        # add a batch dimension to the image
        img_array = tf.expand_dims(images[i], 0)

        # use the model to predict the class of the image
        predictions = model.predict(img_array)

        plt.title(class_names[np.argmax(predictions)])
        plt.axis("off")
VIII. Summary
Analysis of VGG's strengths and weaknesses:
● VGG strengths
VGG's structure is very clean: the entire network uses the same convolution kernel size (3x3) and the same max-pooling size (2x2).
● VGG weaknesses
1) Training takes a long time and tuning is difficult. 2) It requires a large amount of storage, which hurts deployability; for example, the VGG-16 weight file is over 500 MB, which makes it impractical for embedded systems.
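The 500 MB figure follows directly from the parameter count: the classic 1000-class VGG-16 has about 138 million weights, and at 4 bytes per float32 weight that comes to roughly 528 MB. A quick back-of-the-envelope check:

# rough storage estimate for the classic 1000-class VGG-16
params = 138_357_544           # total weight count of VGG-16 (ImageNet configuration)
bytes_per_weight = 4           # float32
print(params * bytes_per_weight / 1024**2)  # ~527.8 MB

(Our 2-class variant is slightly smaller because the final Dense layer shrinks; model.count_params() reports the exact number.)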
Structure notes:
● 13 convolutional layers, named blockX_convX
● 3 fully connected layers, named fcX and predictions
● 5 pooling layers, named blockX_pool
VGG's architecture is very uniform: stacks of 3x3 convolutions followed by 2x2 max pooling with stride 2, so each pooling halves the feature-map size; the convolutional part is followed by three fully connected layers and a softmax classifier. VGG-16 and VGG-19 differ only in the number of convolutional layers per block: VGG-16 uses 2, 2, 3, 3, 3 convolutions in blocks 1 through 5 (13 conv + 3 FC = 16 weight layers), while VGG-19 adds one extra convolution in each of blocks 3, 4, and 5 (16 conv + 3 FC = 19 weight layers), as summarized below.
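To make the comparison concrete, the per-block convolution counts of the two standard configurations:

# number of 3x3 conv layers in blocks 1-5; both variants end with 3 fully connected layers
vgg_configs = {
    "VGG16": [2, 2, 3, 3, 3],   # 13 conv + 3 FC = 16 weight layers
    "VGG19": [2, 2, 4, 4, 4],   # 16 conv + 3 FC = 19 weight layers
}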