- 🍨 This article is a learning-record blog post from the 🔗365天深度學習訓練營 (365-day deep learning training camp)
- 🍖 Original author: K同學啊
Contents
1. Import and Inspect the Data
2. Configure the Dataset
3. Data Visualization
4. Build the Model
5. Train the Models
6. Compare and Evaluate the Models
7. Summary
1. Import and Inspect the Data
import pathlib,PIL
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
# Use a font that can render Chinese labels in plots
plt.rcParams['font.sans-serif'] = ['SimHei']

data_dir = pathlib.Path("./T6")
image_count = len(list(data_dir.glob('*/*')))
batch_size = 16
img_height = 336
img_width = 336
"""
關于image_dataset_from_directory()的詳細介紹可以參考文章:https://mtyjkh.blog.csdn.net/article/details/117018789
"""
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)

val_ds = tf.keras.preprocessing.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="validation",
    seed=12,
    image_size=(img_height, img_width),
    batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
for image_batch, labels_batch in train_ds:
    print(image_batch.shape)
    print(labels_batch.shape)
    break
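As a quick sanity check, you can also print the total image count and the number of images per class. This is a minimal sketch that I added here (not part of the original post); it assumes the T6 folder contains one sub-folder per class, which is the layout image_dataset_from_directory expects.

print("Total images:", image_count)
for class_dir in sorted(data_dir.glob('*')):
    if class_dir.is_dir():
        # Count the files in each class sub-folder
        print(class_dir.name, len(list(class_dir.glob('*'))))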
2. Configure the Dataset
AUTOTUNE = tf.data.AUTOTUNE
# Normalize pixel values to the [0, 1] range
def train_preprocessing(image, label):
    return (image / 255.0, label)

train_ds = (
    train_ds.cache()
    .shuffle(1000)
    .map(train_preprocessing)    # the preprocessing function can be set here
    # .batch(batch_size)         # batch_size was already set in image_dataset_from_directory
    .prefetch(buffer_size=AUTOTUNE)
)

val_ds = (
    val_ds.cache()
    .shuffle(1000)
    .map(train_preprocessing)    # the preprocessing function can be set here
    # .batch(batch_size)         # batch_size was already set in image_dataset_from_directory
    .prefetch(buffer_size=AUTOTUNE)
)
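After the map() above, pixel values should fall in [0, 1]. A one-batch check (a small sketch added here, not from the original post) confirms the value range and batch shapes:

for images, labels in train_ds.take(1):
    # Expect roughly 0.0 to 1.0 after dividing by 255
    print("pixel range:", float(tf.reduce_min(images)), "to", float(tf.reduce_max(images)))
    print("batch shape:", images.shape, "labels shape:", labels.shape)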
3. Data Visualization
plt.figure(figsize=(10, 8))  # figure width 10, height 8
plt.suptitle("Data preview")

for images, labels in train_ds.take(1):
    for i in range(15):
        plt.subplot(4, 5, i + 1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        # Show the image
        plt.imshow(images[i])
        # Show the label (labels are already 0-indexed, so they index class_names directly)
        plt.xlabel(class_names[labels[i]])

plt.show()
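Note that the integer labels produced by image_dataset_from_directory are 0-indexed and aligned with class_names, so no offset is needed when looking up the class name. A quick check (an added sketch, not in the original) prints the mapping:

for idx, name in enumerate(class_names):
    print(idx, name)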
4. Build the Model
from tensorflow.keras.layers import Dropout, Dense, BatchNormalization
from tensorflow.keras.models import Model

def create_model(optimizer='adam'):
    # Load the pre-trained VGG16 model
    vgg16_base_model = tf.keras.applications.vgg16.VGG16(
        weights='imagenet',
        include_top=False,                        # exclude the top fully connected layers
        input_shape=(img_width, img_height, 3),
        pooling='avg')                            # global average pooling replaces the top fully connected layers
    for layer in vgg16_base_model.layers:
        layer.trainable = False  # setting trainable to False freezes these layers, so their weights are not updated during training

    X = vgg16_base_model.output
    X = Dense(170, activation='relu')(X)
    X = BatchNormalization()(X)
    X = Dropout(0.5)(X)

    output = Dense(len(class_names), activation='softmax')(X)  # number of units equals the number of classes
    vgg16_model = Model(inputs=vgg16_base_model.input, outputs=output)

    vgg16_model.compile(optimizer=optimizer,
                        loss='sparse_categorical_crossentropy',
                        metrics=['accuracy'])
    return vgg16_model

model1 = create_model(optimizer=tf.keras.optimizers.Adam())
model2 = create_model(optimizer=tf.keras.optimizers.SGD())  # stochastic gradient descent (SGD) optimizer
model2.summary()
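Because the VGG16 base is frozen, only the new Dense/BatchNormalization/Dropout head should contribute trainable parameters. A small check (a sketch added here, not from the original post) makes that explicit:

# Count trainable vs. frozen parameters to confirm the base is frozen
trainable = sum(int(tf.size(w)) for w in model1.trainable_weights)
frozen = sum(int(tf.size(w)) for w in model1.non_trainable_weights)
print(f"Trainable parameters:     {trainable:,}")
print(f"Non-trainable parameters: {frozen:,}")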
5. Train the Models
NO_EPOCHS = 20

history_model1 = model1.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)
history_model2 = model2.fit(train_ds, epochs=NO_EPOCHS, verbose=1, validation_data=val_ds)
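Both models are trained for the same number of epochs so the comparison stays fair. If you want to keep the best weights automatically, a hedged variant (not in the original post; the file name best_model_adam.h5 is only an example) adds standard Keras callbacks:

# Optional: early stopping and checkpointing (illustrative only)
callbacks = [
    tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, restore_best_weights=True),
    tf.keras.callbacks.ModelCheckpoint('best_model_adam.h5', monitor='val_accuracy', save_best_only=True),
]
# history_model1 = model1.fit(train_ds, epochs=NO_EPOCHS, validation_data=val_ds, callbacks=callbacks)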
6. Compare and Evaluate the Models
from matplotlib.ticker import MultipleLocator
plt.rcParams['savefig.dpi'] = 300  # saved-figure resolution
plt.rcParams['figure.dpi'] = 300   # display resolution

acc1 = history_model1.history['accuracy']
acc2 = history_model2.history['accuracy']
val_acc1 = history_model1.history['val_accuracy']
val_acc2 = history_model2.history['val_accuracy']

loss1 = history_model1.history['loss']
loss2 = history_model2.history['loss']
val_loss1 = history_model1.history['val_loss']
val_loss2 = history_model2.history['val_loss']

epochs_range = range(len(acc1))

plt.figure(figsize=(16, 4))

plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc1, label='Training Accuracy-Adam')
plt.plot(epochs_range, acc2, label='Training Accuracy-SGD')
plt.plot(epochs_range, val_acc1, label='Validation Accuracy-Adam')
plt.plot(epochs_range, val_acc2, label='Validation Accuracy-SGD')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
# Set the x-axis tick interval to 1 epoch
ax = plt.gca()
ax.xaxis.set_major_locator(MultipleLocator(1))

plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss1, label='Training Loss-Adam')
plt.plot(epochs_range, loss2, label='Training Loss-SGD')
plt.plot(epochs_range, val_loss1, label='Validation Loss-Adam')
plt.plot(epochs_range, val_loss2, label='Validation Loss-SGD')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
# Set the x-axis tick interval to 1 epoch
ax = plt.gca()
ax.xaxis.set_major_locator(MultipleLocator(1))

plt.show()
As the curves show, in this example the Adam optimizer performs better than the SGD optimizer.
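To back up the visual comparison with numbers, a short sketch (not in the original post) prints the best validation accuracy reached by each optimizer:

# Best validation accuracy over all epochs, per optimizer
print(f"Best validation accuracy - Adam: {max(val_acc1):.4f}")
print(f"Best validation accuracy - SGD : {max(val_acc2):.4f}")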
7. Summary
Through this experiment, I learned how to compare the performance of different optimizers (Adam and SGD) during training, and how to visualize training metrics such as the loss and accuracy curves. This is an important skill: in research papers, such optimizer comparisons and visualizations add substance to the experimental work.