- 🍨 This article is a learning-record blog post for the 🔗365天深度學習訓練營 (365-Day Deep Learning Training Camp)
- 🍖 Original author: K同學啊
I. Preparation
1. Load the Data
import torch
import torch.nn as nn
import torchvision
from torchvision import transforms, datasets
import os, PIL, pathlib, warnings

warnings.filterwarnings("ignore")  # suppress warning messages

# On a Win10 system, to run on the GPU instead:
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device
device = torch.device("cpu")
device
device(type='cpu')
from torchtext.datasets import AG_NEWS

train_iter = list(AG_NEWS(split='train'))  # load the AG_NEWS dataset
num_class = len(set([label for (label, text) in train_iter]))
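Each element of train_iter is a (label, text) pair whose label is an integer from 1 to 4 (the AG_NEWS classes World, Sports, Business and Sci/Tech). A quick sanity check; the printed sample is illustrative and depends on the dataset version:

label, text = train_iter[0]
print(label)      # an integer class id in 1..4, e.g. 3
print(text[:50])  # the first 50 characters of the news snippet
print(num_class)  # 4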
2. Build the Vocabulary
from torchtext.data.utils import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator

tokenizer = get_tokenizer('basic_english')  # returns a tokenizer function (see the camp's get_tokenizer walkthrough)

def yield_tokens(data_iter):
    for _, text in data_iter:
        yield tokenizer(text)

vocab = build_vocab_from_iterator(yield_tokens(train_iter), specials=["<unk>"])
vocab.set_default_index(vocab["<unk>"])  # default index, returned whenever a word is not in the vocabulary
vocab(['here','is','an','example'])
[475, 21, 30, 5297]
text_pipeline  = lambda x: vocab(tokenizer(x))
label_pipeline = lambda x: int(x) - 1

text_pipeline('here is the an example')
[475, 21, 2, 30, 5297]
label_pipeline('10')
9
3. Generate Data Batches and Iterators
from torch.utils.data import DataLoader

def collate_batch(batch):
    label_list, text_list, offsets = [], [], [0]
    for (_label, _text) in batch:
        # label list
        label_list.append(label_pipeline(_label))
        # text list
        processed_text = torch.tensor(text_pipeline(_text), dtype=torch.int64)
        text_list.append(processed_text)
        # offsets, i.e. the token count of each sentence
        offsets.append(processed_text.size(0))

    label_list = torch.tensor(label_list, dtype=torch.int64)
    text_list  = torch.cat(text_list)
    offsets    = torch.tensor(offsets[:-1]).cumsum(dim=0)  # cumulative sum of the elements along dim

    return label_list.to(device), text_list.to(device), offsets.to(device)

# data loader
dataloader = DataLoader(train_iter, batch_size=8, shuffle=False, collate_fn=collate_batch)
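To sanity-check the collate function we can pull one batch and inspect it; the printed offsets below are illustrative, since the exact values depend on the sentence lengths:

label, text, offsets = next(iter(dataloader))
print(label.shape)  # torch.Size([8]): one label per sample
print(text.shape)   # 1-D: the token ids of all 8 sentences concatenated
print(offsets)      # 8 start positions, e.g. tensor([  0,  29,  71, ...])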
II. Build the Model
1. Define the Model
from torch import nn

class TextclassificationModel(nn.Module):
    def __init__(self, vocab_size, embed_dim, num_class):
        super(TextclassificationModel, self).__init__()
        self.embedding = nn.EmbeddingBag(vocab_size,  # vocabulary size
                                         embed_dim,   # embedding dimension
                                         sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)
        self.init_weights()

    def init_weights(self):
        initrange = 0.5
        self.embedding.weight.data.uniform_(-initrange, initrange)
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc.bias.data.zero_()

    def forward(self, text, offsets):
        embedded = self.embedding(text, offsets)
        return self.fc(embedded)
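nn.EmbeddingBag (with its default mode='mean') looks up every token's embedding and averages them per sentence, using offsets to mark where each sentence starts, so variable-length sentences need no padding. A minimal sketch of that equivalence with a toy 10-word vocabulary (hypothetical sizes and token ids):

bag = nn.EmbeddingBag(10, 4)            # toy table: 10 tokens, 4-dim embeddings
emb = nn.Embedding(10, 4)
emb.weight.data.copy_(bag.weight.data)  # share the same weight table

tokens  = torch.tensor([1, 2, 3, 4, 5])  # two sentences: [1, 2] and [3, 4, 5]
offsets = torch.tensor([0, 2])           # their start positions

bag_out = bag(tokens, offsets)                   # shape (2, 4): one vector per sentence
manual  = emb(torch.tensor([1, 2])).mean(dim=0)  # mean of sentence 1, computed by hand
print(torch.allclose(bag_out[0], manual))        # True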
2. Instantiate the Model
num_class  = len(set([label for (label, text) in train_iter]))
vocab_size = len(vocab)
em_size    = 64
model      = TextclassificationModel(vocab_size, em_size, num_class).to(device)
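A quick look at what was just built: an EmbeddingBag table of vocab_size × 64 plus a 64 → num_class linear head (the exact parameter count depends on the vocabulary size):

print(model)
total = sum(p.numel() for p in model.parameters())
print('{:,} trainable parameters'.format(total))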
3. Define the Training and Evaluation Functions
import time

def train(dataloader, model, optimizer, criterion, epoch):
    model.train()  # switch to training mode
    total_acc, train_loss, total_count = 0, 0, 0
    log_interval = 500
    start_time = time.time()

    for idx, (label, text, offsets) in enumerate(dataloader):
        predicted_label = model(text, offsets)

        optimizer.zero_grad()
        loss = criterion(predicted_label, label)
        loss.backward()
        optimizer.step()

        # record training metrics
        total_acc   += (predicted_label.argmax(1) == label).sum().item()
        train_loss  += loss.item()
        total_count += label.size(0)

        if idx % log_interval == 0 and idx > 0:
            elapsed = time.time() - start_time
            print('| epoch {:1d} | {:4d}/{:4d} batches '
                  '| train_acc {:4.3f} train_loss {:4.5f}'.format(epoch, idx, len(dataloader),
                                                                  total_acc / total_count,
                                                                  train_loss / total_count))
            total_acc, train_loss, total_count = 0, 0, 0
            start_time = time.time()

def evaluate(dataloader, model, criterion):
    model.eval()  # switch to evaluation mode
    total_acc, train_loss, total_count = 0, 0, 0

    with torch.no_grad():
        for idx, (label, text, offsets) in enumerate(dataloader):
            predicted_label = model(text, offsets)
            loss = criterion(predicted_label, label)  # compute the loss

            # record evaluation metrics
            total_acc   += (predicted_label.argmax(1) == label).sum().item()
            train_loss  += loss.item()
            total_count += label.size(0)

    return total_acc / total_count, train_loss / total_count
III. Train the Model
1. Split the Dataset and Run the Model
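The driver below splits the training data 95/5 into train/valid sets, loads the test split, and runs 10 epochs with a validation pass after each one. It is a minimal sketch consistent with the logged output that follows (1782 batches per epoch matches a 95% split of the 120,000 training samples at batch size 64); the hyperparameters (SGD with lr=5 and StepLR decay, CrossEntropyLoss) follow the standard torchtext tutorial and are assumptions, not values confirmed by the source.

from torch.utils.data.dataset import random_split

# assumed hyperparameters, patterned on the standard torchtext tutorial
EPOCHS     = 10
LR         = 5
BATCH_SIZE = 64

criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.1)  # decay lr each epoch

# 95/5 train/valid split, plus the test split
test_iter = list(AG_NEWS(split='test'))
num_train = int(len(train_iter) * 0.95)
split_train_, split_valid_ = random_split(train_iter,
                                          [num_train, len(train_iter) - num_train])

train_dataloader = DataLoader(split_train_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
valid_dataloader = DataLoader(split_valid_, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)
test_dataloader  = DataLoader(test_iter, batch_size=BATCH_SIZE,
                              shuffle=True, collate_fn=collate_batch)

for epoch in range(1, EPOCHS + 1):
    epoch_start_time = time.time()
    train(train_dataloader, model, optimizer, criterion, epoch)
    val_acc, val_loss = evaluate(valid_dataloader, model, criterion)
    scheduler.step()
    print('-' * 69)
    print('| epoch {:1d} | time:{:4.2f}s | valid_acc {:4.3f} valid_loss {:4.3f}'
          .format(epoch, time.time() - epoch_start_time, val_acc, val_loss))
    print('-' * 69)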
| epoch 1 |  500/1782 batches | train_acc 0.904 train_loss 0.00450
| epoch 1 | 1000/1782 batches | train_acc 0.903 train_loss 0.00455
| epoch 1 | 1500/1782 batches | train_acc 0.904 train_loss 0.00443
---------------------------------------------------------------------
| epoch 1 | time:11.72s | valid_acc 0.901 valid_loss 0.005
---------------------------------------------------------------------
| epoch 2 |  500/1782 batches | train_acc 0.918 train_loss 0.00379
| epoch 2 | 1000/1782 batches | train_acc 0.920 train_loss 0.00377
| epoch 2 | 1500/1782 batches | train_acc 0.913 train_loss 0.00399
---------------------------------------------------------------------
| epoch 2 | time:11.52s | valid_acc 0.907 valid_loss 0.005
---------------------------------------------------------------------
| epoch 3 |  500/1782 batches | train_acc 0.930 train_loss 0.00323
| epoch 3 | 1000/1782 batches | train_acc 0.925 train_loss 0.00345
| epoch 3 | 1500/1782 batches | train_acc 0.925 train_loss 0.00350
---------------------------------------------------------------------
| epoch 3 | time:11.77s | valid_acc 0.915 valid_loss 0.004
---------------------------------------------------------------------
| epoch 4 |  500/1782 batches | train_acc 0.937 train_loss 0.00294
| epoch 4 | 1000/1782 batches | train_acc 0.931 train_loss 0.00317
| epoch 4 | 1500/1782 batches | train_acc 0.927 train_loss 0.00332
---------------------------------------------------------------------
| epoch 4 | time:11.81s | valid_acc 0.914 valid_loss 0.004
---------------------------------------------------------------------
| epoch 5 |  500/1782 batches | train_acc 0.951 train_loss 0.00243
| epoch 5 | 1000/1782 batches | train_acc 0.950 train_loss 0.00243
| epoch 5 | 1500/1782 batches | train_acc 0.949 train_loss 0.00245
---------------------------------------------------------------------
| epoch 5 | time:11.94s | valid_acc 0.917 valid_loss 0.004
---------------------------------------------------------------------
| epoch 6 |  500/1782 batches | train_acc 0.951 train_loss 0.00236
| epoch 6 | 1000/1782 batches | train_acc 0.951 train_loss 0.00241
| epoch 6 | 1500/1782 batches | train_acc 0.951 train_loss 0.00241
---------------------------------------------------------------------
| epoch 6 | time:11.69s | valid_acc 0.918 valid_loss 0.004
---------------------------------------------------------------------
| epoch 7 |  500/1782 batches | train_acc 0.952 train_loss 0.00233
| epoch 7 | 1000/1782 batches | train_acc 0.952 train_loss 0.00236
| epoch 7 | 1500/1782 batches | train_acc 0.952 train_loss 0.00235
---------------------------------------------------------------------
| epoch 7 | time:11.88s | valid_acc 0.920 valid_loss 0.004
---------------------------------------------------------------------
| epoch 8 |  500/1782 batches | train_acc 0.953 train_loss 0.00233
| epoch 8 | 1000/1782 batches | train_acc 0.954 train_loss 0.00226
| epoch 8 | 1500/1782 batches | train_acc 0.953 train_loss 0.00229
---------------------------------------------------------------------
| epoch 8 | time:11.92s | valid_acc 0.917 valid_loss 0.004
---------------------------------------------------------------------
| epoch 9 |  500/1782 batches | train_acc 0.956 train_loss 0.00223
| epoch 9 | 1000/1782 batches | train_acc 0.955 train_loss 0.00219
| epoch 9 | 1500/1782 batches | train_acc 0.955 train_loss 0.00223
---------------------------------------------------------------------
| epoch 9 | time:11.78s | valid_acc 0.919 valid_loss 0.004
---------------------------------------------------------------------
| epoch 10 |  500/1782 batches | train_acc 0.955 train_loss 0.00226
| epoch 10 | 1000/1782 batches | train_acc 0.954 train_loss 0.00223
| epoch 10 | 1500/1782 batches | train_acc 0.955 train_loss 0.00221
---------------------------------------------------------------------
| epoch 10 | time:11.82s | valid_acc 0.919 valid_loss 0.004
---------------------------------------------------------------------
2. Evaluate the Model on the Test Set
print('Checking the results of the test dataset...')
test_acc, test_loss = evaluate(test_dataloader, model, criterion)
print('test accuracy {:8.3f}'.format(test_acc))
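Once trained, the model can also classify a single headline by running it through the same text_pipeline. The helper below is an assumed inference sketch in the usual torchtext style; ag_news_label reflects the dataset's four classes, and ex_text is an illustrative sample, not from the source:

ag_news_label = {1: "World", 2: "Sports", 3: "Business", 4: "Sci/Tech"}

def predict(text, text_pipeline):
    with torch.no_grad():
        text    = torch.tensor(text_pipeline(text)).to(device)
        offsets = torch.tensor([0]).to(device)  # a single sentence starts at position 0
        output  = model(text, offsets)
        return output.argmax(1).item() + 1      # undo the label_pipeline shift

ex_text = "Stocks rallied on Wall Street after upbeat quarterly earnings."
print('This is %s news' % ag_news_label[predict(ex_text, text_pipeline)])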
IV. Takeaways

This week I additionally installed the portalocker library, downloaded the AG_NEWS dataset, and built the TextclassificationModel: the input text is embedded token by token, the per-sentence embeddings are mean-aggregated by nn.EmbeddingBag, and a linear layer maps the result to the four classes, completing the text-classification task. The problems that came up during training were all resolved effectively.