A First Look at Neural Network Classification with PyTorch
1. Data Preparation
We use the Anaconda virtual environment pytorch created earlier; to make it easy to inspect the return value of each step, development can be done in Jupyter Notebook. First, import the required packages:
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor
The PyTorch framework's data input relies on two base classes: torch.utils.data.DataLoader and torch.utils.data.Dataset. A Dataset stores the samples and their corresponding labels, and a DataLoader wraps a Dataset in an iterable.
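To make the relationship concrete, here is a minimal sketch with made-up toy data (not part of the MNIST workflow below):

import torch
from torch.utils.data import TensorDataset, DataLoader

toy_x = torch.randn(8, 3)                  # 8 samples, 3 features each (toy data)
toy_y = torch.arange(8)                    # 8 integer labels
toy_ds = TensorDataset(toy_x, toy_y)       # Dataset: stores samples and labels
toy_dl = DataLoader(toy_ds, batch_size=4)  # DataLoader: batches and iterates

for xb, yb in toy_dl:
    print(xb.shape, yb.shape)              # torch.Size([4, 3]) torch.Size([4])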
For convenience, we use the MNIST dataset:
%matplotlib inline
from pathlib import Path
import requests

DATA_PATH = Path("data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents=True, exist_ok=True)

URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"

if not (PATH / FILENAME).exists():
    content = requests.get(URL + FILENAME).content
    (PATH / FILENAME).open("wb").write(content)
Once the download finishes, read the data in:
import pickle
import gzip

with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
    ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
The data read in this way is not yet in tensor format, so it needs to be converted to Tensors:
import torch

x_train, y_train, x_valid, y_valid = map(
    torch.tensor, (x_train, y_train, x_valid, y_valid)
)
The most important step is to wrap the data in a Dataset and a DataLoader:
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader

bs = 64  # batch size; must be defined before use

train_ds = TensorDataset(x_train, y_train)
train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True)

valid_ds = TensorDataset(x_valid, y_valid)
valid_dl = DataLoader(valid_ds, batch_size=bs * 2)
This completes the data preparation.
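Before moving on, pulling a single batch confirms the shapes; this check is an addition here, with the expected sizes following from bs = 64 and the flat 784-dimensional MNIST vectors:

xb, yb = next(iter(train_dl))
print(xb.shape, yb.shape)  # expected: torch.Size([64, 784]) torch.Size([64])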
2. Defining the Model
Here we take the model straight from the official tutorial. Note that nn.Flatten is commented out: the MNIST samples loaded above are already flat 784-dimensional vectors, so no flattening is needed.
# Get cpu, gpu or mps device for training.
device = (
    "cuda"
    if torch.cuda.is_available()
    else "mps"
    if torch.backends.mps.is_available()
    else "cpu"
)
print(f"Using {device} device")

# Define model
class NeuralNetwork(nn.Module):
    def __init__(self):
        super().__init__()
        # self.flatten = nn.Flatten()  # not needed: inputs are already flat vectors
        self.linear_relu_stack = nn.Sequential(
            nn.Linear(28 * 28, 512),
            nn.ReLU(),
            nn.Linear(512, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
        )

    def forward(self, x):
        # x = self.flatten(x)
        logits = self.linear_relu_stack(x)
        return logits

model = NeuralNetwork().to(device)
print(model)
The printed output is shown below for reference (since nn.Flatten is commented out, it does not appear in the module list):
Using cuda device
NeuralNetwork(
  (linear_relu_stack): Sequential(
    (0): Linear(in_features=784, out_features=512, bias=True)
    (1): ReLU()
    (2): Linear(in_features=512, out_features=512, bias=True)
    (3): ReLU()
    (4): Linear(in_features=512, out_features=10, bias=True)
  )
)
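As a quick check on the layer sizes printed above, we can count the trainable parameters; this small sketch is not part of the original tutorial:

n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"{n_params:,} trainable parameters")
# (784*512 + 512) + (512*512 + 512) + (512*10 + 10) = 669,706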
3. Defining the Loss Function and Optimizer
Again, these come directly from the official tutorial:
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
The SGD used here is the most basic optimizer; it follows the plain gradient-descent update, so convergence is relatively slow. For faster convergence, use Adam instead, as sketched below. Note that nn.CrossEntropyLoss operates on the raw logits the model returns, which is why no softmax layer is needed.
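Swapping in Adam is a one-line change; lr=1e-3 below is Adam's common default, used here as an untuned starting point:

# Alternative optimizer: Adam usually converges faster on this task
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)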
4. Defining the Training and Test Functions
The training function:
def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    model.train()
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # Compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # Backpropagation
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if batch % 100 == 0:
            loss, current = loss.item(), (batch + 1) * len(X)
            print(f"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]")
The test function:
def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss, correct = 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
5. Training
epochs = 5
for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(train_dl, model, loss_fn, optimizer)
    test(valid_dl, model, loss_fn)
print("Done!")
6. Saving the Model
torch.save(model.state_dict(), "model.pth")
print("Saved PyTorch Model State to model.pth")
7. Loading the Model and Making Predictions
model = NeuralNetwork().to(device)
model.load_state_dict(torch.load("model.pth"))
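If the checkpoint was saved on a GPU machine but loaded where CUDA is unavailable, torch.load accepts a map_location argument to remap the tensors onto the current device (a variation on the line above):

model.load_state_dict(torch.load("model.pth", map_location=device))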
Making a prediction:
classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]

model.eval()
x, y = train_ds[2][0], train_ds[2][1]
with torch.no_grad():
    x = x.to(device)
    pred = model(x)
    print(pred)
    predicted, actual = classes[pred.argmax(0)], classes[y]
    print(f'Predicted: "{predicted}", Actual: "{actual}"')
Training with the SGD optimizer, the best accuracy over 5 epochs was 76%, while with the Adam optimizer accuracy reached 97% after the first epoch alone.