Coding Steps
The coding steps have been documented many times before, so there is no need to go over them again.
(1) Prepare the dataset
Here we use the MNIST dataset, which has an official download source. We simply use the data-loading utilities provided by torchvision. If you prefer not to use them, building your own dataset class like the one below works just as well.
# Build your own data-loading module
# (1) Data-loading module
import numpy as np
import torch
from torch.utils.data import Dataset

class Mydataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    # Magic method that lets the user retrieve a sample by index
    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len
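For reference, a custom Dataset like this is consumed by a DataLoader in exactly the same way as the torchvision one. Below is a minimal usage sketch, assuming a hypothetical comma-separated file 'some_data.csv' whose last column holds the label (the path is a placeholder, not from the original).

# Usage sketch for the custom Dataset (the file path is a placeholder)
dataset = Mydataset('some_data.csv')
loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
for x_batch, y_batch in loader:
    print(x_batch.shape, y_batch.shape)
    break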
Here we use the torchvision utilities directly.
# Prepare the dataset
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])
trainset = torchvision.datasets.MNIST(root=r'../data/mnist', train=True,
                                      download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.MNIST(root=r'../data/mnist', train=False,
                                     download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
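As a quick sanity check (not part of the original listing), one batch can be pulled from trainloader to confirm the shapes and the value range after normalization:

# Sanity-check sketch: inspect one normalized batch from trainloader
images, labels = next(iter(trainloader))
print(images.shape)   # torch.Size([64, 1, 28, 28])
print(labels.shape)   # torch.Size([64])
print(images.min().item(), images.max().item())  # roughly -0.42 to 2.82 after Normalize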
(2) Build the model
This time we use a fully connected model without dropout.
# Define the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.linear1 = nn.Linear(784, 100)
        self.linear2 = nn.Linear(100, 20)
        self.linear3 = nn.Linear(20, 10)

    def forward(self, x):
        # Flatten each 1x28x28 image into a 784-dimensional vector
        x = x.view(x.size(0), -1)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x
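A small sketch (not in the original) to verify the wiring: feeding a dummy MNIST-shaped batch through the network should produce one logit per class for each sample.

# Verification sketch: a dummy batch of four 1x28x28 images yields (4, 10) logits
dummy = torch.randn(4, 1, 28, 28)
net = Net()
print(net(dummy).shape)  # torch.Size([4, 10])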
(3) Choose the loss and optimizer
# Build the model and loss
model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
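One detail worth noting: nn.CrossEntropyLoss takes the raw logits from the last linear layer (which is why forward has no softmax), because it combines LogSoftmax and NLLLoss internally. A small sketch illustrating the equivalence:

# Sketch: CrossEntropyLoss on raw logits equals LogSoftmax followed by NLLLoss
logits = torch.randn(3, 10)            # 3 samples, 10 classes
labels = torch.tensor([0, 4, 9])
ce = nn.CrossEntropyLoss()(logits, labels)
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), labels)
print(torch.allclose(ce, nll))         # True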
(4) Train the model
def train(epoch):
    running_loss = 0.0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # The loss tensor must be converted to a Python float before accumulating
        running_loss += loss.item()
        if batch_idx % 100 == 0:
            print('Train Epoch: {}, Loss: {:.6f}'.format(epoch, loss.item()))
            running_loss = 0
(5) Test the model
def test(epoch):
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct = correct + (predicted.eq(targets).sum() * 1.0)
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
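Equivalently (and perhaps more commonly), the running count can be kept as a plain Python int by calling .item() on the batch-wise sum, which makes the *1.0 cast unnecessary. A self-contained sketch of that variant:

# Sketch: .item() turns the 0-dim sum tensor into a plain Python int
predicted = torch.tensor([1, 2, 3, 3])
targets = torch.tensor([1, 2, 0, 3])
correct = predicted.eq(targets).sum().item()   # 3
print(100.0 * correct / targets.size(0))       # 75.0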
Full code
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
# Prepare the dataset
batch_size = 64
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.1307,), (0.3081,))])
trainset = torchvision.datasets.MNIST(root=r'../data/mnist', train=True,
                                      download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.MNIST(root=r'../data/mnist', train=False,
                                     download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)

# Define the model
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.linear1 = nn.Linear(784, 100)
        self.linear2 = nn.Linear(100, 20)
        self.linear3 = nn.Linear(20, 10)

    def forward(self, x):
        # Flatten each 1x28x28 image into a 784-dimensional vector
        x = x.view(x.size(0), -1)
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)
        return x
# Build the model and loss
model = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

def train(epoch):
    running_loss = 0.0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # The loss tensor must be converted to a Python float before accumulating
        running_loss += loss.item()
        if batch_idx % 100 == 0:
            print('Train Epoch: {}, Loss: {:.6f}'.format(epoch, loss.item()))
            running_loss = 0

def test(epoch):
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(testloader):
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct = correct + (predicted.eq(targets).sum() * 1.0)
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))

if __name__ == '__main__':
    for epoch in range(10):
        train(epoch)
        test(epoch)