第一部分——起手式
# Imports and device selection.
import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Use the GPU when CUDA is available, otherwise fall back to the CPU.
use_cuda = torch.cuda.is_available()
if use_cuda:
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device {device}")
第二部分——計算均值、方差
# Compute the per-pixel mean and std of the MNIST training images;
# these statistics feed transforms.Normalize in the final script.
transform = transforms.Compose([
    # Convert PIL images to tensors scaled into [0, 1].
    transforms.ToTensor()
])

# Load the MNIST training set (downloads on first use).
datasets1 = datasets.MNIST('./data', train=True, download=True, transform=transform)
datasets1_len = len(datasets1)

# One batch holding every image, so the statistics cover the whole set.
train_loader = torch.utils.data.DataLoader(datasets1, batch_size=datasets1_len, shuffle=True)

# Iterate the DataLoader (0 is the starting index for enumerate).
for batch_idx, data in enumerate(train_loader, 0):
    inputs, targets = data
    # Reshape (60000, 1, 28, 28) images into a (60000*1, 28*28) 2-D array;
    # -1 is a placeholder that lets PyTorch infer that dimension.
    x = inputs.view(-1, 28 * 28)
    x_mean = x.mean().item()  # ~0.1307 (the original comments had mean and std swapped)
    x_std = x.std().item()    # ~0.3081
    print(f"mean: {x_mean}, std: {x_std}")
# mean: 0.13066047430038452, std: 0.30810782313346863
第三部分——網絡模型
# Build the model as a custom class inheriting from torch.nn.Module.
class Net(torch.nn.Module):
    """Two-layer fully connected classifier for 28x28 MNIST digits."""

    def __init__(self):
        super(Net, self).__init__()
        # Linear = weighted sum; 784 inputs (28*28 pixels) -> 128 hidden units.
        self.fc1 = torch.nn.Linear(784, 128)
        # Randomly zero 20% of activations during training to curb overfitting.
        self.dropout = torch.nn.Dropout(p=0.2)
        # 128 inputs (previous layer's units) -> 10 outputs, one per digit 0-9.
        self.fc2 = torch.nn.Linear(128, 10)

    def forward(self, x):
        # Flatten each sample to a 28*28=784 vector, keeping the batch dim.
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        # log-softmax pairs with F.nll_loss, which expects log-probabilities.
        output = F.log_softmax(x, dim=1)
        return output
第四部分——訓練策略、測試策略
# Create a model instance on the selected device.
model = Net().to(device)
def train_step(data, target, model, optimizer):optimizer.zero_grad()#梯度歸零output = model(data)loss = F.nll_loss(output,target)#nll是負對數似然,output是y_head,target是y_trueloss.backward()#反向傳播求梯度optimizer.step()#根據梯度更新網絡return loss#每個批次如何測試
def test(data, target, model, test_loss, correct):output = model(data)#累積計算每個批次的損失test_loss += F.nll_loss(output,target,reduction='sum').item()#獲取對數概率最大對應的索引,dim=1:表示選取每一行概率最大的索引,keepdim = True 表示維度保持不變pred = output.argmax(dim=1, keepdim=True)#統計預測值與正確值相同的數量,eq在做比較,返回True/Fasle,sum是求和,item是將數據取出來(原來是tensor)correct += pred.eq(target.view_as(pred)).sum().item()return test_loss, correct
第五部分——開始訓練
# The actual epoch-by-epoch training.
EPOCHS = 5
# Adam optimizer; lr is the learning rate.
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)

for epoch in range(EPOCHS):
    # Training mode: Dropout is active and BN layers use batch statistics.
    model.train()
    for batch_index, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        loss = train_step(data, target, model, optimizer)
        # Print progress every 10 batches.
        if batch_index % 10 == 0:
            print('Train Epoch:{epoch} [{batch}/{total_batch} {percent}%] train_loss:{loss:.3f}'.format(
                epoch=epoch + 1,                                  # 1-based epoch number
                batch=batch_index * len(data),                    # samples processed so far
                total_batch=len(train_loader.dataset),            # samples per epoch
                percent=100.0 * batch_index / len(train_loader),  # progress through the epoch
                loss=loss.item()                                  # loss tensor -> float
            ))

    # Eval mode: Dropout is disabled and BN layers use running statistics.
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        # NOTE(review): test_loader is only defined in the "complete code"
        # section below — this excerpt depends on it being defined earlier.
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            test_loss, correct = test_step(data, target, model, test_loss, correct)
    test_loss = test_loss / len(test_loader.dataset)
    print('\n Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
完整代碼
# Imports and device selection.
import torch
from torchvision import datasets, transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

# Use the GPU when CUDA is available, otherwise fall back to the CPU.
use_cuda = torch.cuda.is_available()
if use_cuda:
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
print(f"Using device {device}")

# Data preprocessing.
transform = transforms.Compose([
    # Convert images to tensors scaled into [0, 1].
    transforms.ToTensor(),
    # Normalize with the dataset statistics computed earlier: 0.1307 is the
    # mean, 0.3081 the standard deviation (the original comment called it the
    # variance). These values are specific to MNIST.
    transforms.Normalize((0.1307,), (0.3081,))
])

# Load the data (downloads on first use).
datasets1 = datasets.MNIST('./data', train=True, download=True, transform=transform)
datasets2 = datasets.MNIST('./data', train=False, download=True, transform=transform)

# Training loader: batch size 128, shuffled each epoch.
train_loader = torch.utils.data.DataLoader(datasets1, batch_size=128, shuffle=True)
# Test batches can be larger, and the test set needs no shuffling.
test_loader = torch.utils.data.DataLoader(datasets2, batch_size=1000, shuffle=False)
class Net(torch.nn.Module):def __init__(self):super(Net,self).__init__()self.fc1 = torch.nn.Linear(784, 128)#Liner線性加權求和,784是input,128是當前層神經元個數self.dropout = torch.nn.Dropout(p = 0.2)self.fc2 = torch.nn.Linear(128, 10)#input=上一層的神經元個數,輸出是10,做一個0-9的10分類def forward(self, x):#把x的每條數據展成一維數組28*28=784x = torch.flatten(x,1)x = self.fc1(x)x = F.relu(x)x = self.dropout(x)x = self.fc2(x)output = F.log_softmax(x, dim=1)#做完softmax然后取log,便于后續計算損失函數(損失函數需要取log)return output #創建實例
model = Net().to(device)#每個批次如何訓練
def train_step(data, target, model, optimizer):optimizer.zero_grad()#梯度歸零output = model(data)loss = F.nll_loss(output,target)#nll是負對數似然,output是y_head,target是y_trueloss.backward()#反向傳播求梯度optimizer.step()#根據梯度更新網絡return loss#每個批次如何測試
def test_step(data, target, model, test_loss, correct):output = model(data)#累積計算每個批次的損失test_loss += F.nll_loss(output,target,reduction='sum').item()#獲取對數概率最大對應的索引,dim=1:表示選取每一行概率最大的索引,keepdim = True 表示維度保持不變pred = output.argmax(dim=1, keepdim=True)#統計預測值與正確值相同的數量,eq在做比較,返回True/Fasle,sum是求和,item是將數據取出來(原來是tensor)correct += pred.eq(target.view_as(pred)).sum().item()return test_loss, correct#真正分輪次訓練
# The actual epoch-by-epoch training.
EPOCHS = 5
# Adam optimizer; lr is the learning rate.
optimizer = torch.optim.Adam(model.parameters(), lr=0.003)

for epoch in range(EPOCHS):
    # Training mode: Dropout is active and BN layers use batch statistics.
    model.train()
    for batch_index, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        loss = train_step(data, target, model, optimizer)
        # Print progress every 10 batches.
        if batch_index % 10 == 0:
            print('Train Epoch:{epoch} [{batch}/{total_batch} {percent}%] train_loss:{loss:.3f}'.format(
                epoch=epoch + 1,                                  # 1-based epoch number
                batch=batch_index * len(data),                    # samples processed so far
                total_batch=len(train_loader.dataset),            # samples per epoch
                percent=100.0 * batch_index / len(train_loader),  # progress through the epoch
                loss=loss.item()                                  # loss tensor -> float
            ))

    # Eval mode: Dropout is disabled and BN layers use running statistics.
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            test_loss, correct = test_step(data, target, model, test_loss, correct)
    test_loss = test_loss / len(test_loader.dataset)
    print('\n Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))