卷積神經網絡
卷積神經網絡本質上是共享權重+稀疏連接的全連接網絡
編寫步驟
構建一個神經網絡,步驟是幾乎不變的,大概有以下幾步
- 準備數據集
#更高級的CNN網絡
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
# Prepare the dataset: MNIST train/test loaders with standard normalization.
batch_size = 64
# Use a distinct name ("transform") so the torchvision.transforms module
# is not shadowed by the composed pipeline; the original also merged the
# two statements onto one line, which is a syntax error.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # MNIST training-set mean/std
])
trainset = torchvision.datasets.MNIST(root=r'../data/mnist', train=True,
                                      download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.MNIST(root=r'../data/mnist', train=False,
                                     download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)
如果不使用官方的數據集讀取方法,也可以自己繼承 Dataset 類並重寫相關方法
class Mydataset(Dataset):
    """Dataset backed by a comma-separated text file of floats.

    Each row holds the feature columns followed by a single label in the
    last column; both are exposed as float32 tensors.
    """

    def __init__(self, filepath):
        table = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = table.shape[0]                      # number of samples
        self.x_data = torch.from_numpy(table[:, :-1])  # every column but the last
        self.y_data = torch.from_numpy(table[:, [-1]]) # last column, kept 2-D

    # Magic method: lets users fetch a (features, label) pair by index.
    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len
- 構建模型
class CNN_net(nn.Module):
    """CNN for 28x28 single-channel inputs: two conv layers, each followed
    by 2x2 max-pooling and ReLU, then a linear classifier producing the
    10-way logits."""

    def __init__(self):
        super(CNN_net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=3, bias=False)   # -> [10, 26, 26]
        self.conv2 = nn.Conv2d(10, 20, kernel_size=3, bias=False)  # -> [20, 11, 11]
        self.pooling = nn.MaxPool2d(2, 2)                          # halves the spatial size
        self.fc1 = nn.Linear(500, 10)                              # 20 * 5 * 5 = 500 features

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))   # [N, 10, 13, 13]
        x = F.relu(self.pooling(self.conv2(x)))   # [N, 20, 5, 5]
        # Bug fix: the flatten was commented out in the original, so fc1
        # received a 4-D tensor and raised a shape error.
        x = x.view(batch_size, -1)                # [N, 500]
        # Return raw logits: CrossEntropyLoss applies log-softmax itself,
        # so the output layer must not go through ReLU.
        return self.fc1(x)
如果想使用殘差網絡可以定義殘差網絡塊兒
# 定義殘差網絡塊兒
class ResidualBlock(nn.Module):
    """Residual block: two channel-preserving, padded 3x3 convolutions with
    a skip connection, i.e. output = relu(conv2(relu(conv1(x))) + x)."""

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        # Bug fix: the original was nn.Conv2d(channels, kernel_size=3, ...),
        # which omits out_channels and raises a TypeError on construction.
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = self.conv2(out)
        # The skip connection keeps gradients flowing through deep stacks.
        return F.relu(out + x)
那麼在對應的 forward 中,殘差塊應該添加在哪裡呢?如下所示:
class CNN_net(nn.Module):
    """CNN for 28x28 single-channel inputs: two conv/pool stages, each
    followed by a residual block, then a linear classifier producing the
    10-way logits."""

    def __init__(self):
        super(CNN_net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=3, bias=False)   # -> [10, 26, 26]
        self.conv2 = nn.Conv2d(10, 20, kernel_size=3, bias=False)  # -> [20, 11, 11]
        self.resblock1 = ResidualBlock(10)
        self.resblock2 = ResidualBlock(20)
        self.pooling = nn.MaxPool2d(2, 2)   # halves the spatial size
        self.fc1 = nn.Linear(500, 10)       # 20 * 5 * 5 = 500 features

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))   # [N, 10, 13, 13]
        x = self.resblock1(x)
        x = F.relu(self.pooling(self.conv2(x)))   # [N, 20, 5, 5]
        x = self.resblock2(x)
        x = x.view(batch_size, -1)                # flatten to [N, 500]
        # Bug fix: return raw logits — CrossEntropyLoss applies log-softmax
        # itself, so the output layer must not go through ReLU.
        return self.fc1(x)
- 構建模型和損失函數
# 構建模型和損失
# Build the model and the loss function.
model = CNN_net()

# Pick the CUDA device when one is available, otherwise fall back to CPU.
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
print(use_cuda)

# Move the model's parameters onto the selected device.
model.to(device)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
- 訓練(和測試)模型
def train(epoch):
    """Run one training epoch over ``trainloader``.

    Prints the average loss of every 100-batch window.  Uses the
    module-level ``model``, ``optimizer``, ``criterion`` and ``device``.
    """
    running_loss = 0.0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # .item() converts the scalar loss tensor into a Python float.
        running_loss += loss.item()
        if batch_idx % 100 == 99:
            # Bug fix: the original accumulated running_loss but printed only
            # the last batch's loss (and printed at batch 0); report the
            # window average instead.
            print('Train Epoch: {}, Loss: {:.6f}'.format(epoch, running_loss / 100))
            running_loss = 0.0
def test(epoch):
    """Evaluate the model on ``testloader`` and print the overall accuracy.

    ``epoch`` is accepted for symmetry with ``train`` but is not used.
    """
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for inputs, targets in testloader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            # Predicted class = index of the largest logit per sample.
            _, predicted = torch.max(outputs, dim=1)
            total += targets.size(0)
            # .item() keeps `correct` a plain Python number instead of a
            # float tensor (the original used `.sum() * 1.0`).
            correct += (predicted == targets).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
全部代碼
#更高級的CNN網絡
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
# Prepare the dataset: MNIST train/test loaders with standard normalization.
batch_size = 64
# Use a distinct name ("transform") so the torchvision.transforms module
# is not shadowed by the composed pipeline.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # MNIST training-set mean/std
])
trainset = torchvision.datasets.MNIST(root=r'../data/mnist', train=True,
                                      download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)
testset = torchvision.datasets.MNIST(root=r'../data/mnist', train=False,
                                     download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)

# Define the residual block
class ResidualBlock(nn.Module):
    """A channel-preserving residual block.

    Applies two padded 3x3 convolutions and adds the input back before the
    final activation: relu(conv2(relu(conv1(x))) + x).
    """

    def __init__(self, channels):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        # Two-conv residual branch, then the identity shortcut.
        residual = self.conv2(F.relu(self.conv1(x)))
        return F.relu(x + residual)
# Define the convolutional neural network
class CNN_net(nn.Module):
    """CNN for 28x28 single-channel inputs: two conv/pool stages, each
    followed by a residual block, then a linear classifier producing the
    10-way logits."""

    def __init__(self):
        super(CNN_net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=3, bias=False)   # -> [10, 26, 26]
        self.conv2 = nn.Conv2d(10, 20, kernel_size=3, bias=False)  # -> [20, 11, 11]
        self.resblock1 = ResidualBlock(10)
        self.resblock2 = ResidualBlock(20)
        self.pooling = nn.MaxPool2d(2, 2)   # halves the spatial size
        self.fc1 = nn.Linear(500, 10)       # 20 * 5 * 5 = 500 features

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))   # [N, 10, 13, 13]
        x = self.resblock1(x)
        x = F.relu(self.pooling(self.conv2(x)))   # [N, 20, 5, 5]
        x = self.resblock2(x)
        x = x.view(batch_size, -1)                # flatten to [N, 500]
        # Bug fix: return raw logits — CrossEntropyLoss applies log-softmax
        # itself, so the output layer must not go through ReLU.
        return self.fc1(x)

# Build the model and the loss
# Build the model and the loss function.
model = CNN_net()
# Pick a CUDA device if one is available, otherwise run on the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(torch.cuda.is_available())
# Move the model's parameters onto the selected device.
model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)


def train(epoch):
    """Run one training epoch over ``trainloader``.

    Prints the average loss of every 100-batch window.  Uses the
    module-level ``model``, ``optimizer``, ``criterion`` and ``device``.
    """
    running_loss = 0.0
    for batch_idx, (inputs, targets) in enumerate(trainloader):
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        # .item() converts the scalar loss tensor into a Python float.
        running_loss += loss.item()
        if batch_idx % 100 == 99:
            # Bug fix: the original accumulated running_loss but printed only
            # the last batch's loss (and printed at batch 0); report the
            # window average instead.
            print('Train Epoch: {}, Loss: {:.6f}'.format(epoch, running_loss / 100))
            running_loss = 0.0
def test(epoch):
    """Evaluate the model on ``testloader`` and print the overall accuracy.

    ``epoch`` is accepted for symmetry with ``train`` but is not used.
    """
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed during evaluation
        for inputs, targets in testloader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            # Predicted class = index of the largest logit per sample.
            _, predicted = torch.max(outputs, dim=1)
            total += targets.size(0)
            # .item() keeps `correct` a plain Python number instead of a
            # float tensor (the original used `.sum() * 1.0`).
            correct += (predicted == targets).sum().item()
    print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
if __name__ == '__main__':
    # Train for ten epochs, evaluating on the test set after each one.
    for epoch in range(10):
        train(epoch)
        test(epoch)
運行結果如下所示: