PyTorch Official Tutorials (Chinese Edition): PyTorch for Images

Finetuning an object detection model with torchvision 0.3

"""
為數據集編寫類
"""
import os
import numpy as np
import torch
from PIL import Imageclass PennFudanDataset(object):def __init__(self, root, transforms):self.root = rootself.transforms = transforms# 下載所有圖像文件,為其排序# 確保它們對齊self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))def __getitem__(self, idx):# load images ad masksimg_path = os.path.join(self.root, "PNGImages", self.imgs[idx])mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])img = Image.open(img_path).convert("RGB")# 請注意我們還沒有將mask轉換為RGB,# 因為每種顏色對應一個不同的實例# 0是背景mask = Image.open(mask_path)# 將PIL圖像轉換為numpy數組mask = np.array(mask)# 實例被編碼為不同的顏色obj_ids = np.unique(mask)# 第一個id是背景,所以刪除它obj_ids = obj_ids[1:]# 將顏色編碼的mask分成一組# 二進制格式masks = mask == obj_ids[:, None, None]# 獲取每個mask的邊界框坐標num_objs = len(obj_ids)boxes = []for i in range(num_objs):pos = np.where(masks[i])xmin = np.min(pos[1])xmax = np.max(pos[1])ymin = np.min(pos[0])ymax = np.max(pos[0])boxes.append([xmin, ymin, xmax, ymax])# 將所有轉換為torch.Tensorboxes = torch.as_tensor(boxes, dtype=torch.float32)# 這里僅有一個類labels = torch.ones((num_objs,), dtype=torch.int64)masks = torch.as_tensor(masks, dtype=torch.uint8)image_id = torch.tensor([idx])area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])# 假設所有實例都不是人群iscrowd = torch.zeros((num_objs,), dtype=torch.int64)target = {}target["boxes"] = boxestarget["labels"] = labelstarget["masks"] = maskstarget["image_id"] = image_idtarget["area"] = areatarget["iscrowd"] = iscrowdif self.transforms is not None:img, target = self.transforms(img, target)return img, targetdef __len__(self):return len(self.imgs)"""
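A quick sanity check of the class above (a sketch, assuming the PennFudanPed
archive has been downloaded and extracted into the working directory; kept
commented out so it does not interfere with the rest of the script):

# dataset = PennFudanDataset('PennFudanPed', transforms=None)
# img, target = dataset[0]
# print(img.size)               # PIL image size of the first sample
# print(target["boxes"].shape)  # one [xmin, ymin, xmax, ymax] row per pedestrian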
"""
There are two common situations. The first is when we want to start from a
pre-trained model and just finetune the last layer. The other is when we want
to replace the backbone of the model with a different one (for faster
predictions, for example). Both cases are handled below.
"""
# """
# 1.微調已經預訓練的模型
# """
#
# import torchvision
# from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
#
# # 在COCO上加載經過預訓練的預訓練模型
# model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
#
# # replace the classifier with a new one, that has
# # 將分類器替換為具有用戶定義的 num_classes的新分類器
# num_classes = 2  # 1 class (person) + background
# # 獲取分類器的輸入參數的數量
# in_features = model.roi_heads.box_predictor.cls_score.in_features
# # 用新的頭部替換預先訓練好的頭部
# model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
#
# """
# 2.修改模型以添加不同的主干
# """
#
# import torchvision
# from torchvision.models.detection import FasterRCNN
# from torchvision.models.detection.rpn import AnchorGenerator
#
# # 加載預先訓練的模型進行分類和返回
# # 只有功能
# backbone = torchvision.models.mobilenet_v2(pretrained=True).features
# # FasterRCNN需要知道骨干網中的輸出通道數量。對于mobilenet_v2,它是1280,所以我們需要在這里添加它
# backbone.out_channels = 1280
#
# # 我們讓RPN在每個空間位置生成5 x 3個錨點
# # 具有5種不同的大小和3種不同的寬高比。
# # 我們有一個元組[元組[int]]
# # 因為每個特征映射可能具有不同的大小和寬高比
# anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),),
#                                    aspect_ratios=((0.5, 1.0, 2.0),))
#
# # 定義一下我們將用于執行感興趣區域裁剪的特征映射,以及重新縮放后裁剪的大小。
# # 如果您的主干返回Tensor,則featmap_names應為[0]。
# # 更一般地,主干應該返回OrderedDict [Tensor]
# # 并且在featmap_names中,您可以選擇要使用的功能映射。
# roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=[0],
#                                                 output_size=7,
#                                                 sampling_ratio=2)
#
# # 將這些pieces放在FasterRCNN模型中
# model = FasterRCNN(backbone,
#                    num_classes=2,
#                    rpn_anchor_generator=anchor_generator,
#                    box_roi_pool=roi_pooler)"""
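# In eval mode, torchvision detection models take a list of 3-channel image
# tensors and return one dict per image. A quick smoke test of the assembled
# model (a sketch; it assumes the commented snippet above has been run, so it
# is kept commented out as well):
#
# model.eval()
# x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)]
# predictions = model(x)  # each dict holds 'boxes', 'labels' and 'scores'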
"""
An instance segmentation model for the PennFudan dataset
"""
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor


def get_model_instance_segmentation(num_classes):
    # load an instance segmentation model pre-trained on COCO
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)

    # get the number of input features for the classifier
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    # replace the pre-trained head with a new one
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)

    # now get the number of input features for the mask classifier
    in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
    hidden_layer = 256
    # and replace the mask predictor with a new one
    model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,
                                                       hidden_layer,
                                                       num_classes)

    return model


"""
Writing helper functions for data augmentation / transformation
"""
import transforms as T


def get_transform(train):
    transforms = []
    transforms.append(T.ToTensor())
    if train:
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)


"""
Writing the main function that performs training and validation
"""
from engine import train_one_epoch, evaluate
import utils


def main():
    # train on the GPU, or on the CPU if a GPU is not available
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    # our dataset has two classes only - background and person
    num_classes = 2
    # use our dataset and the defined transformations
    dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
    dataset_test = PennFudanDataset('PennFudanPed', get_transform(train=False))

    # split the dataset into train and test set
    indices = torch.randperm(len(dataset)).tolist()
    dataset = torch.utils.data.Subset(dataset, indices[:-50])
    dataset_test = torch.utils.data.Subset(dataset_test, indices[-50:])

    # define training and validation data loaders
    data_loader = torch.utils.data.DataLoader(
        dataset, batch_size=2, shuffle=True, num_workers=4,
        collate_fn=utils.collate_fn)
    data_loader_test = torch.utils.data.DataLoader(
        dataset_test, batch_size=1, shuffle=False, num_workers=4,
        collate_fn=utils.collate_fn)

    # get the model using our helper function
    model = get_model_instance_segmentation(num_classes)
    # move the model to the right device
    model.to(device)

    # construct an optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.005,
                                momentum=0.9, weight_decay=0.0005)
    # and a learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=3,
                                                   gamma=0.1)

    # train for 10 epochs
    num_epochs = 10
    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # evaluate on the test dataset
        evaluate(model, data_loader_test, device=device)

    print("That's it!")
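
Note that main() is only defined above, never called. The engine, utils and
transforms modules come from torchvision's references/detection folder; with
those on the path, the script is run with the usual guard:

if __name__ == "__main__":
    main()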

Finetuning Torchvision Models

from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import time
import os
import copy"""
輸入
"""
# 頂級數據目錄。 這里我們假設目錄的格式符合ImageFolder結構
data_dir = "./data/hymenoptera_data"# 從[resnet, alexnet, vgg, squeezenet, densenet, inception]中選擇模型
model_name = "squeezenet"# 數據集中類別數量
num_classes = 2# 訓練的批量大小(根據您的內存量而變化)
batch_size = 8# 你要訓練的epoch數
num_epochs = 15# 用于特征提取的標志。 當為False時,我們微調整個模型,
# 當True時我們只更新重新形成的圖層參數
feature_extract = True"""
輔助函數
"""
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
    since = time.time()

    val_acc_history = []

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch has a training and a validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode

            running_loss = 0.0
            running_corrects = 0

            # Iterate over data
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Zero the parameter gradients
                optimizer.zero_grad()

                # Forward pass
                # Track history only if in the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    # Get model outputs and calculate loss.
                    # Special case for inception because in training it has an
                    # auxiliary output. In train mode we calculate the loss by
                    # summing the final output and the auxiliary output, but
                    # in testing we only consider the final output.
                    if is_inception and phase == 'train':
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4*loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)

                    _, preds = torch.max(outputs, 1)

                    # Backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # Statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            # Deep copy the model if it is the best so far
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
            if phase == 'val':
                val_acc_history.append(epoch_acc)

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Load the best model weights
    model.load_state_dict(best_model_wts)
    return model, val_acc_history


def set_parameter_requires_grad(model, feature_extracting):
    if feature_extracting:
        for param in model.parameters():
            param.requires_grad = False


"""
Initialize and Reshape the Networks
When feature extracting, we only want to update the parameters of the last
layer - in other words, we only want to update the parameters of the layer(s)
we are reshaping. Therefore we do not need to compute gradients for the
parameters we are not changing, so for efficiency we set their .requires_grad
attribute to False. This is important because by default this attribute is
set to True. Then, when we initialize the new layer, the new parameters have
.requires_grad=True by default, so only the new layer's parameters are
updated. When finetuning, we can leave all of .requires_grad set to the
default of True.
"""
# Resnet
# model.fc = nn.Linear(512, num_classes)
# Alexnet
# model.classifier[6] = nn.Linear(4096, num_classes)
# VGG
# model.classifier[6] = nn.Linear(4096, num_classes)
# Squeezenet
# model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
# Densenet
# model.classifier = nn.Linear(1024, num_classes)
# Inception v3
# model.AuxLogits.fc = nn.Linear(768, num_classes)
# model.fc = nn.Linear(2048, num_classes)

"""
Reshape Code
"""
def initialize_model(model_name, num_classes, feature_extract, use_pretrained=True):
    # Initialize these variables which will be set in this if statement.
    # Each of these variables is model specific.
    model_ft = None
    input_size = 0

    if model_name == "resnet":
        """ Resnet18
        """
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "alexnet":
        """ Alexnet
        """
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "vgg":
        """ VGG11_bn
        """
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "squeezenet":
        """ Squeezenet
        """
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224

    elif model_name == "densenet":
        """ Densenet
        """
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs, num_classes)
        input_size = 224

    elif model_name == "inception":
        """ Inception v3
        Be careful, expects (299,299) sized images and has auxiliary output
        """
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft, feature_extract)
        # Handle the auxiliary net
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)
        # Handle the primary net
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs, num_classes)
        input_size = 299

    else:
        print("Invalid model name, exiting...")
        exit()

    return model_ft, input_size

# Initialize the model for this run
model_ft, input_size = initialize_model(model_name, num_classes, feature_extract, use_pretrained=True)

# Print the model we just instantiated
print(model_ft)
"""
Load Data
"""
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}

print("Initializing Datasets and Dataloaders...")

# Create training and validation datasets
image_datasets = {x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x]) for x in ['train', 'val']}
# Create training and validation dataloaders
dataloaders_dict = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in ['train', 'val']}

# Detect if we have a GPU available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
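
The ImageFolder datasets above assume a directory layout with one subfolder
per class, as in the hymenoptera (ants/bees) dataset used here:

# ./data/hymenoptera_data/
#     train/
#         ants/*.jpg
#         bees/*.jpg
#     val/
#         ants/*.jpg
#         bees/*.jpg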
"""
Create the Optimizer
"""
# Send the model to GPU
model_ft = model_ft.to(device)

# Gather the parameters to be optimized/updated in this run.
# If we are finetuning we will be updating all parameters.
# However, if we are doing the feature extraction method, we will only update
# the parameters that we have just initialized, i.e. the parameters with
# requires_grad set to True.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            params_to_update.append(param)
            print("\t", name)
else:
    for name, param in model_ft.named_parameters():
        if param.requires_grad == True:
            print("\t", name)

# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(params_to_update, lr=0.001, momentum=0.9)
"""
Run Training and Validation
"""
# Set up the loss function
criterion = nn.CrossEntropyLoss()

# Train and evaluate
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft, num_epochs=num_epochs, is_inception=(model_name=="inception"))
"""
Comparison with a Model Trained from Scratch
"""
# Initialize the non-pretrained version of the model used for this run
scratch_model,_ = initialize_model(model_name, num_classes, feature_extract=False, use_pretrained=False)
scratch_model = scratch_model.to(device)
scratch_optimizer = optim.SGD(scratch_model.parameters(), lr=0.001, momentum=0.9)
scratch_criterion = nn.CrossEntropyLoss()
_,scratch_hist = train_model(scratch_model, dataloaders_dict, scratch_criterion, scratch_optimizer, num_epochs=num_epochs, is_inception=(model_name=="inception"))

# Plot the training curves of validation accuracy vs. number of training
# epochs for the transfer learning method and the model trained from scratch
ohist = [h.cpu().numpy() for h in hist]
shist = [h.cpu().numpy() for h in scratch_hist]

plt.title("Validation Accuracy vs. Number of Training Epochs")
plt.xlabel("Training Epochs")
plt.ylabel("Validation Accuracy")
plt.plot(range(1,num_epochs+1),ohist,label="Pretrained")
plt.plot(range(1,num_epochs+1),shist,label="Scratch")
plt.ylim((0,1.))
plt.xticks(np.arange(1, num_epochs+1, 1.0))
plt.legend()
plt.show()

Spatial Transformer Networks

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np

plt.ion()   # interactive mode

"""
Loading the data
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Training dataset
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='./data/mnist/MNIST', train=True, download=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), batch_size=64, shuffle=True, num_workers=4)
# Test dataset
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST(root='./data/mnist/MNIST', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])), batch_size=64, shuffle=True, num_workers=4)
"""
Spatial transformer networks
Structure:
- The localisation network is a regular CNN which regresses the
  transformation parameters. The transformation is never learned explicitly
  from this dataset; instead, the network automatically learns the spatial
  transformations that enhance global accuracy.
- The grid generator generates, in the input image, a grid of coordinates
  corresponding to each pixel of the output image.
- The sampler uses the parameters of the transformation and applies it to
  the input image.
"""
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

        # Spatial transformer localization network
        self.localization = nn.Sequential(
            nn.Conv2d(1, 8, kernel_size=7),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True),
            nn.Conv2d(8, 10, kernel_size=5),
            nn.MaxPool2d(2, stride=2),
            nn.ReLU(True)
        )

        # Regressor for the 3 * 2 affine matrix
        self.fc_loc = nn.Sequential(
            nn.Linear(10 * 3 * 3, 32),
            nn.ReLU(True),
            nn.Linear(32, 3 * 2)
        )

        # Initialize the weights/bias with the identity transformation
        self.fc_loc[2].weight.data.zero_()
        self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))

    # Spatial transformer network forward function
    def stn(self, x):
        xs = self.localization(x)
        xs = xs.view(-1, 10 * 3 * 3)
        theta = self.fc_loc(xs)
        theta = theta.view(-1, 2, 3)

        grid = F.affine_grid(theta, x.size())
        x = F.grid_sample(x, grid)

        return x

    def forward(self, x):
        # transform the input
        x = self.stn(x)

        # perform the usual forward pass
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Net().to(device)

"""
Training the model
"""
optimizer = optim.SGD(model.parameters(), lr=0.01)


def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)

        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % 500 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))


#
# A simple test procedure to measure the STN performance on MNIST.
#
def test():
    with torch.no_grad():
        model.eval()
        test_loss = 0
        correct = 0
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)

            # sum up batch loss
            test_loss += F.nll_loss(output, target, size_average=False).item()
            # get the index of the max log-probability
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)
        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'
              .format(test_loss, correct, len(test_loader.dataset),
                      100. * correct / len(test_loader.dataset)))
"""
Visualizing the STN results
"""
def convert_image_np(inp):
    """Convert a Tensor to a numpy image."""
    inp = inp.numpy().transpose((1, 2, 0))
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    inp = std * inp + mean
    inp = np.clip(inp, 0, 1)
    return inp


# We want to visualize the output of the spatial transformer layer after
# training; we visualize a batch of input images and the corresponding
# transformed batch using the STN.
def visualize_stn():
    with torch.no_grad():
        # Get a batch of training data
        data = next(iter(test_loader))[0].to(device)

        input_tensor = data.cpu()
        transformed_input_tensor = model.stn(data).cpu()

        in_grid = convert_image_np(torchvision.utils.make_grid(input_tensor))
        out_grid = convert_image_np(torchvision.utils.make_grid(transformed_input_tensor))

        # Plot the results side by side
        f, axarr = plt.subplots(1, 2)
        axarr[0].imshow(in_grid)
        axarr[0].set_title('Dataset Images')
        axarr[1].imshow(out_grid)
        axarr[1].set_title('Transformed Images')


for epoch in range(1, 20 + 1):
    train(epoch)
    test()

# Visualize the STN transformation on some input batch
visualize_stn()

plt.ioff()
plt.show()

Neural Transfer Using PyTorch

from __future__ import print_function

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

from PIL import Image
import matplotlib.pyplot as plt

import torchvision.transforms as transforms
import torchvision.models as models

import copy
"""
Underlying Principle
We define two distances, one for the content (D_C) and one for the style
(D_S). D_C measures how different the content is between two images, while
D_S measures how different the style is between two images. Then, we take a
third image, the input, and transform it so as to minimize both its content
distance from the content image and its style distance from the style image.
Now we can start the image style transfer.
"""
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
"""
Loading the Images
The original PIL images have values between 0 and 255, but when transformed
into torch tensors, their values are converted to lie between 0 and 1. The
images also need to be resized to the same dimensions. An important detail to
note is that neural networks from the torch library are trained with tensor
values ranging from 0 to 1. If you try to feed the networks images with
values of 0 to 255, the activated feature maps will be unable to sense the
intended content and style. By contrast, pre-trained networks from the Caffe
library are trained with 0 to 255 tensor images.
"""
imsize = 512 if torch.cuda.is_available() else 128  # use small size if no GPU

loader = transforms.Compose([
    transforms.Resize(imsize),  # scale the imported image
    transforms.ToTensor()])     # transform it into a torch tensor


def image_loader(image_name):
    image = Image.open(image_name)
    # fake batch dimension required to fit the network's input dimensions
    image = loader(image).unsqueeze(0)
    return image.to(device, torch.float)


style_img = image_loader("./data/images//neural-style/picasso.jpg")
content_img = image_loader("./data/images//neural-style/dancing.jpg")

assert style_img.size() == content_img.size(), \
    "we need to import style and content images of the same size"

# Now, let's create a function that displays an image by reconverting a copy
# of it to PIL format and showing the copy with plt.imshow. We will try
# displaying the content and style images to ensure they were imported
# correctly.
unloader = transforms.ToPILImage()  # reconvert into PIL image

plt.ion()


def imshow(tensor, title=None):
    image = tensor.cpu().clone()  # we clone the tensor to not do changes on it
    image = image.squeeze(0)      # remove the fake batch dimension
    image = unloader(image)
    plt.imshow(image)
    if title is not None:
        plt.title(title)
    plt.pause(0.001)  # pause a bit so that plots are updated


plt.figure()
imshow(style_img, title='Style Image')

plt.figure()
imshow(content_img, title='Content Image')
"""
Content Loss
The content loss is a function that represents a weighted version of the
content distance for an individual layer.
"""
class ContentLoss(nn.Module):

    def __init__(self, target,):
        super(ContentLoss, self).__init__()
        # we 'detach' the target content from the tree used to dynamically
        # compute the gradient: this is a stated value, not a variable.
        # Otherwise the forward method of the criterion would throw an error.
        self.target = target.detach()

    def forward(self, input):
        self.loss = F.mse_loss(input, self.target)
        return input
"""
Style Loss
It acts as a transparent layer in the network and computes the style loss of
the layer it follows.
"""
def gram_matrix(input):
    a, b, c, d = input.size()  # a = batch size (=1)
    # b = number of feature maps
    # (c, d) = dimensions of a feature map (N = c*d)

    features = input.view(a * b, c * d)  # resize F_XL into \hat F_XL

    G = torch.mm(features, features.t())  # compute the gram product

    # we 'normalize' the values of the gram matrix
    # by dividing by the number of elements in each feature map
    return G.div(a * b * c * d)


class StyleLoss(nn.Module):

    def __init__(self, target_feature):
        super(StyleLoss, self).__init__()
        self.target = gram_matrix(target_feature).detach()

    def forward(self, input):
        G = gram_matrix(input)
        self.loss = F.mse_loss(G, self.target)
        return input
"""
Importing the Model
"""
cnn = models.vgg19(pretrained=True).features.to(device).eval()
"""
Image Preprocessing
"""
cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device)


# create a module to normalize the input image so we can easily put it in an
# nn.Sequential
class Normalization(nn.Module):
    def __init__(self, mean, std):
        super(Normalization, self).__init__()
        # .view the mean and std to make them [C x 1 x 1] so that they can
        # directly work with image Tensor of shape [B x C x H x W].
        # B is batch size, C is number of channels, H is height and W is width.
        self.mean = torch.tensor(mean).view(-1, 1, 1)
        self.std = torch.tensor(std).view(-1, 1, 1)

    def forward(self, img):
        # normalize img
        return (img - self.mean) / self.std
"""
Create a new Sequential module with the content losses and style losses
correctly inserted.
"""
# desired depth layers to compute style/content losses:
content_layers_default = ['conv_4']
style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5']


def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img,
                               content_layers=content_layers_default,
                               style_layers=style_layers_default):
    cnn = copy.deepcopy(cnn)

    # normalization module
    normalization = Normalization(normalization_mean, normalization_std).to(device)

    # just in order to have iterable access to the list of content/style losses
    content_losses = []
    style_losses = []

    # assuming that cnn is an nn.Sequential, we make a new nn.Sequential to
    # put in the modules that are supposed to be activated sequentially
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            # The in-place version doesn't play very nicely with the
            # ContentLoss and StyleLoss we insert below,
            # so we replace it with an out-of-place one here.
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add the content loss:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add the style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss):
            break

    model = model[:(i + 1)]

    return model, style_losses, content_losses
"""
Selecting the Input Image
"""
input_img = content_img.clone()
# if you want to use white noise instead, uncomment the following line:
# input_img = torch.randn(content_img.data.size(), device=device)

# add the original input image to the figure:
plt.figure()
imshow(input_img, title='Input Image')
"""
Gradient Descent
"""
def get_input_optimizer(input_img):
    # this line shows that the input is a parameter that requires a gradient
    optimizer = optim.LBFGS([input_img.requires_grad_()])
    return optimizer


"""
Each time the network is run, the input values are clamped to between 0 and 1.
"""
def run_style_transfer(cnn, normalization_mean, normalization_std,
                       content_img, style_img, input_img, num_steps=300,
                       style_weight=1000000, content_weight=1):
    """Run the style transfer."""
    print('Building the style transfer model..')
    model, style_losses, content_losses = get_style_model_and_losses(cnn,
        normalization_mean, normalization_std, style_img, content_img)
    optimizer = get_input_optimizer(input_img)

    print('Optimizing..')
    run = [0]
    while run[0] <= num_steps:

        def closure():
            # correct the values of the updated input image
            input_img.data.clamp_(0, 1)

            optimizer.zero_grad()
            model(input_img)
            style_score = 0
            content_score = 0

            for sl in style_losses:
                style_score += sl.loss
            for cl in content_losses:
                content_score += cl.loss

            style_score *= style_weight
            content_score *= content_weight

            loss = style_score + content_score
            loss.backward()

            run[0] += 1
            if run[0] % 50 == 0:
                print("run {}:".format(run))
                print('Style Loss : {:4f} Content Loss: {:4f}'.format(
                    style_score.item(), content_score.item()))
                print()

            return style_score + content_score

        optimizer.step(closure)

    # a last correction...
    input_img.data.clamp_(0, 1)

    return input_img
"""
Finally, run the algorithm.
"""
output = run_style_transfer(cnn, cnn_normalization_mean, cnn_normalization_std,
                            content_img, style_img, input_img)

plt.figure()
imshow(output, title='Output Image')

# sphinx_gallery_thumbnail_number = 4
plt.ioff()
plt.show()

Adversarial Example Generation

from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt"""
輸入
"""
epsilons = [0, .05, .1, .15, .2, .25, .3]
pretrained_model = "data/lenet_mnist_model.pth"
use_cuda=True"""
"""
Model Under Attack
"""
# LeNet model definition
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


# Declare the MNIST test dataset and dataloader
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, download=True, transform=transforms.Compose([
        transforms.ToTensor(),
    ])),
    batch_size=1, shuffle=True)

# Define which device we are using
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device("cuda" if (use_cuda and torch.cuda.is_available()) else "cpu")

# Initialize the network
model = Net().to(device)

# Load the pretrained model
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()
"""
FGSM Attack
"""
# FGSM attack code
def fgsm_attack(image, epsilon, data_grad):
    # Collect the element-wise sign of the data gradient
    sign_data_grad = data_grad.sign()
    # Create the perturbed image by adjusting each pixel of the input image
    perturbed_image = image + epsilon*sign_data_grad
    # Add clipping to maintain the [0, 1] range
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    # Return the perturbed image
    return perturbed_image
"""
Testing Function
"""
def test(model, device, test_loader, epsilon):
    # Accuracy counter
    correct = 0
    adv_examples = []

    # Loop over all examples in the test set
    for data, target in test_loader:
        # Send the data and label to the device
        data, target = data.to(device), target.to(device)

        # Set the requires_grad attribute of the tensor; important for the attack
        data.requires_grad = True

        # Forward pass the data through the model
        output = model(data)
        init_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability

        # If the initial prediction is wrong, don't bother attacking, just move on
        if init_pred.item() != target.item():
            continue

        # Calculate the loss
        loss = F.nll_loss(output, target)

        # Zero all existing gradients
        model.zero_grad()

        # Calculate gradients of the model in the backward pass
        loss.backward()

        # Collect the data gradient
        data_grad = data.grad.data

        # Call the FGSM attack
        perturbed_data = fgsm_attack(data, epsilon, data_grad)

        # Re-classify the perturbed image
        output = model(perturbed_data)

        # Check for success
        final_pred = output.max(1, keepdim=True)[1]  # get the index of the max log-probability
        if final_pred.item() == target.item():
            correct += 1
            # Special case for saving 0-epsilon examples
            if (epsilon == 0) and (len(adv_examples) < 5):
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))
        else:
            # Save some adversarial examples for visualization later
            if len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((init_pred.item(), final_pred.item(), adv_ex))

    # Calculate the final accuracy for this epsilon
    final_acc = correct/float(len(test_loader))
    print("Epsilon: {}\tTest Accuracy = {} / {} = {}".format(epsilon, correct, len(test_loader), final_acc))

    # Return the accuracy and the adversarial examples
    return final_acc, adv_examples
"""
Run the Attack
"""
accuracies = []
examples = []

# Run the test for each epsilon
for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)
"""
Accuracy vs Epsilon
"""
plt.figure(figsize=(5,5))
plt.plot(epsilons, accuracies, "*-")
plt.yticks(np.arange(0, 1.1, step=0.1))
plt.xticks(np.arange(0, .35, step=0.05))
plt.title("Accuracy vs Epsilon")
plt.xlabel("Epsilon")
plt.ylabel("Accuracy")
plt.show()"""
樣本對抗性示例
"""
# 在每個epsilon上繪制幾個對抗樣本的例子
cnt = 0
plt.figure(figsize=(8,10))
for i in range(len(epsilons)):
    for j in range(len(examples[i])):
        cnt += 1
        plt.subplot(len(epsilons), len(examples[0]), cnt)
        plt.xticks([], [])
        plt.yticks([], [])
        if j == 0:
            plt.ylabel("Eps: {}".format(epsilons[i]), fontsize=14)
        orig, adv, ex = examples[i][j]
        plt.title("{} -> {}".format(orig, adv))
        plt.imshow(ex, cmap="gray")
plt.tight_layout()
plt.show()
