Table of Contents
- Adding Noise
- Measuring Perturbations
- Worked Example
- Summary
- Advanced Indexing
- Variable Names
- Code
- Complete Code
Adding Noise
Operation: the frequency-domain perturbation is converted to a pixel-domain perturbation via trans() and added to the original image (trans returns the result of mapping the frequency domain back to the pixel domain).
expanded = (images_batch[remaining_indices] +        # original images (remaining samples)
            trans(                                   # convert the frequency perturbation to a pixel perturbation (trans = inverse DCT)
                self.expand_vector(                  # expand the frequency perturbation vector
                    x[remaining_indices],            # current perturbation vectors (remaining samples)
                    expand_dims                      # expansion size
                )
            )).clamp(0, 1)                           # clamp pixel values to [0, 1]
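For context, here is a minimal standalone sketch of what expand_vector does (taken from the full implementation later in this post, with the device argument dropped): it reshapes the flat perturbation vector into a small size × size block per channel and zero-pads it into a full-size image tensor, so only the top-left (low-frequency) corner carries the perturbation. For the DCT attack, trans (utils.block_idct in the full code) then maps this zero-padded tensor back to pixel space. The shapes below assume CIFAR-10 (3×32×32).

import torch

def expand_vector(x, size, image_size=32):
    # x: (batch, 3*size*size) flat perturbation vectors
    batch_size = x.size(0)
    x = x.view(-1, 3, size, size)                  # reshape into per-channel low-frequency blocks
    z = torch.zeros(batch_size, 3, image_size, image_size)
    z[:, :, :size, :size] = x                      # place the block in the top-left (low-frequency) corner
    return z

# e.g. a batch of 4 perturbation vectors over a 10x10 low-frequency region
z = expand_vector(torch.randn(4, 3 * 10 * 10), size=10)
print(z.shape)  # torch.Size([4, 3, 32, 32])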
Measuring Perturbations
l2_norms = torch.zeros(batch_size, max_iters, device=self.device)    # L2 perturbation norms
linf_norms = torch.zeros(batch_size, max_iters, device=self.device)  # L∞ perturbation norms
Typical use cases
- Unrestricted attack: only the attack success rate matters; no norm constraint.
- L2-constrained attack: the total perturbation energy must stay below a threshold.
- L∞-constrained attack: every pixel's change must stay below a threshold (the more common setting).
- L2 norm (Euclidean norm):
  the square root of the sum of the squared per-pixel perturbation values.
  Formula: ∥δ∥₂ = √(Σᵢ δᵢ²)
  Intuition: measures the overall energy (magnitude) of the perturbation.
- L∞ norm (infinity norm):
  the maximum of the absolute per-pixel perturbation values.
  Formula: ∥δ∥∞ = maxᵢ |δᵢ|
  Intuition: measures the largest change to any single pixel, so it is sensitive to large local perturbations.

Why these norms matter in adversarial attacks
- Small L2 norm:
  the perturbation is spread across the whole image;
  the human eye barely notices it (it looks like Gaussian noise).
  Example: every pixel is changed slightly.
- Small L∞ norm:
  no single pixel is changed by a large amount;
  the human eye is more sensitive to abrupt local changes.
  Example: every pixel changes by at most 0.01.
- Assessing attack stealthiness:
  a small L2 norm means the perturbation is hard to perceive overall;
  a small L∞ norm means there are no obvious outlier pixels.
Worked Example
Suppose the perturbation of a 3×3 image is:
δ = [[0.1, 0.2, 0.1],
     [0.3, 0.0, 0.1],
     [0.1, 0.1, 0.2]]
L2 norm = √(0.1² + 0.2² + 0.1² + 0.3² + 0.0² + 0.1² + 0.1² + 0.1² + 0.2²) = √0.22 ≈ 0.47
L∞ norm = max(0.1, 0.2, 0.1, 0.3, 0.0, 0.1, 0.1, 0.1, 0.2) = 0.3
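A quick sanity check of these numbers with PyTorch (a minimal sketch; the tensor values are just the example above):

import torch

delta = torch.tensor([[0.1, 0.2, 0.1],
                      [0.3, 0.0, 0.1],
                      [0.1, 0.1, 0.2]])

l2 = delta.flatten().norm(p=2)       # square root of the sum of squared entries
linf = delta.flatten().abs().max()   # largest absolute entry

print(l2.item())    # ≈ 0.469
print(linf.item())  # 0.3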
Summary
The two norms give complementary views:
L2: focuses on the global perturbation energy.
L∞: focuses on the largest local change.
Advanced Indexing
- The code uses boolean (mask) indexing.
Filtering elements with a boolean tensor:
x = torch.tensor([1, 2, 3, 4, 5])
mask = x > 3                 # True only for elements greater than 3
print(mask)                  # tensor([False, False, False, True, True])
print(x[mask])               # tensor([4, 5])
This is a direct and powerful way to select elements that satisfy a condition.
left_indices = remaining_indices[improved]   # advanced indexing used to filter samples
remaining_indices = [0, 1, 2, 3]             # indices of the remaining samples
improved = [True, False, True, False]        # improvement status
left_indices = [0, 2]                        # indices of the improved samples
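Putting the two together as tensors (a minimal sketch mirroring how SimBA filters the improved samples; the values are just the toy example above):

import torch

remaining_indices = torch.tensor([0, 1, 2, 3])        # samples not yet attacked successfully
improved = torch.tensor([True, False, True, False])   # whether the step improved each sample

left_indices = remaining_indices[improved]            # boolean indexing keeps only the improved ones
print(left_indices)                                   # tensor([0, 2])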
變量名
batch_size = x.size(0)#這一批次圖片的數量
------------------------------------
remaining  # ne() gives 1 (True, attack not yet successful) and 0 (False, attack successful)
# wherever remaining is used, only the 1s are counted, e.g. remaining.sum()
-----------------------------------------------------
# remaining is a 1-D tensor holding only 0 (attack successful) and 1 (attack not yet successful);
# remaining_indices holds the indices of the samples still to be attacked, updated each iteration
remaining_indices = torch.arange(0, batch_size, device=self.device).long()
--------------------------------------------
# adv: the set of adversarial examples
--------------------------------------------------------
# x holds (sample, pixel values for each of that sample's dimensions (channels*H*W)); one row per sample
x = torch.zeros(batch_size, n_dims, device=self.device)
---------------------------------------------------------
# create the logging tensors (a tensor is just a matrix here)
probs = torch.zeros(batch_size, max_iters, device=self.device)       # probability of the target label
succs = torch.zeros(batch_size, max_iters, device=self.device)       # success flags
queries = torch.zeros(batch_size, max_iters, device=self.device)     # query counts
l2_norms = torch.zeros(batch_size, max_iters, device=self.device)    # L2 perturbation norms
linf_norms = torch.zeros(batch_size, max_iters, device=self.device)  # L∞ perturbation norms
# all_probs: for each sample and each iteration, the model's predicted probabilities over all 10 classes (CIFAR-10, hence 10)
all_probs = torch.zeros(batch_size, max_iters, 10, device=self.device)  # probabilities of all classes
------------------------------------------------------
# a possible random sequence of coordinates: (channel index, row index, column index)
# [ (0,0,0), (2,2,2), (1,1,0), (0,1,2), ... ]
indices = torch.randperm(3 * freq_dims * freq_dims, device=self.device)[:max_iters]
# indices holds the order of dimensions to perturb, shared by every image in the batch
for k in range(max_iters):
    dim = indices[k]        # index into the perturbation-position tensor for this iteration
    # dim is the single dimension to modify in this iteration
    diff[:, dim] = epsilon  # each row is one sample (image); set column dim of every row to epsilon
# the samples whose attack has not yet succeeded all get the diff perturbation vector subtracted (and added)
# generate perturbation vectors in both directions
left_vec = x[remaining_indices] - diff    # samples perturbed in the negative direction
right_vec = x[remaining_indices] + diff   # samples perturbed in the positive direction
------------------------------------------
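To make the two-direction probe concrete, the sketch below runs one SimBA coordinate step on a toy setup. It is a minimal sketch under stated assumptions: the linear "model", the get_probs helper, the chosen coordinate dim, and the pixel-space (no DCT), untargeted setting are all illustrative stand-ins, not the post's actual network.

import torch
import torch.nn.functional as F

torch.manual_seed(0)
model = torch.nn.Linear(3 * 32 * 32, 10)          # toy stand-in for the real classifier

def get_probs(images, labels):
    # probability the model assigns to each sample's label
    logits = model(images.view(images.size(0), -1))
    return F.softmax(logits, dim=-1)[torch.arange(images.size(0)), labels]

images = torch.rand(4, 3, 32, 32)
labels = torch.randint(0, 10, (4,))
x = torch.zeros(4, 3 * 32 * 32)                   # accumulated perturbation, one row per sample
prev_probs = get_probs(images, labels)

dim, epsilon = 123, 0.2                           # coordinate chosen for this iteration (illustrative)
diff = torch.zeros_like(x)
diff[:, dim] = epsilon

left_vec = x - diff
right_vec = x + diff

# probe the negative direction: keep it where the label probability drops (untargeted attack)
left_probs = get_probs((images + left_vec.view_as(images)).clamp(0, 1), labels)
improved = left_probs.lt(prev_probs)

# probe the positive direction: keep it where it beats both the original and the negative step
right_probs = get_probs((images + right_vec.view_as(images)).clamp(0, 1), labels)
right_improved = right_probs.lt(torch.min(prev_probs, left_probs))

x[improved] = left_vec[improved]                  # apply the negative step where it helped
x[right_improved] = right_vec[right_improved]     # apply the positive step where it helped even more
print(improved, right_improved)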
# attack parameters
max_iters = 200        # maximum number of iterations (the complete code below uses 1000); should be less than 3*freq_dims*freq_dims
freq_dims = 10         # frequency dimensions (the complete code below uses 32)
stride = 7             # stride
epsilon = 0.2          # perturbation magnitude
targeted = False       # untargeted attack
pixel_attack = False   # pixel_attack=False means the DCT attack is used
Code
# load only the first batch of images and labels
images, labels = next(iter(testloader))
images = images.to(device)
labels = labels.to(device)
Complete Code
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Subset
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
import numpy as np
import os
import time
import utils  # make sure the utils module is available

# define the same network architecture as the training code
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 3, padding=1)
        self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
        self.pool1 = nn.MaxPool2d(2, 2)
        self.bn1 = nn.BatchNorm2d(64)  # 64 is the number of input channels
        self.relu1 = nn.ReLU()
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.conv4 = nn.Conv2d(128, 128, 3, padding=1)
        self.pool2 = nn.MaxPool2d(2, 2, padding=1)
        self.bn2 = nn.BatchNorm2d(128)
        self.relu2 = nn.ReLU()
        self.conv5 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv6 = nn.Conv2d(128, 128, 3, padding=1)
        self.conv7 = nn.Conv2d(128, 128, 1, padding=1)
        self.pool3 = nn.MaxPool2d(2, 2, padding=1)
        self.bn3 = nn.BatchNorm2d(128)
        self.relu3 = nn.ReLU()
        self.conv8 = nn.Conv2d(128, 256, 3, padding=1)
        self.conv9 = nn.Conv2d(256, 256, 3, padding=1)
        self.conv10 = nn.Conv2d(256, 256, 1, padding=1)
        self.pool4 = nn.MaxPool2d(2, 2, padding=1)
        self.bn4 = nn.BatchNorm2d(256)
        self.relu4 = nn.ReLU()
        self.conv11 = nn.Conv2d(256, 512, 3, padding=1)
        self.conv12 = nn.Conv2d(512, 512, 3, padding=1)
        self.conv13 = nn.Conv2d(512, 512, 1, padding=1)
        self.pool5 = nn.MaxPool2d(2, 2, padding=1)
        self.bn5 = nn.BatchNorm2d(512)
        self.relu5 = nn.ReLU()
        self.fc14 = nn.Linear(512 * 4 * 4, 1024)
        self.drop1 = nn.Dropout2d()
        self.fc15 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout2d()
        self.fc16 = nn.Linear(1024, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.pool1(x)
        x = self.bn1(x)
        x = self.relu1(x)  # ReLU is applied element-wise to the tensor
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.pool2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.conv5(x)
        x = self.conv6(x)
        x = self.conv7(x)
        x = self.pool3(x)
        x = self.bn3(x)
        x = self.relu3(x)
        x = self.conv8(x)
        x = self.conv9(x)
        x = self.conv10(x)
        x = self.pool4(x)
        x = self.bn4(x)
        x = self.relu4(x)
        x = self.conv11(x)
        x = self.conv12(x)
        x = self.conv13(x)
        x = self.pool5(x)
        x = self.bn5(x)
        x = self.relu5(x)
        x = x.view(-1, 512 * 4 * 4)  # flatten
        x = F.relu(self.fc14(x))
        x = self.drop1(x)
        x = F.relu(self.fc15(x))
        x = self.drop2(x)
        x = self.fc16(x)  # raw logits, no normalization here (softmax is applied later in get_probs)
        return x


class SimBA:
    def __init__(self, model, dataset, image_size):
        self.model = model
        self.dataset = dataset
        self.image_size = image_size
        self.model.eval()
        self.device = next(model.parameters()).device  # the device the model lives on

    # expand_vector serves both modes: the pixel attack and the DCT frequency-domain attack
    # (for the DCT attack, the input x should already be in the frequency domain).
    # x: the perturbed sample vectors; size: number of low-frequency dimensions to modify.
    # Note: size <= self.image_size is required.
    def expand_vector(self, x, size):  # restricts the perturbation to the low-frequency block
        batch_size = x.size(0)
        x = x.view(-1, 3, size, size)  # from a flat vector (batch_size, 3*size*size) to a 4-D tensor (batch_size, 3, size, size)
        z = torch.zeros(batch_size, 3, self.image_size, self.image_size, device=self.device)  # zero-pad from size up to image_size
        z[:, :, :size, :size] = x
        return z

    def normalize(self, x):
        return utils.apply_normalization(x, self.dataset)

    def get_probs(self, x, y):  # returns the predicted probability of the given labels
        # make sure the inputs are on the right device
        x = x.to(self.device)
        y = y.to(self.device)
        output = self.model(self.normalize(x))
        probs = F.softmax(output, dim=-1)
        # select the probability of the corresponding label for each sample
        probs_selected = probs[torch.arange(probs.size(0)), y]
        return probs_selected

    def get_all_probs(self, x):  # returns the predicted probabilities of all labels
        x = x.to(self.device)
        output = self.model(self.normalize(x))
        probs = F.softmax(output, dim=-1)
        return probs

    def get_preds(self, x):  # returns the label (index) with the highest predicted probability
        x = x.to(self.device)
        output = self.model(self.normalize(x))  # normalize the images and feed them to the model
        _, preds = output.data.max(1)
        return preds

    # Attack function. Whether the attack is targeted depends on labels_batch:
    # the original labels (untargeted) or the target labels (targeted).
    def simba_batch(self, images_batch, labels_batch, max_iters, freq_dims, stride, epsilon, linf_bound=0.0,
                    order='rand', targeted=False, pixel_attack=False, log_every=1):
        # Arguments:
        #   images_batch        image tensor (num_samples, channels, H, W)
        #   labels_batch        label tensor
        #   max_iters           maximum number of perturbation attempts
        #   freq_dims           number of low-frequency dimensions
        #   stride              step size used when order='strided'
        #   epsilon             perturbation magnitude
        #   linf_bound=0.0      L∞ bound
        #   order='rand'        'rand': random order (perturb only the low-frequency block)
        #   targeted=False      untargeted attack
        #   pixel_attack=False  True: attack in pixel space; False: attack in the frequency (DCT) domain
        #   log_every=1         print a log line every log_every iterations
        # move labels_batch and images_batch to self.device: model and data must be on the same (GPU) device
        images_batch = images_batch.to(self.device)
        labels_batch = labels_batch.to(self.device)
        batch_size = images_batch.size(0)
        image_size = images_batch.size(2)
        assert self.image_size == image_size
        # order in which the DCT coefficient matrix is perturbed
        if order == 'rand':
            indices = torch.randperm(3 * freq_dims * freq_dims, device=self.device)[:max_iters]
        elif order == 'diag':
            indices = utils.diagonal_order(image_size, 3)[:max_iters].to(self.device)
        elif order == 'strided':
            indices = utils.block_order(image_size, 3, initial_size=freq_dims, stride=stride)[:max_iters].to(self.device)
        else:
            indices = utils.block_order(image_size, 3)[:max_iters].to(self.device)
        if order == 'rand':
            expand_dims = freq_dims   # low-frequency attack only perturbs the freq_dims x freq_dims block
        else:
            expand_dims = image_size  # other orders pick points over the whole image
        n_dims = 3 * expand_dims * expand_dims
        # x holds (sample, pixel values for each of that sample's dimensions (channels*H*W)); one row per sample
        x = torch.zeros(batch_size, n_dims, device=self.device)
        # create the logging tensors (a tensor is just a matrix here)
        probs = torch.zeros(batch_size, max_iters, device=self.device)       # probability of the target label
        succs = torch.zeros(batch_size, max_iters, device=self.device)       # success flags
        queries = torch.zeros(batch_size, max_iters, device=self.device)     # query counts
        l2_norms = torch.zeros(batch_size, max_iters, device=self.device)    # L2 perturbation norms
        linf_norms = torch.zeros(batch_size, max_iters, device=self.device)  # L∞ perturbation norms
        # all_probs: for each sample and each iteration, the model's probabilities over all 10 classes (CIFAR-10)
        all_probs = torch.zeros(batch_size, max_iters, 10, device=self.device)
        # initial probabilities and predictions
        prev_probs = self.get_probs(images_batch, labels_batch)  # probabilities of the true class for each image
        preds = self.get_preds(images_batch)                     # predicted labels for the whole batch
        if pixel_attack:
            trans = lambda z: z
        else:
            # inverse DCT: maps the frequency domain back to the pixel domain
            # z: tensor whose top-left low-frequency block carries the perturbation
            # block_size: base block size of the DCT (block_size*block_size basis functions)
            # linf_bound: L∞ threshold
            trans = lambda z: utils.block_idct(z, block_size=image_size, linf_bound=linf_bound)
        # integer sequence 0..batch_size-1, used to mark which images have been attacked successfully;
        # remaining_indices later holds the indices of images not yet attacked successfully
        remaining_indices = torch.arange(0, batch_size, device=self.device).long()
        # core attack loop: up to max_iters perturbations per image
        for k in range(max_iters):
            dim = indices[k]  # index of the perturbation position for this iteration
            # add the pixel-domain perturbation to the original images (trans maps frequency to pixel domain);
            # expanded contains all samples that have not yet been attacked successfully
            expanded = (images_batch[remaining_indices] + trans(self.expand_vector(x[remaining_indices], expand_dims))).clamp(0, 1)  # clamp pixels to [0, 1]
            # compute the frequency perturbation tensor for the whole batch
            perturbation = trans(self.expand_vector(x, expand_dims))
            # view(batch_size, -1): (batch_size, 3, H, W) -> (batch_size, 3*H*W); norm(2, 1): L2 norm along dim 1
            l2_norms[:, k] = perturbation.view(batch_size, -1).norm(2, 1)
            # max(1): maximum along dim 1; [0] takes the values from the (values, indices) tuple
            linf_norms[:, k] = perturbation.view(batch_size, -1).abs().max(1)[0]
            # expanded (the remaining, already-perturbed images) is fed to the model to get predicted labels
            preds_next = self.get_preds(expanded)
            preds[remaining_indices] = preds_next  # update predictions for the images not yet attacked successfully
            if targeted:
                # targeted attack: preds is the predicted-label tensor, labels_batch the target labels;
                # ne gives 1 (True, not yet successful) and 0 (False, successful)
                remaining = preds.ne(labels_batch)
            else:
                # untargeted attack: labels_batch contains the original true labels
                remaining = preds.eq(labels_batch)
            # either way: 1 (True) = attack not yet successful, 0 (False) = attack successful
            current_all_probs = self.get_all_probs(expanded)
            # remaining_indices picks the sample, k the iteration; store the 10 class probabilities
            all_probs[remaining_indices, k] = current_all_probs
            # if adversarial examples have been found for every sample, wrap up
            if remaining.sum() == 0:
                # final set of adversarial examples
                adv = (images_batch + trans(self.expand_vector(x, expand_dims))).clamp(0, 1)
                # final predicted probability of the labels for adv:
                # untargeted: probability of the original label (should have dropped);
                # targeted: probability of the target label (should have risen)
                probs_k = self.get_probs(adv, labels_batch)
                probs[:, k:] = probs_k.unsqueeze(1).repeat(1, max_iters - k)
                succs[:, k:] = torch.ones(batch_size, max_iters - k, device=self.device)    # success at iteration k, so all later iterations are 1
                queries[:, k:] = torch.zeros(batch_size, max_iters - k, device=self.device)  # no further queries; total queries are summed later
                # fill the class probabilities for the remaining iterations
                for j in range(k, max_iters):
                    all_probs[:, j] = current_all_probs
                break  # stop early
            # otherwise keep going: update the indices of the remaining samples
            remaining_indices = torch.arange(0, batch_size, device=self.device)[remaining].long()
            if k > 0:
                # succs uses 1 = success, 0 = failure, the opposite of remaining, hence the negation
                succs[:, k] = ~remaining
            # perturbation vectors (2-D): (number of remaining samples, channels*H*W)
            diff = torch.zeros(remaining.sum(), n_dims, device=self.device)
            diff[:, dim] = epsilon  # each row is one sample (image); set column dim of every row to epsilon
            # for the samples whose attack has not yet succeeded, probe both directions
            left_vec = x[remaining_indices] - diff   # samples perturbed in the negative direction
            right_vec = x[remaining_indices] + diff  # samples perturbed in the positive direction
            # try the negative direction
            # adv: candidate adversarial examples; both the pixel-space and the frequency-space attack go through here
            adv = (images_batch[remaining_indices] + trans(self.expand_vector(left_vec, expand_dims))).clamp(0, 1)
            left_probs = self.get_probs(adv, labels_batch[remaining_indices])  # label probabilities after the negative step
            # initialize the query counters: one entry per sample, incremented to accumulate the total query count
            queries_k = torch.zeros(batch_size, device=self.device)
            queries_k[remaining_indices] += 1  # each remaining sample used one query
            if targeted:
                # targeted: we want the probability of the target label to increase
                improved = left_probs.gt(prev_probs[remaining_indices])  # boolean vector, one entry per remaining sample
            else:
                # untargeted: we want the probability of the original label to decrease
                improved = left_probs.lt(prev_probs[remaining_indices])
            # samples that did not improve need an extra query for the positive direction
            if improved.sum() < remaining_indices.size(0):  # size(0) is the number of remaining samples
                queries_k[remaining_indices[~improved]] += 1
            # try the positive direction, same as above
            adv = (images_batch[remaining_indices] + trans(self.expand_vector(right_vec, expand_dims))).clamp(0, 1)
            right_probs = self.get_probs(adv, labels_batch[remaining_indices])
            if targeted:
                right_improved = right_probs.gt(torch.max(prev_probs[remaining_indices], left_probs))
            else:
                right_improved = right_probs.lt(torch.min(prev_probs[remaining_indices], left_probs))
            probs_k = prev_probs.clone()
            # apply the negative-direction update where it improved things
            if improved.sum() > 0:  # at least one sample improved under the negative perturbation
                left_indices = remaining_indices[improved]  # indices of the improved samples (where improved is True)
                left_mask_remaining = improved.unsqueeze(1).repeat(1, n_dims)  # n_dims: size of the low-frequency block
                x[left_indices] = left_vec[left_mask_remaining].view(-1, n_dims)  # update x with the negative-direction perturbation
                probs_k[left_indices] = left_probs[improved]  # update the probabilities of the improved samples
            # apply the positive-direction update where it improved things
            if right_improved.sum() > 0:
                right_indices = remaining_indices[right_improved]
                right_mask_remaining = right_improved.unsqueeze(1).repeat(1, n_dims)
                x[right_indices] = right_vec[right_mask_remaining].view(-1, n_dims)
                probs_k[right_indices] = right_probs[right_improved]
            probs[:, k] = probs_k      # record the probabilities at iteration k
            queries[:, k] = queries_k  # record the query counts at iteration k
            prev_probs = probs[:, k]   # update the probabilities of the true class for each image
            # periodic logging, e.g. "Iteration 50: queries = 99.8000, prob = 0.5678, remaining = 0.6000"
            if (k + 1) % log_every == 0 or k == max_iters - 1:
                print('Iteration %d: queries = %.4f, prob = %.4f, remaining = %.4f' % (
                    k + 1, queries.sum(1).mean().item(), probs[:, k].mean().item(), remaining.float().mean().item()))
        # end of the for k in range(max_iters) loop
        # compute the final adversarial examples
        expanded = (images_batch + trans(self.expand_vector(x, expand_dims))).clamp(0, 1)
        preds = self.get_preds(expanded)  # final predictions
        if targeted:
            remaining = preds.ne(labels_batch)
        else:
            remaining = preds.eq(labels_batch)
        succs[:, max_iters - 1] = ~remaining
        return expanded, probs, succs, queries, l2_norms, linf_norms, all_probs, preds


# produces the simba_attack_results/data folder
def save_results(images, adversarial_images, labels, adversarial_preds, classes,
                 probs, all_probs, save_path="results"):
    # Arguments:
    #   images               original image tensor (shape: [batch, 3, H, W])
    #   adversarial_images   adversarial example tensor (same shape as the originals)
    #   labels               original labels (shape: [batch])
    #   adversarial_preds    predicted labels of the adversarial examples (shape: [batch])
    #   classes              list of class names (e.g. ['plane', 'car', ...])
    #   probs                probability of the original label over iterations (shape: [batch, max_iters])
    #   all_probs            probabilities of all classes (shape: [batch, max_iters, num_classes])
    #   save_path="results"  output directory, "results" by default

    # de-normalize: map the image from [-1, 1] back to [0, 1]
    def denormalize(tensor):
        tensor = tensor.clone()
        for t, m, s in zip(tensor, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]):
            t.mul_(s).add_(m)
        return tensor

    # create the directory structure
    os.makedirs(os.path.join(save_path, "images"), exist_ok=True)         # image files
    os.makedirs(os.path.join(save_path, "probabilities"), exist_ok=True)  # probability data

    # save the original and adversarial images
    for i in range(len(images)):  # iterate over all images in the batch
        # original image
        plt.imshow(denormalize(images[i]).permute(1, 2, 0).cpu().numpy())
        plt.title(f"Original: {classes[labels[i]]}")
        plt.savefig(os.path.join(save_path, "images", f"original_{i}.png"))
        plt.close()
        # adversarial example
        plt.imshow(denormalize(adversarial_images[i]).permute(1, 2, 0).cpu().numpy())
        plt.title(f"Adversarial: {classes[adversarial_preds[i]]}")
        plt.savefig(os.path.join(save_path, "images", f"adversarial_{i}.png"))
        plt.close()

    # save the probability data
    torch.save({
        'probs': probs,         # probability of the target label over iterations
        'all_probs': all_probs  # probabilities of all classes
    }, os.path.join(save_path, "probabilities", "probability_data.pt"))


def load_model(device):
    """Load the pretrained model."""
    net = Net().to(device)
    checkpoint_path = 'C:\\python\\project\\Resnet50\\SimBA\\weights.tar'  # path to the pretrained weight file
    if os.path.exists(checkpoint_path):  # check whether the weight file exists
        checkpoint = torch.load(checkpoint_path, map_location=device)
        net.load_state_dict(checkpoint['model_state_dict'])
        print("Pretrained model weights loaded successfully")
    else:
        print(f"Warning: weight file {checkpoint_path} not found")
        print("Randomly initialized weights will be used for prediction")
    net.eval()  # evaluation mode: the weights are no longer updated
    return net  # return the loaded model


def load_cifar10_data(batch_size=10, num_samples=10):  # defaults are used when the caller passes nothing
    """Load a subset of the CIFAR-10 test set."""
    # 1. preprocessing pipeline
    transform = transforms.Compose([
        transforms.ToTensor(),                                  # convert images to tensors
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # normalize
    ])
    # 2. load the full CIFAR-10 test set
    testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
    # 3. build a subset (the first num_samples samples of testset)
    subset_indices = list(range(num_samples))
    subset = Subset(testset, subset_indices)  # use the first num_samples samples of testset as the test set
    # 4. DataLoader for batched loading
    testloader = DataLoader(
        subset,                 # the data subset to load
        batch_size=batch_size,  # number of images per batch
        shuffle=False           # do not shuffle
    )
    return testloader, subset_indices  # the DataLoader and the subset indices (first num_samples samples of testset)


# fixed visualization function: produces simba_attack_results/visualizations with the original image,
# the adversarial example, and the perturbation visualization (enhanced 3x)
def visualize_attack(original_images, adversarial_images, labels, adversarial_preds, classes, idx,
                     save_path="attack_results", perturbation_factor=3):
    """Visualize the attack result for one sample."""
    def denormalize(tensor):
        tensor = tensor.clone()
        for t, m, s in zip(tensor, [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]):
            t.mul_(s).add_(m)
        return tensor

    # pick the current sample
    original_img = original_images[idx]
    adversarial_img = adversarial_images[idx]
    label = labels[idx]
    pred = adversarial_preds[idx]

    # make sure the conversion happens on the CPU
    original = denormalize(original_img).cpu().permute(1, 2, 0).numpy()
    adversarial = denormalize(adversarial_img).cpu().permute(1, 2, 0).numpy()
    perturbation = (adversarial - original) * perturbation_factor + 0.5

    fig, axes = plt.subplots(1, 3, figsize=(18, 6))
    axes[0].imshow(original)
    axes[0].set_title(f"Original image\ntrue class: {classes[label]}")
    axes[0].axis('off')

    pred_label = classes[pred.item()]
    axes[1].imshow(adversarial)
    axes[1].set_title(f"Adversarial example\npredicted class: {pred_label}")
    axes[1].axis('off')

    axes[2].imshow(perturbation)
    axes[2].set_title(f"Perturbation (x{perturbation_factor})")
    axes[2].axis('off')

    os.makedirs(save_path, exist_ok=True)
    plt.savefig(os.path.join(save_path, f"attack_visualization_{idx}.png"))
    plt.close()


# modified analysis function: outputs simba_attack_results/analysis with per-sample probability-change plots
def analyze_results(probs, succs, queries, all_probs, classes, save_path="analysis_results"):
    """Analyze the attack results."""
    # move everything to the CPU and detach from the computation graph
    probs = probs.cpu().detach()
    succs = succs.cpu().detach()
    queries = queries.cpu().detach()
    all_probs = all_probs.cpu().detach()
    os.makedirs(save_path, exist_ok=True)

    # attack success rate
    success_rate = succs[:, -1].float().mean().item() * 100
    print(f"\nFinal attack success rate: {success_rate:.2f}%")
    # average number of queries
    avg_queries = queries.sum(dim=1).float().mean().item()
    print(f"Average number of queries: {avg_queries:.2f}")

    # 1. combined probability-change plot
    plt.figure(figsize=(15, 10))
    num_samples = all_probs.size(0)
    for i in range(num_samples):
        plt.subplot((num_samples + 1) // 2, 2, i + 1)
        probs_data = all_probs[i].cpu().numpy()  # make sure the data is on the CPU
        for cls_idx, cls_name in enumerate(classes):
            plt.plot(probs_data[:, cls_idx], label=cls_name)
        plt.title(f"Sample {i + 1} probability changes")
        plt.xlabel("Iteration")
        plt.ylabel("Probability")
        plt.legend()
    plt.tight_layout()
    plt.savefig(os.path.join(save_path, "combined_prob_changes.png"))
    plt.close()


def main():
    # set the device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Using device: {device}")

    # load the model
    model = load_model(device)

    # load the data: batch_size samples per batch
    batch_size = 5     # small batch size for a quick demo; how many batches are used is decided below
    num_samples = 100  # use the first num_samples samples of the test set
    testloader, _ = load_cifar10_data(batch_size, num_samples)

    # load only the first batch of images and labels
    images, labels = next(iter(testloader))
    images = images.to(device)
    labels = labels.to(device)
    # to process every batch instead:
    # for batch_idx, (images, labels) in enumerate(testloader):
    #     print(f"Processing batch {batch_idx + 1}/{len(testloader)}")
    #     process_batch(images, labels)

    # CIFAR-10 class names
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    # initialize the SimBA attacker
    # model: the target model to attack
    # dataset='cifar10': dataset name (used for normalization)
    # image_size=32: image size (32x32 for CIFAR-10)
    simba = SimBA(model, dataset='cifar10', image_size=32)

    # attack parameters
    max_iters = 1000      # maximum number of iterations
    freq_dims = 32        # frequency dimensions
    stride = 7            # stride
    epsilon = 0.2         # perturbation magnitude
    targeted = False      # untargeted attack
    pixel_attack = False  # pixel_attack=False means the DCT attack is used

    print("\nStarting black-box attack...")
    start_time = time.time()

    # user-configurable output directory
    result_dir = "simba_attack_results"
    os.makedirs(result_dir, exist_ok=True)

    # note: simba_batch now returns 8 values, including preds
    adversarial_images, probs, succs, queries, l2_norms, linf_norms, all_probs, preds = simba.simba_batch(
        images_batch=images,
        labels_batch=labels,
        max_iters=max_iters,
        freq_dims=freq_dims,
        stride=stride,
        epsilon=epsilon,
        targeted=targeted,
        pixel_attack=pixel_attack,
        log_every=50)

    # analyze the results (saved under the chosen path)
    analyze_results(probs, succs, queries, all_probs, classes,
                    save_path=os.path.join(result_dir, "analysis"))

    # visualize the attack results: simba_attack_results/visualizations with the original image,
    # the adversarial example, and the perturbation visualization (enhanced 3x)
    for i in range(min(3, batch_size)):
        visualize_attack(images, adversarial_images, labels, preds, classes, i,
                         save_path=os.path.join(result_dir, "visualizations"))

    # save the full results
    save_results(images, adversarial_images, labels, preds, classes,
                 probs, all_probs, save_path=os.path.join(result_dir, "data"))

    # save the adversarial-example data
    torch.save({
        'original_images': images.cpu(),
        'adversarial_images': adversarial_images.cpu(),
        'labels': labels.cpu(),
        'adversarial_preds': preds.cpu(),
        'probs': probs.cpu(),
        'all_probs': all_probs.cpu(),
        'queries': queries.cpu(),
        'l2_norms': l2_norms.cpu(),
        'linf_norms': linf_norms.cpu()
    }, os.path.join(result_dir, 'adversarial_results.pth'))

    print(f"All results have been saved to the {result_dir} directory")


if __name__ == '__main__':
    main()