深度集成學習不均衡樣本圖像分類

用五個不同的網絡,然后對分類概率進行平均,得到分類結果。基本上分類精度可以提升10%

1.導入基本庫

import torch
import copy
import torch.nn as nn
import torchvision.models as models
from torchvision import datasets
from torchvision import transforms
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from transformers import AutoModelForImageClassification,AutoConfig

2.數據集準備

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Preprocessing: resize to 224x224, convert to tensor, normalize with
# ImageNet statistics (required by the ImageNet-pretrained backbones below).
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# ImageFolder expects one sub-directory per class under the root.
train_dataset = datasets.ImageFolder(root='./aug_datasets1', transform=transform)

# 80/20 random train/validation split.
dataset_size = len(train_dataset)
train_size = int(0.8 * dataset_size)
val_size = dataset_size - train_size
train_dataset, val_dataset = random_split(train_dataset, [train_size, val_size])

train_dataloader = DataLoader(train_dataset, batch_size=32, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=32, shuffle=False)

3.定義不同模型與對應的訓練策略

模型1 ResNet

class ResNet(nn.Module):
    """ResNet-50 backbone with a custom 2-layer classification head."""

    def __init__(self, num_classes=21, train=True):
        super(ResNet, self).__init__()
        if train:
            # Fixed NameError: only `torchvision.models as models` is imported,
            # so the weights enum must be reached through `models.`.
            self.resnet = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
        else:
            self.resnet = models.resnet50(weights=None)
        in_features = self.resnet.fc.in_features
        # Replace the ImageNet head with a dropout-regularized head for num_classes.
        self.resnet.fc = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes),
        )
        self.resnet.to(device)

    def forward(self, x):
        return self.resnet(x)

    def startTrain(self, train_loader, val_loader):
        """Train for 10 epochs, validate each epoch, checkpoint on best accuracy."""
        import os  # local import: keeps the file's top-level imports untouched
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.AdamW(self.parameters(), lr=1e-4, weight_decay=1e-4)
        # NOTE(review): T_max=50 but only 10 epochs run, so just the first fifth
        # of the cosine cycle is used — confirm this is intended.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
        best_acc = 0.0
        os.makedirs('./saved/resnet', exist_ok=True)  # torch.save fails if dir is missing
        print("Training ResNet.....")
        for epoch in range(10):
            self.train()
            train_loss = 0
            for images, labels in tqdm(train_loader):
                images, labels = images.to(device), labels.to(device)
                optimizer.zero_grad()
                logits = self(images)
                loss = criterion(logits, labels)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
            print(f"Epoch {epoch+1}/{10}, Train Loss: {train_loss/len(train_loader)}")
            scheduler.step()
            # ---- validation ----
            self.eval()
            val_loss = 0
            correct = 0
            total = 0
            with torch.no_grad():
                for images, labels in tqdm(val_loader):
                    images, labels = images.to(device), labels.to(device)
                    logits = self(images)
                    loss = criterion(logits, labels)
                    val_loss += loss.item()
                    _, predicted = torch.max(logits, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            print(f"Validation Loss: {val_loss/len(val_loader)}")
            acc = 100 * correct / total
            print(f"Accuracy: {acc}%")
            if acc > best_acc:
                best_acc = acc
                torch.save(self.state_dict(), './saved/resnet/model_weights_{}.pth'.format(best_acc))

模型2 EfficientNet

class EfficientNet(nn.Module):
    """EfficientNet-B2 backbone with a custom 2-layer classification head."""

    def __init__(self, num_classes=21, train=True):
        super(EfficientNet, self).__init__()
        if train:
            # Fixed NameError: only `torchvision.models as models` is imported,
            # so the weights enum must be reached through `models.`.
            self.effnet = models.efficientnet_b2(weights=models.EfficientNet_B2_Weights.IMAGENET1K_V1)
        else:
            self.effnet = models.efficientnet_b2(weights=None)
        # classifier is Sequential(Dropout, Linear); index 1 is the Linear layer.
        in_features = self.effnet.classifier[1].in_features
        self.effnet.classifier = nn.Sequential(
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes),
        )
        self.effnet.to(device)

    def forward(self, x):
        return self.effnet(x)

    def startTrain(self, train_loader, val_loader):
        """Train for 10 epochs with ReduceLROnPlateau driven by mean train loss."""
        import os  # local import: keeps the file's top-level imports untouched
        # NOTE(review): the original comment mentions focal loss for minority
        # classes, but plain cross-entropy is actually used — confirm intent.
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.AdamW(self.parameters(), lr=1e-4, weight_decay=1e-4)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=5)
        best_acc = 0.0
        os.makedirs('./saved/efficientnet', exist_ok=True)  # torch.save fails if dir is missing
        print("Training EfficientNet.....")
        for epoch in range(10):
            self.train()
            train_loss = 0
            for images, labels in tqdm(train_loader):
                images, labels = images.to(device), labels.to(device)
                optimizer.zero_grad()
                logits = self(images)
                loss = criterion(logits, labels)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
            print(f"Epoch {epoch+1}/{10}, Train Loss: {train_loss/len(train_loader)}")
            # ReduceLROnPlateau takes a metric; the mean train loss is used here.
            scheduler.step(train_loss / len(train_loader))
            # ---- validation ----
            self.eval()
            val_loss = 0
            correct = 0
            total = 0
            with torch.no_grad():
                for images, labels in tqdm(val_loader):
                    images, labels = images.to(device), labels.to(device)
                    logits = self(images)
                    loss = criterion(logits, labels)
                    val_loss += loss.item()
                    _, predicted = torch.max(logits, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            print(f"Validation Loss: {val_loss/len(val_loader)}")
            acc = 100 * correct / total
            print(f"Accuracy: {acc}%")
            if acc > best_acc:
                best_acc = acc
                torch.save(self.state_dict(), './saved/efficientnet/model_weights_{}.pth'.format(best_acc))

模型3 DenseNet

class DenseNet(nn.Module):
    """DenseNet-121 backbone with a BatchNorm + 2-layer classification head."""

    def __init__(self, num_classes=21, train=True):
        super(DenseNet, self).__init__()
        self.num_classes = num_classes
        if train:
            # Fixed NameError: only `torchvision.models as models` is imported,
            # so the weights enum must be reached through `models.`.
            self.densenet = models.densenet121(weights=models.DenseNet121_Weights.IMAGENET1K_V1)
        else:
            self.densenet = models.densenet121(weights=None)
        in_features = self.densenet.classifier.in_features
        # BatchNorm1d on the pooled features, then a dropout-regularized head.
        self.densenet.classifier = nn.Sequential(
            nn.BatchNorm1d(in_features),
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes),
        )
        self.densenet.to(device)

    def forward(self, x):
        return self.densenet(x)

    def startTrain(self, train_loader, val_loader):
        """Train for 10 epochs, validate each epoch, checkpoint on best accuracy."""
        import os  # local import: keeps the file's top-level imports untouched
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-4)
        # NOTE(review): T_max=50 but only 10 epochs run, so just the first fifth
        # of the cosine cycle is used — confirm this is intended.
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
        best_acc = 0.0
        os.makedirs('./saved/densenet', exist_ok=True)  # torch.save fails if dir is missing
        print("Training DenseNet.....")
        for epoch in range(10):
            self.train()
            train_loss = 0
            for images, labels in tqdm(train_loader):
                images, labels = images.to(device), labels.to(device)
                optimizer.zero_grad()
                logits = self(images)
                loss = criterion(logits, labels)
                loss.backward()
                optimizer.step()
                train_loss += loss.item()
            print(f"Epoch {epoch+1}/{10}, Train Loss: {train_loss/len(train_loader)}")
            scheduler.step()
            # ---- validation ----
            self.eval()
            val_loss = 0
            correct = 0
            total = 0
            with torch.no_grad():
                for images, labels in tqdm(val_loader):
                    images, labels = images.to(device), labels.to(device)
                    logits = self(images)
                    loss = criterion(logits, labels)
                    val_loss += loss.item()
                    _, predicted = torch.max(logits, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            print(f"Validation Loss: {val_loss/len(val_loader)}")
            acc = 100 * correct / total
            print(f"Accuracy: {acc}%")
            if acc > best_acc:
                best_acc = acc
                torch.save(self.state_dict(), './saved/densenet/model_weights_{}.pth'.format(best_acc))

模型4 ResNeXt

class ResNeXt(nn.Module):
    """ResNeXt-50 (32x4d) backbone with a BatchNorm + 2-layer classification head."""

    def __init__(self, num_classes=21, train=True):
        super(ResNeXt, self).__init__()
        if train:
            # Fixed NameError: only `torchvision.models as models` is imported,
            # so the weights enum must be reached through `models.`.
            self.resnext50 = models.resnext50_32x4d(weights=models.ResNeXt50_32X4D_Weights.IMAGENET1K_V1)
        else:
            self.resnext50 = models.resnext50_32x4d(weights=None)
        in_features = self.resnext50.fc.in_features
        self.resnext50.fc = nn.Sequential(
            nn.BatchNorm1d(in_features),
            nn.Linear(in_features, 512),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(512, num_classes),
        )
        self.resnext50.to(device)
        self.to(device)

    def forward(self, x):
        return self.resnext50(x)

    def startTrain(self, train_loader, val_loader):
        """Train for 10 epochs with a OneCycleLR schedule, checkpoint on best accuracy."""
        import os  # local import: keeps the file's top-level imports untouched
        optimizer = torch.optim.AdamW(self.parameters(), lr=1e-4)
        # Fixed scheduler misuse: OneCycleLR was sized for 30 epochs while only
        # 10 run, and was stepped once per epoch with a loss value (it accepts
        # no metric). It must be sized to the real run and stepped per batch.
        epochs = 10
        scheduler = torch.optim.lr_scheduler.OneCycleLR(
            optimizer, max_lr=5e-4, epochs=epochs, steps_per_epoch=len(train_loader))
        criterion = nn.CrossEntropyLoss()
        best_acc = 0.0
        os.makedirs('./saved/se-resnext', exist_ok=True)  # torch.save fails if dir is missing
        print("Training ResNeXt.....")
        for epoch in range(epochs):
            self.train()
            train_loss = 0
            for images, labels in tqdm(train_loader):
                images, labels = images.to(device), labels.to(device)
                optimizer.zero_grad()
                logits = self(images)
                loss = criterion(logits, labels)
                loss.backward()
                optimizer.step()
                scheduler.step()  # OneCycleLR advances once per optimizer step
                train_loss += loss.item()
            print(f"Epoch {epoch+1}/{10}, Train Loss: {train_loss/len(train_loader)}")
            # ---- validation ----
            self.eval()
            val_loss = 0
            correct = 0
            total = 0
            with torch.no_grad():
                for images, labels in tqdm(val_loader):
                    images, labels = images.to(device), labels.to(device)
                    logits = self(images)
                    loss = criterion(logits, labels)
                    val_loss += loss.item()
                    _, predicted = torch.max(logits, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            print(f"Validation Loss: {val_loss/len(val_loader)}")
            acc = 100 * correct / total
            print(f"Accuracy: {acc}%")
            if acc > best_acc:
                best_acc = acc
                torch.save(self.state_dict(), './saved/se-resnext/model_weights_{}.pth'.format(best_acc))

模型5 SwinTransformer

class SwinTransformer(nn.Module):
    """SwinV2-tiny (Hugging Face) wrapper with two-stage fine-tuning.

    Stage 1 trains only the classifier head and the last 4 encoder layers;
    stage 2 unfreezes the whole network and fine-tunes at a tiny learning rate.
    """

    def __init__(self, num_classes=21, train=True):
        super(SwinTransformer, self).__init__()
        if train:
            # Load the locally cached pretrained SwinV2 checkpoint.
            self.vit = AutoModelForImageClassification.from_pretrained(
                './swinv2-tiny-patch4-window16-256/models--microsoft--swinv2-tiny-patch4-window16-256/snapshots/f4d3075206f2ad5eda586c30d6b4d0500f312421/')
            self.vit.classifier = nn.Sequential(
                nn.Dropout(0.5),
                nn.Linear(self.vit.classifier.in_features, num_classes))
            # Freeze every parameter first ...
            for param in self.vit.parameters():
                param.requires_grad = False
            # ... then unfreeze the last 4 encoder layers and the new head.
            for param in self.vit.swinv2.encoder.layers[-4:].parameters():
                param.requires_grad = True
            for param in self.vit.classifier.parameters():
                param.requires_grad = True
        else:
            # Inference path: load the fine-tuned checkpoint with the right label count.
            config = AutoConfig.from_pretrained('./saved/swin-transformer/')
            config.num_labels = 21
            self.vit = AutoModelForImageClassification.from_pretrained(
                './saved/swin-transformer/', config=config)
        self.vit.to(device)

    def forward(self, x):
        # Returns the HF output object; callers read `.logits`.
        return self.vit(x)

    def startTrain(self, train_loader, val_loader):
        """Two-stage fine-tuning; best checkpoint is persisted via save_pretrained."""
        criterion = nn.CrossEntropyLoss()
        num_epochs_stage1 = 10
        num_epochs_stage2 = 10

        # ----- Stage 1: train only the unfrozen parameters -----
        optimizer_stage1 = torch.optim.AdamW(
            [p for p in self.parameters() if p.requires_grad], lr=1e-3)
        # Fixed scheduler misuse: OneCycleLR is sized per batch
        # (steps_per_epoch=len(train_loader)) so it must be stepped per batch,
        # not once per epoch as the original did.
        scheduler_stage1 = torch.optim.lr_scheduler.OneCycleLR(
            optimizer_stage1, max_lr=1e-3, epochs=num_epochs_stage1,
            steps_per_epoch=len(train_loader))
        best_model_wts = copy.deepcopy(self.state_dict())
        print("Training SwinTransformer.....")
        print("===== Stage 1 Training =====")
        Best_Acc = 0.0
        for epoch in range(num_epochs_stage1):
            self.train()
            train_loss = 0
            for images, labels in tqdm(train_loader):
                images, labels = images.to(device), labels.to(device)
                optimizer_stage1.zero_grad()
                outputs = self(images)
                logits = outputs.logits
                loss = criterion(logits, labels)
                loss.backward()
                optimizer_stage1.step()
                scheduler_stage1.step()
                train_loss += loss.item()
            # Fixed: the original f-string was broken by a literal newline.
            print(f"Epoch {epoch+1}/{10}, Train Loss: {train_loss/len(train_loader)}")
            self.eval()
            val_loss = 0
            correct = 0
            total = 0
            with torch.no_grad():
                for images, labels in tqdm(val_loader):
                    images, labels = images.to(device), labels.to(device)
                    outputs = self(images)
                    logits = outputs.logits
                    loss = criterion(logits, labels)
                    val_loss += loss.item()
                    _, predicted = torch.max(logits, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            print(f"Validation Loss: {val_loss/len(val_loader)}")
            acc = 100 * correct / total
            print(f"Accuracy: {acc}%")
            if acc > Best_Acc:
                Best_Acc = acc
                best_model_wts = copy.deepcopy(self.state_dict())
                self.vit.save_pretrained('./saved/swin-transformer/', safe_serialization=False)

        # Restore the best stage-1 weights before full fine-tuning.
        self.load_state_dict(best_model_wts)
        Best_Acc = 0.0
        print("===== Stage 2 Training =====")

        # ----- Stage 2: unfreeze everything, fine-tune at a tiny LR -----
        for param in self.parameters():
            param.requires_grad = True
        optimizer_stage2 = torch.optim.Adam(self.parameters(), lr=1e-6)
        scheduler_stage2 = torch.optim.lr_scheduler.OneCycleLR(
            optimizer_stage2, max_lr=5e-6, epochs=num_epochs_stage2,
            steps_per_epoch=len(train_loader))
        for epoch in range(num_epochs_stage2):
            self.train()
            train_loss = 0
            for images, labels in tqdm(train_loader):
                images, labels = images.to(device), labels.to(device)
                optimizer_stage2.zero_grad()
                outputs = self(images)
                logits = outputs.logits
                loss = criterion(logits, labels)
                loss.backward()
                optimizer_stage2.step()
                scheduler_stage2.step()
                train_loss += loss.item()
            print(f"Epoch {epoch+1}/{10}, Train Loss: {train_loss/len(train_loader)}")
            self.eval()
            val_loss = 0
            correct = 0
            total = 0
            with torch.no_grad():
                for images, labels in tqdm(val_loader):
                    images, labels = images.to(device), labels.to(device)
                    outputs = self(images)
                    logits = outputs.logits
                    loss = criterion(logits, labels)
                    val_loss += loss.item()
                    _, predicted = torch.max(logits, 1)
                    total += labels.size(0)
                    correct += (predicted == labels).sum().item()
            print(f"Validation Loss: {val_loss/len(val_loader)}")
            acc = 100 * correct / total
            print(f"Accuracy: {acc}%")
            if acc > Best_Acc:
                Best_Acc = acc
                self.vit.save_pretrained('./saved/swin-transformer/', safe_serialization=False)

4.分別訓練,然后得到權重

    swinTransformer= SwinTransformer()swinTransformer.startTrain(train_dataloader,val_dataloader)efficientNet= EfficientNet()efficientNet.startTrain(train_dataloader,val_dataloader)resNet= ResNet()resNet.startTrain(train_dataloader,val_dataloader)resNeXt= ResNeXt()resNeXt.startTrain(train_dataloader,val_dataloader)denseNet= DenseNet()denseNet.startTrain(train_dataloader,val_dataloader)

5.構建集成分類模型

import torch
import torchvision.transforms as transforms
import torch.nn as nn
from torchvision import datasets
from torchvision import transforms
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.utils.data import random_split
from tqdm import tqdm
from PIL import Image


def remove_prefix_from_state_dict(state_dict, prefix='resnext.'):
    """Remap checkpoint keys starting with *prefix* to the 'resnext50.' namespace.

    The ResNeXt wrapper stores its backbone as ``self.resnext50``, so keys
    saved under a different attribute name must be renamed before loading.
    Keys without the prefix are passed through unchanged.
    """
    remapped = {}
    for key, value in state_dict.items():
        if key.startswith(prefix):
            remapped["resnext50." + key[len(prefix):]] = value
        else:
            remapped[key] = value
    return remapped
class EnsembleModel(nn.Module):
    """Soft-voting ensemble: averages the softmax probabilities of five models.

    Fixed: the original was a plain class that nonetheless called
    ``super().__init__()`` as if it were an ``nn.Module``; inheriting
    ``nn.Module`` registers the members properly (enabling ``.to()`` etc.).
    ``predict`` now runs under ``torch.no_grad()`` since it is inference-only.
    """

    def __init__(self, efficientNet, resNet, resNeXt, denseNet, swinTransformer):
        super(EnsembleModel, self).__init__()
        # .eval() disables dropout / freezes batch-norm statistics for inference.
        self.efficientNet = efficientNet.eval()
        self.resNet = resNet.eval()
        self.resNeXt = resNeXt.eval()
        self.denseNet = denseNet.eval()
        self.swinTransformer = swinTransformer.eval()

    def predict(self, x):
        """Return class probabilities averaged over the five member models."""
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            efficientNet_out = torch.softmax(self.efficientNet(x), dim=1)
            resNet_out = torch.softmax(self.resNet(x), dim=1)
            resNeXt_out = torch.softmax(self.resNeXt(x), dim=1)
            denseNet_out = torch.softmax(self.denseNet(x), dim=1)
            # The HF Swin model returns an output object; probabilities come from .logits.
            swinTransformer_out = torch.softmax(self.swinTransformer(x).logits, dim=1)
            avg_pred = (efficientNet_out + resNet_out + resNeXt_out
                        + denseNet_out + swinTransformer_out) / 5
        return avg_pred

這樣就可以提升性能

本文來自互聯網用戶投稿,該文觀點僅代表作者本人,不代表本站立場。本站僅提供信息存儲空間服務,不擁有所有權,不承擔相關法律責任。
如若轉載,請注明出處:http://www.pswp.cn/news/901069.shtml
繁體地址,請注明出處:http://hk.pswp.cn/news/901069.shtml
英文地址,請注明出處:http://en.pswp.cn/news/901069.shtml

如若內容造成侵權/違法違規/事實不符,請聯系多彩編程網進行投訴反饋email:809451989@qq.com,一經查實,立即刪除!

相關文章

從零開始學java--泛型

泛型 目錄 泛型 引入 泛型類 泛型與多態 泛型方法 泛型的界限 類型擦除 函數式接口 Supplier供給型函數式接口: Consumer消費型函數式接口: Function函數型函數式接口: Predicate斷言式函數式接口: 判空包裝 引入 …

5?? Coze+AI應用基礎教學(2025年全新版本)

目錄 一、了解應用開發 1.1 扣子應用能做什么 1.2 開發流程 1.3 開發環境 二、快速搭建一個AI應用 2.1 AI翻譯應用介紹 2.2 設計你的應用功能 2.3 創建 AI 應用項目 2.4 編寫業務邏輯(新建工作流) 2.5 搭建用戶界面 2.6 效果測試 2.7 發布應用 一、了解應用開發 …

工會成立100周年紀念,開發職工健身AI運動小程序、APP方案推薦

時光荏苒,轉眼間2025年五一將至,這一年對于中華全國總工會而言,具有非凡的歷史意義——它將迎來成立100周年的輝煌時刻。為了慶祝這一盛事,各級工會組織將精心籌備了一系列豐富多彩、形式多樣的紀念活動,旨在展現工會百…

【深度學習】Ubuntu 服務器配置開源項目FIGRET(PyTorch、torch-scatter、torch-sparse、Gurobi 安裝)

開源項目網址:https://github.com/FIGRET/figret 該項目在SIGCOMM2024發表,用深度學習方法處理流量工程中的突發問題 1. 創建新的 Conda 環境 使用國內鏡像源創建環境? conda create -n figret python3.8.0 --override-channels -c https://mirrors.…

【SpringCloud】從入門到精通(上)

今天主播我把黑馬新版微服務課程MQ高級之前的內容都看完了,雖然在看視頻的時候也記了筆記,但是看完之后還是忘得差不多了,所以打算寫一篇博客再溫習一下內容。 課程坐標:黑馬程序員SpringCloud微服務開發與實戰 微服務 認識單體架構 單體架…

MySQL中動態生成SQL語句去掉所有字段的空格

在MySQL中動態生成SQL語句去掉所有字段的空格 在數據庫管理過程中,我們常常會遇到需要對表中字段進行清洗和整理的情況。其中,去掉字段中的空格是一項常見的操作。當表中的字段數量較少時,我們可以手動編寫 UPDATE 語句來處理。但如果表中包…

【Grok 大模型深度解析】第二期:架構探秘與訓練哲學

在上一期的內容中,我們對 Grok 大模型從技術溯源的角度,了解了它從 Transformer 架構局限性出發,邁向混合架構創新的歷程,同時也梳理了從 Grok - 1 到 Grok - 3 的版本迭代所帶來的技術躍遷以及其獨特的差異化優勢。這一期,我們將深入到 Grok 大模型的架構內部,探究其精妙…

c# 使用NPOI將datatable的數據導出到excel

以下是使用 NPOI 庫 將 DataTable 數據導出到 Excel 的詳細步驟和代碼示例(支持 .xls 和 .xlsx 格式): 步驟 1:安裝 NPOI NuGet 包 Install-Package NPOI Install-Package NPOI.OOXML # 若需導出 .xlsx 格式 步驟 2:完整代碼實現 using NPOI.SS.UserModel; using NPOI.…

基于SpringBoot的求職招聘網站系統(源碼+數據庫)

473基于SpringBoot的求職招聘網站系統,本系統共分為2個角色:系統管理員、用戶,主要功能如下 【前臺功能】 用戶角色功能: 1. 注冊和登錄:注冊賬戶并登錄系統,以便訪問更多功能。 2. 個人信息管理&#x…

CSS 過渡與變形:讓交互更絲滑

在網頁設計中,動效能讓用戶交互更自然、流暢,提升使用體驗。本文將通過 CSS 的 transition(過渡)和 transform(變形)屬性,帶你入門基礎動效設計,結合案例演示如何實現顏色漸變、元素…

rqlite:一個基于SQLite構建的分布式數據庫

今天給大家介紹一個基于 SQLite 構建的輕量級分布式關系型數據庫:rqlite。 rqlite 基于 Raft 協議,結合了 SQLite 的簡潔性以及高可用分布式系統的穩健性,對開發者友好,操作極其簡便,其核心設計理念是以最低的復雜度實…

mujoco graspnet 仿真項目的復現記錄

開源項目:https://gitee.com/chaomingsanhua/manipulator_grasp 復現使用的配置:linux系統ubuntu20.04 項目配置記錄: git clone 對應的code后: 需要在graspnet-baseline文件夾中繼續拉取文件,指令記錄:…

【js面試題】new操作做了什么?

這些年也面試了一些外包同事,不知道其他面試官的想法,但就我而言,我更喜歡聽到的是口述代碼的方式: 比如下述代碼 function Animal(age) {this.age age; // 設置新對象的屬性 }const cat new Animal("8");最有效的回…

freecad內部python來源 + pip install 裝包

cmake來源: 只能find默認地址,我試過用虛擬的python地址提示缺python3config.cmake python解釋器位置: python控制臺位置: pip install 裝包: module_to_install "your pakage" import os import FreeCAD …

樹和圖論【詳細整理,簡單易懂!】(C++實現 藍橋杯速查)

樹和圖論 樹的遍歷模版 #include <iostream> #include <cstring> #include <vector> #include <queue> // 添加queue頭文件 using namespace std;const int MAXN 100; // 假設一個足夠大的數組大小 int ls[MAXN], rs[MAXN]; // 定義左右子樹數…

展訊android15源碼編譯之apk單編

首先找到你要單編的apk生成的路徑&#xff1a; sys\out_system\target\product\ussi_arm64\system_ext\app\HelloDemo\HelloDemo.apk接著打開下面這個文件&#xff1a; sys\out_system\ussi_arm64_full-userdebug-gms.system.build.log在里面找關鍵字"Running command&q…

如何關閉MacOS中鼠標滾輪滾動加速

一、背景 想要關閉滾輪的 “滾動加速”&#xff0c;即希望滾動了多少就對應滾動頁面固定行數&#xff0c;現在macOS是加速滾動的&#xff0c;即滾動相同的角度會根據你滾動滾輪的速度不同最終頁面滾動的幅度不同。這點很煩&#xff0c;常導致很難定位。 macOS本身的設置是沒有…

河北工程大學e2e平臺,python

題目&#xff0c;選擇題包100分&#xff01; 題目&#xff0c;選擇題包100分&#xff01; 題目&#xff0c;選擇題包100分&#xff01; 聯系&#x1f6f0;&#xff1a;18039589633

【藍橋杯】貪心算法

1. 區間調度 1.1. 題目 給定個區間,每個區間由開始時間start和結束時間end表示。請選擇最多的互不重疊的區間,返回可以選擇的區間的最大數量。 輸入格式: 第一行包含一個整數n,表示區間的數量 接下來n行,每行包含兩個整數,分別表示區間的開始時間和結束時間 輸出格式:…

一維差分數組

2.一維差分 - 藍橋云課 問題描述 給定一個長度為 n 的序列 a。 再給定 m 組操作&#xff0c;每次操作給定 3 個正整數 l, r, d&#xff0c;表示對 a_{l} 到 a_{r} 中的所有數增加 d。 最終輸出操作結束后的序列 a。 ??Update??: 由于評測機過快&#xff0c;n, m 于 20…