GPU Training
To train a model on the GPU, the core task is moving both the model and the data onto the GPU device.
In PyTorch, the .to(device) method moves a tensor or a model to the specified compute device (e.g. the CPU or a GPU).
For a tensor (Tensor): calling .to(device) returns a new tensor that lives on the target device.
For a model (nn.Module): calling .to(device) modifies the model in place, moving all of its parameters and buffers to the new device. During computation, the model and all input tensors must be on the same device; if they are not, a runtime error is raised. Note that not every PyTorch object has a .to(device) method: only models inheriting from torch.nn.Module and torch.Tensor objects do.
The common error RuntimeError: Tensor for argument #1 'input' is on CPU, but expected it to be on GPU means exactly this: the input tensor and the model are on different devices.
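A minimal sketch of these semantics (the tensor shapes here are arbitrary, and the code falls back to the CPU when no GPU is present):

import torch
import torch.nn as nn

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

t = torch.randn(2, 4)
t_gpu = t.to(device)           # returns a NEW tensor on `device`
print(t.device, t_gpu.device)  # the original tensor is unchanged

layer = nn.Linear(4, 3)
layer.to(device)               # modifies the module in place
print(next(layer.parameters()).device)

out = layer(t_gpu)             # works: input and module share a device
# layer(t) would raise the RuntimeError quoted above once `layer` is on the GPU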
import torch
import torch.nn as nn
import torch.optim as optim
import time
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

# Inspect the CUDA environment
if torch.cuda.is_available():
    print("CUDA is available!")
    device_count = torch.cuda.device_count()
    print(f"Number of CUDA devices: {device_count}")
    current_device = torch.cuda.current_device()
    print(f"Current CUDA device index: {current_device}")
    device_name = torch.cuda.get_device_name(current_device)
    print(f"Current CUDA device name: {device_name}")
    cuda_version = torch.version.cuda
    print(f"CUDA version: {cuda_version}")
    print("cuDNN version:", torch.backends.cudnn.version())
else:
    print("CUDA is not available.")

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Load and split the Iris dataset
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Scale the features to [0, 1]
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# Convert to tensors and move them to the target device
X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test).to(device)

class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(10, 3)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

# The model must be on the same device as the input tensors
model = MLP().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

num_epochs = 20000
losses = []
start_time = time.time()

for epoch in range(num_epochs):
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses.append(loss.item())  # .item() copies the scalar loss from GPU to CPU on every epoch
    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

time_all = time.time() - start_time
print(f'Training time: {time_all:.2f} seconds')

plt.plot(range(num_epochs), losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training Loss over Epochs')
plt.show()
With the computation itself already on the GPU, the only thing left to optimize is the data-transfer time, so we target it directly. Two ideas come to mind:
1. Stop recording the per-epoch loss entirely. The drawback is that there is nothing left to plot at the end, so we can only eyeball the printed loss values.
2. Save the loss every 200 epochs instead of copying it back to the CPU on every one of the 20000 epochs.
Let's try the first idea:
import torch
import torch.nn as nn
import torch.optim as optim
import time
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test).to(device)

class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(10, 3)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

model = MLP().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

num_epochs = 20000
start_time = time.time()

for epoch in range(num_epochs):
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    # No per-epoch losses.append(loss.item()); the scalar loss is only
    # copied back to the CPU at the occasional print below
    if (epoch + 1) % 100 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

time_all = time.time() - start_time
print(f'Training time: {time_all:.2f} seconds')
After this change the runtime improves markedly and is nearly the same as training on the CPU, so we can conclude that the per-epoch transfer of the loss from GPU to CPU was consuming a large share of the time.
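To see this synchronization cost directly, one can time a stretch of GPU work with and without a per-step .item() read-back. This is only a rough sketch: the matrix size and step count are made up, and torch.cuda.synchronize() brackets the timed region so the clock measures finished GPU work rather than queued kernels.

import time
import torch

device = torch.device("cuda:0")
x = torch.randn(1000, 1000, device=device)

def run(steps, read_back):
    torch.cuda.synchronize()   # make sure earlier GPU work has finished
    start = time.time()
    total = 0.0
    for _ in range(steps):
        y = (x @ x).sum()      # some GPU work producing a scalar tensor
        if read_back:
            total += y.item()  # forces a GPU-to-CPU copy and sync each step
    torch.cuda.synchronize()   # wait for queued kernels before stopping the clock
    return time.time() - start

print(f"without .item(): {run(2000, False):.3f} s")
print(f"with    .item(): {run(2000, True):.3f} s")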
Now let's try the second idea:
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import time
import matplotlib.pyplot as plt

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

X_train = torch.FloatTensor(X_train).to(device)
y_train = torch.LongTensor(y_train).to(device)
X_test = torch.FloatTensor(X_test).to(device)
y_test = torch.LongTensor(y_test).to(device)

class MLP(nn.Module):
    def __init__(self):
        super(MLP, self).__init__()
        self.fc1 = nn.Linear(4, 10)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(10, 3)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)
        out = self.fc2(out)
        return out

model = MLP().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

num_epochs = 20000
losses = []
start_time = time.time()

for epoch in range(num_epochs):
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 200 == 0:
        # .item() returns a plain Python number from the scalar loss tensor,
        # so the GPU-to-CPU copy now happens only once every 200 epochs
        losses.append(loss.item())
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')

time_all = time.time() - start_time
print(f'Training time: {time_all:.2f} seconds')

plt.plot(range(len(losses)), losses)
plt.xlabel('Recorded step (every 200 epochs)')
plt.ylabel('Loss')
plt.title('Training Loss over Epochs')
plt.show()
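Taking the same idea one step further (a sketch, not part of the experiment above): the loss can be recorded on every epoch as a detached GPU tensor and moved to the CPU in a single batch after training, preserving the full curve with only one transfer. This reuses the model, criterion, and optimizer defined in the script above.

losses = []
for epoch in range(num_epochs):
    outputs = model(X_train)
    loss = criterion(outputs, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    losses.append(loss.detach())  # scalar tensor stays on the GPU; no sync here

# One GPU-to-CPU transfer for the whole loss history
loss_history = torch.stack(losses).cpu().tolist()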