This installment is the R version of "Step 33. Machine Learning Classification in Practice: Misclassified Case Analysis".
This time I tried having DeepSeek-R1 write the code, and the results were quite good.
Below is the R code, using XGBoost as the example:
# Load the required libraries
library(caret)
library(pROC)
library(ggplot2)
library(xgboost)

# Assume 'data' is the data frame containing the dataset, with the binary outcome in column 'X'
# Set a random seed so the results are reproducible
set.seed(123)

# Split the data into training and validation sets (80% training, 20% validation)
trainIndex <- createDataPartition(data$X, p = 0.8, list = FALSE)
trainData <- data[trainIndex, ]
validData <- data[-trainIndex, ]

# Prepare the data matrices for XGBoost
dtrain <- xgb.DMatrix(data = as.matrix(trainData[, -which(names(trainData) == "X")]), label = trainData$X)
dvalid <- xgb.DMatrix(data = as.matrix(validData[, -which(names(validData) == "X")]), label = validData$X)

# Define the XGBoost parameters (logloss is used as the evaluation metric)
params <- list(booster = "gbtree", objective = "binary:logistic", eta = 0.1, gamma = 0,
               max_depth = 6, min_child_weight = 1, subsample = 0.5, colsample_bytree = 0.9,
               lambda = 10, alpha = 5, eval_metric = "logloss")

# Train the XGBoost model, with early stopping
model <- xgb.train(params = params, data = dtrain, nrounds = 250,
                   watchlist = list(train = dtrain, eval = dvalid),  # monitor the training and validation sets
                   early_stopping_rounds = 10,  # stop if the validation metric does not improve for 10 rounds
                   verbose = 1)

# Predict on the training and validation sets
trainPredict <- predict(model, dtrain)
validPredict <- predict(model, dvalid)

# Convert the predicted probabilities to binary predictions (default threshold of 0.5)
trainPredictBinary <- ifelse(trainPredict > 0.5, 1, 0)
validPredictBinary <- ifelse(validPredict > 0.5, 1, 0)

# Flag correctly classified and misclassified samples
# Training set
trainResults <- trainData
trainResults$Predicted <- trainPredictBinary  # predicted labels
trainResults$Actual <- trainData$X            # true labels
trainResults$Status <- ifelse(trainResults$Predicted == trainResults$Actual, "Correct", "Misclassified")  # classification status

# Validation set
validResults <- validData
validResults$Predicted <- validPredictBinary  # predicted labels
validResults$Actual <- validData$X            # true labels
validResults$Status <- ifelse(validResults$Predicted == validResults$Actual, "Correct", "Misclassified")  # classification status

# Separate the correctly classified and misclassified samples
trainCorrect <- trainResults[trainResults$Status == "Correct", ]
trainMisclassified <- trainResults[trainResults$Status == "Misclassified", ]
validCorrect <- validResults[validResults$Status == "Correct", ]
validMisclassified <- validResults[validResults$Status == "Misclassified", ]

# Print the correctly classified and misclassified samples
print("訓練集中的正常分類樣本:")
print(trainCorrect)print("訓練集中的誤判樣本:")
print(trainMisclassified)print("驗證集中的正常分類樣本:")
print(validCorrect)print("驗證集中的誤判樣本:")
print(validMisclassified)# 將結果保存到文件(可選)
write.csv(trainCorrect, file = "train_correct.csv", row.names = FALSE)
write.csv(trainMisclassified, file = "train_misclassified.csv", row.names = FALSE)
write.csv(validCorrect, file = "valid_correct.csv", row.names = FALSE)
write.csv(validMisclassified, file = "valid_misclassified.csv", row.names = FALSE)
A brief explanation:
(A) The final results are written out as CSV files that you can open in Excel. They land in your working directory; if you are not sure where that is, run getwd() to display it.
(B) In the working directory you will find four files: the correctly classified and the misclassified samples for the training set and for the validation set. For each set, the "correct" file holds the samples on the diagonal of that set's confusion matrix and the "misclassified" file holds the off-diagonal ones, so checking them against the confusion matrix is straightforward (see the short sketch after this list).
(C) A side note: with a bit of code tweaking, DeepSeek-R1-0528 now feels comparable to GPT; it makes a solid budget substitute.
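Here is a minimal cross-check sketch, assuming the objects created by the script above (validPredictBinary, validData, validCorrect, validMisclassified) are still in your R session; the same idea applies to the training set.

# caret is already loaded above and provides confusionMatrix()
# Confusion matrix for the validation set (confusionMatrix() expects factors)
cm <- confusionMatrix(factor(validPredictBinary, levels = c(0, 1)),
                      factor(validData$X, levels = c(0, 1)),
                      positive = "1")
print(cm$table)

# The diagonal of cm$table should equal nrow(validCorrect),
# and the off-diagonal cells should sum to nrow(validMisclassified)
nrow(validCorrect)
nrow(validMisclassified)

# Confirm the output location and that all four CSV files were written
getwd()
list.files(pattern = "_(correct|misclassified)\\.csv$")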