Continuing the machine learning series: testing a model that distinguishes human-generated from machine-generated data.
We use the weak-learner trees of a random forest to screen out the relatively stable features.
# Random forest feature screening
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler

X = data.drop(['city', 'target'], axis=1)  # drop the original city-name column and the target column
y = data['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
rf_model = RandomForestClassifier(n_estimators=100, random_state=42)  # random forest as the screening model
rf_model.fit(X_train, y_train)
feature_importances = rf_model.feature_importances_
feature_names = X.columns

# Print the feature importances
for feature_name, importance in zip(feature_names, feature_importances):
    print(f"{feature_name}: {importance}")

# Put the feature importances into a DataFrame, sorted in descending order
feature_importances_df = pd.DataFrame({'Feature': feature_names, 'Importance': feature_importances})
feature_importances_df = feature_importances_df.sort_values(by='Importance', ascending=False)
feature_importances_df.index = range(1, len(feature_importances_df) + 1)
# feature_importances_df.to_csv('feature_importances.csv', index=False)

# Plot the sorted feature importances
plt.figure(figsize=(12, 8))
plt.barh(feature_importances_df['Feature'], feature_importances_df['Importance'],
         height=0.8, color='#1f77b4')  # optional: adjust the bar color
plt.gca().invert_yaxis()
plt.xlabel('Feature Importance', fontsize=12)
plt.ylabel('Feature', fontsize=12)
plt.title('Feature Importance in Random Forest Model', fontsize=14)
plt.grid(axis='x', linestyle='--', alpha=0.6)  # optional: add grid lines
plt.tight_layout()
plt.savefig('feature_importance.png', dpi=300)  # optional: save a high-resolution figure
plt.show()
Running this code yields the ranked feature importances shown in the chart.
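scikit-learn can also perform this screening directly. The following is a minimal sketch (not part of the original post; the 0.01 importance threshold is an assumption) using SelectFromModel:

from sklearn.feature_selection import SelectFromModel

# Keep only the features whose importance exceeds the (assumed) threshold
selector = SelectFromModel(
    RandomForestClassifier(n_estimators=100, random_state=42),
    threshold=0.01)
selector.fit(X_train, y_train)
selected = X.columns[selector.get_support()]
print("Selected features:", list(selected))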
Checking feature validity with other metrics
VIF (variance inflation factor)
# Define a function that computes the VIF of each independent variable
from statsmodels.stats.outliers_influence import variance_inflation_factor

def calculate_vif(data):
    vif_data = pd.DataFrame()
    vif_data["feature"] = data.columns
    vif_data["VIF"] = [variance_inflation_factor(data.values, i) for i in range(data.shape[1])]
    return vif_data

vif_results = calculate_vif(X)  # X is the DataFrame of independent variables
print(vif_results)
# A VIF ≥ 10 indicates significant multicollinearity
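When some VIFs do exceed 10, a common follow-up (a sketch under the assumption that we simply drop the worst offender each round) is to prune features iteratively:

def drop_high_vif(data, max_vif=10.0):
    # Repeatedly drop the feature with the highest VIF until all fall below max_vif
    cols = list(data.columns)
    while True:
        vifs = pd.Series(
            [variance_inflation_factor(data[cols].values, i) for i in range(len(cols))],
            index=cols)
        if vifs.max() < max_vif:
            return data[cols]
        cols.remove(vifs.idxmax())

X_reduced = drop_high_vif(X)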
Correlation coefficient matrix
import seaborn as sns

corr_matrix = X.corr()  # X is the DataFrame of independent variables
plt.figure(figsize=(10, 8))
sns.heatmap(corr_matrix, annot=True, cmap='coolwarm')
plt.show()
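The heatmap is easier to act on if the strongly correlated pairs are also printed; a small helper sketch (the 0.8 cut-off is an assumption) follows:

# List feature pairs whose absolute correlation exceeds 0.8
upper = corr_matrix.abs().where(
    np.triu(np.ones(corr_matrix.shape, dtype=bool), k=1))
high_corr = upper.stack().sort_values(ascending=False)
print(high_corr[high_corr > 0.8])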
Collinearity (condition number)
X_matrix = X.values  # X is the DataFrame of independent variables
cond_number = np.linalg.cond(X_matrix)
print("條件數:", cond_number)
#條件數 > 100時,可能存在顯著共線性
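Note that the condition number is sensitive to column scale, so a large value can simply reflect mismatched units. A sketch that removes this effect (standardizing first is an assumption, not in the original code):

# Standardize the columns so that units do not dominate the diagnostic
X_std = (X - X.mean()) / X.std()
print("Condition number (standardized):", np.linalg.cond(X_std.values))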
Tolerance
# Tolerance is the reciprocal of VIF and reflects how independent each variable is.
tol = 1 / np.array([variance_inflation_factor(X.values, i) for i in range(X.shape[1])])
print("Tolerance:", tol)
# A tolerance < 0.1 suggests significant collinearity
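To read these diagnostics side by side, one convenient (hypothetical) final step is to merge them into a single per-feature table:

# Combine VIF, tolerance, and the strongest pairwise correlation per feature
diagnostics = vif_results.copy()
diagnostics["tolerance"] = 1 / diagnostics["VIF"]
diagnostics["max_abs_corr"] = [
    corr_matrix[col].drop(col).abs().max() for col in X.columns]
print(diagnostics.sort_values(by="VIF", ascending=False))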
Feature selection
# Keep the important features
threshold = 0.01  # importance threshold
important_features = feature_names[feature_importances > threshold]
# Alternatively, keep the top 30 ranked features
important_features = feature_importances_df['Feature'].head(30)

# Build the new dataset (.copy() avoids pandas SettingWithCopyWarning)
new_data = data[important_features].copy()
new_data['target'] = data['target']  # add the target variable back to the new dataset
df_temp = temp[important_features].copy()  # temp: the second dataset prepared earlier in the series
df_temp['target'] = temp['target']

# Split features and target
X = new_data.drop('target', axis=1)
y = new_data['target']
X_temp = df_temp.drop('target', axis=1)
y_temp = df_temp['target']

# Re-split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize the data
scaler = StandardScaler()  # standardization as the scaling method
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
Outputs of the code above:
VIF values
Correlation matrix
Condition number and tolerance
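Finally, with the screened and standardized features in hand, a natural closing step (a minimal sketch, not from the original post; the metric choices are assumptions) is to retrain the random forest and test it on both splits:

from sklearn.metrics import accuracy_score, classification_report

# Assumption: reuse the same hyperparameters to retrain on the screened features
rf_final = RandomForestClassifier(n_estimators=100, random_state=42)
rf_final.fit(X_train_scaled, y_train)
y_pred = rf_final.predict(X_test_scaled)
print("Accuracy:", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))

# Score the second dataset with the same scaler and model
X_temp_scaled = scaler.transform(X_temp)
print("Temp-set accuracy:", accuracy_score(y_temp, rf_final.predict(X_temp_scaled)))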