    # Start automatic hyperparameter tuning with hyperopt
    algo = partial(tpe.suggest, n_startup_jobs=1)
    best = fmin(lightgbm_factory, space, algo=algo, max_evals=20,
                pass_expr_memo_ctrl=None)
    RMSE = lightgbm_factory(best)
    print('best :', best)
    print('best param after transform :')
    argsDict_tranform(best, isPrint=True)
    print('rmse of the best lightgbm:', np.sqrt(RMSE))
    model = MultiOutputRegressor(LGBMRegressor(**best))  # , device='gpu'
    return model
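# A minimal, self-contained sketch of the hyperopt pattern used above. The real
# `space`, `lightgbm_factory` and `argsDict_tranform` are defined in the earlier
# short-term section and are not reproduced here; the helper name and search
# ranges below are hypothetical illustrations, not the author's code.
def _hyperopt_lightgbm_sketch(train_x, train_y, test_x, test_y):
    from hyperopt import fmin, tpe, hp
    space_sketch = {
        'num_leaves': hp.quniform('num_leaves', 16, 256, 1),
        'learning_rate': hp.loguniform('learning_rate', np.log(0.005), np.log(0.1)),
    }
    def objective(args):
        params = {'num_leaves': int(args['num_leaves']),
                  'learning_rate': args['learning_rate'],
                  'verbose': -1}
        m = MultiOutputRegressor(LGBMRegressor(**params))
        m.fit(train_x, train_y)
        # hyperopt minimizes the returned value, so return the test RMSE
        return np.sqrt(mean_squared_error(test_y, m.predict(test_x)))
    best = fmin(objective, space_sketch,
                algo=partial(tpe.suggest, n_startup_jobs=1), max_evals=20)
    return best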
def tune_optuna_lightgbm_model(train_x, train_y, test_x, test_y):
    from lightgbm import log_evaluation, early_stopping
    callbacks = [log_evaluation(period=100), early_stopping(stopping_rounds=100)]
    def objective(trial):  # x_test, y_test
        param = {
            'metric': 'rmse',
            'random_state': 48,
            'n_estimators': 2,  # 20000
            'reg_alpha': trial.suggest_loguniform('reg_alpha', 1e-3, 10.0),
            'reg_lambda': trial.suggest_loguniform('reg_lambda', 1e-3, 10.0),
            'colsample_bytree': trial.suggest_categorical('colsample_bytree',
                                                          [0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]),
            'subsample': trial.suggest_categorical('subsample', [0.4, 0.5, 0.6, 0.7, 0.8, 1.0]),
            'learning_rate': trial.suggest_categorical('learning_rate',
                                                       [0.006, 0.008, 0.01, 0.014, 0.017, 0.02]),
            'max_depth': trial.suggest_categorical('max_depth', [5, 7, 9, 11, 13, 15, 17, 20, 50]),
            'num_leaves': trial.suggest_int('num_leaves', 1, 1000),
            'min_child_samples': trial.suggest_int('min_child_samples', 1, 300),
            'cat_smooth': trial.suggest_int('cat_smooth', 1, 100),
            'verbose': -1,
        }
        mlgb = MultiOutputRegressor(LGBMRegressor(**param))
        mlgb.fit(train_x, train_y)  # , eval_set=[(X_test, y_test)], callbacks=callbacks)
        pred_lgb = mlgb.predict(test_x)
        rmse = mean_squared_error(test_y, pred_lgb, squared=False)
        return rmse
    study = optuna.create_study(direction='minimize')
    n_trials = 2  # 50  # try 50 trials
    study.optimize(objective, n_trials=n_trials)
    print('Number of finished trials:', len(study.trials))
    print("------------------------------------------------")
    print('Best trial:', study.best_trial.params)
    print("------------------------------------------------")
    print("study.best_params:", study.best_params)
    print("------------------------------------------------")
    print(study.trials_dataframe())
    print("------------------------------------------------")
    optuna.visualization.plot_optimization_history(study).show()
    # plot_parallel_coordinate: interactively visualizes the hyperparameters and scores
    optuna.visualization.plot_parallel_coordinate(study).show()
    params = study.best_params
    model = MultiOutputRegressor(LGBMRegressor(**params))
    return model
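# Note (sketch, not part of the original function): study.best_params only contains
# the values sampled via trial.suggest_*; the fixed entries ('metric', 'random_state',
# 'n_estimators', 'verbose') are not included. To rebuild the final model with the
# full configuration, one option is to merge them back in before refitting:
#     fixed = {'metric': 'rmse', 'random_state': 48, 'n_estimators': 2, 'verbose': -1}
#     model = MultiOutputRegressor(LGBMRegressor(**fixed, **study.best_params))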
ridge_model = Ridge(alpha=10e-6, fit_intercept=True)

def tune_GridSearchCV_ridge_model(train_x, train_y, test_x, test_y):
    from sklearn.model_selection import GridSearchCV
    model = Ridge(fit_intercept=True)
    alpha_can = np.logspace(-1, 1, 10)  # (-10, 10, 1000)
    model = GridSearchCV(model, param_grid={'alpha': alpha_can}, cv=5)
    return model
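# Usage sketch (assumption; in this script the actual fitting happens later inside
# the ultrashorttime_model_make_result functions). GridSearchCV is returned unfitted,
# and Ridge handles multi-output targets natively, so it can also be used directly:
#     search = tune_GridSearchCV_ridge_model(train_x, train_y, test_x, test_y)
#     search.fit(train_x, train_y)
#     print(search.best_params_)       # e.g. {'alpha': ...}
#     best_ridge = search.best_estimator_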
########### Ultra-short-term forecast - 3. Model training & prediction (output: last point) ##########
def ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     model, modelname, Cap, result_):
    model.fit(train_x, train_y)
    print("=" + modelname + "=")
    # save model
    joblib.dump(model, path + r'/model/' + modelname + r'ultrashorttime.pkl')
    # load model
    model = joblib.load(path + r'/model/' + modelname + r'ultrashorttime.pkl')
    pred_y = model.predict(test_x)
    test_y, pred_y = test_y[:, -1], pred_y[:, -1]
    print(test_y.shape, pred_y.shape)
    # correction: round and clip predictions to [0, Cap]
    for j in range(len(pred_y)):
        pred_y[j] = np.round(pred_y[j], 3)
        if pred_y[j] < 0:
            pred_y[j] = float(0)
        if pred_y[j] > Cap:
            pred_y[j] = Cap
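    # Vectorized equivalent of the loop above (sketch, behaviour-preserving):
    #     pred_y = np.clip(np.round(pred_y, 3), 0, Cap)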
    mse = mean_squared_error(test_y, pred_y)
    rmse = np.sqrt(mean_squared_error(test_y, pred_y))
    mae = mean_absolute_error(test_y, pred_y)
    mape = mean_absolute_percentage_error(test_y, pred_y)
    r2score = r2_score(test_y, pred_y)
    print('mse:', mse)
    print('rmse:', rmse)
    print('mae', mae)
    print('mape', mape)
    print('r2score', r2score)
    # dpi controls the resolution, figsize the canvas size
    # plt.figure(dpi=300, figsize=(24, 8))
    plt.title(modelname + " prediction result")
    plt.plot(test_y.ravel(), label="actual")
    plt.plot(pred_y.ravel(), label="predicted")
    plt.legend(loc=1)
    plt.savefig(path + r"/pictures/" + modelname + "超短期.png")
    plt.close()
    result_['真實值'] = test_y
    result_['預測值'] = pred_y
    result_.to_csv(path + r"/result/" + modelname + "超短期.csv", sep=',')
modelname = ["Catboost", "Lightgbm", "Ridge"]
'''
ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                 catboost_model, modelname[0], Cap)
ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                 lightgbm_model, modelname[1], Cap)
ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                 ridge_model, modelname[2], Cap)
'''
########### Ultra-short-term forecast - 4. Model training & prediction (output: 16 points) ##########
# The requirement is to predict 16 points, but to make it easier for the deployed
# model to line its predictions up with the corresponding timestamps, in practice one
# extra point is usually predicted, i.e. 17 points; see the sketch below.
# print("" + "ultra-short-term 16-point prediction starts" + "********")
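# Hypothetical sketch of the "predict one extra step" idea. The project's real
# dataset builder is make_ultrashorttime_data, defined in an earlier section; the
# helper below, its name and its default horizon are illustrative assumptions only.
def _make_multistep_targets_sketch(series, horizon=17):
    # Column k holds the value (k + 1) steps ahead of each row's timestamp, so the
    # 16-point requirement is covered with one spare step for deployment alignment.
    cols = {'t+' + str(k + 1): series.shift(-(k + 1)) for k in range(horizon)}
    return pd.DataFrame(cols).dropna()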
def ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              model, modelname, Cap, result16):
    model.fit(train_x, train_y)
    print("=" + modelname + "=")
    # save model
    joblib.dump(model, path + r'/model/' + modelname + r'ultrashorttime.pkl')
    # load model
    model = joblib.load(path + r'/model/' + modelname + r'ultrashorttime.pkl')
    pred_y = model.predict(test_x)
    test_y_, pred_y_ = test_y[:, -1], pred_y[:, -1]
    print(test_y_.shape, pred_y_.shape)
    mse = mean_squared_error(test_y_, pred_y_)
    rmse = np.sqrt(mean_squared_error(test_y_, pred_y_))
    mae = mean_absolute_error(test_y_, pred_y_)
    mape = mean_absolute_percentage_error(test_y_, pred_y_)
    r2score = r2_score(test_y_, pred_y_)
    print('mse:', mse)
    print('rmse:', rmse)
    print('mae', mae)
    print('mape', mape)
    print('r2score', r2score)
    # dpi controls the resolution, figsize the canvas size
    # plt.figure(dpi=300, figsize=(24, 8))
    plt.title(modelname + " prediction result")
    plt.plot(test_y_.ravel(), label="actual")
    plt.plot(pred_y_.ravel(), label="predicted")
    plt.legend(loc=1)
    plt.savefig(path + r"/pictures/" + modelname + "超短期16個點取最后一個點畫圖.png")
    plt.close()
    def correction(jj):
        # round and clip the jj-th output column to [0, Cap]
        for j in range(len(pred_y[:, jj])):
            pred_y[:, jj][j] = np.round(pred_y[:, jj][j], 3)
            if pred_y[:, jj][j] < 0:
                pred_y[:, jj][j] = float(0)
            if pred_y[:, jj][j] > Cap:
                pred_y[:, jj][j] = Cap
    for j in range(16):
        correction(j)
    result16['真實值'] = test_y_
    for i in range(16):
        result16['預測值' + str(i)] = pred_y[:, i]
    result16.to_csv(path + r"/result/" + modelname + "超短期16個點.csv", sep=',')
modelname = ["Catboost", "Lightgbm", "Ridge"]
'''
ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                           catboost_model, modelname[0], Cap)
ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                           lightgbm_model, modelname[1], Cap)
ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                           ridge_model, modelname[2], Cap)
'''
if __name__ == "__main__":
    ########### Station data ##########
    Cap = 17
    data = pd.read_csv(path + r"/data/with_nwp2024-05-14.csv", parse_dates=True,
                       index_col='時間')
    data = data.sort_index()
    print("Station data:", data.shape)
    print(data.head())
    print("=============================== Station data correlation ===============================")
    print(data.corr())
    data.plot(figsize=(24, 10))
    plt.savefig(path + r"/pictures/with_nwp.png")
    plt.close()
    data['實際功率'] = data['實際功率'].map(lambda x: x if x > 0 else 0)
    data = data[["實際功率", "預測風速"]]
    # drop rows with missing power values (note: "!= np.nan" is always True, so use notna())
    data = data[data['實際功率'].notna()]
    data = data.fillna(value=0)
    '''
    print("=== Building the short-term forecasting dataset ===")
    # build the short-term forecasting dataset
    x_train, y_train, x_test, y_test, result = make_shorttime_data(data)
    print("=== Short-term forecast: CatBoost automatic vs. manual tuning ===")
    print("1. Short-term CatBoost, manually tuned / default parameters:")
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                model_catboost, "catboost", Cap, result)
    print("2. Short-term CatBoost tuned automatically with hyperopt:")
    tune_hyperopt_catboost = tune_hyperopt_model_catboost(x_train, y_train, x_test, y_test)
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                tune_hyperopt_catboost, "tune_hyperopt_catboost", Cap, result)
    print("3. Short-term CatBoost tuned automatically with optuna:")
    tune_optuna_catboost = tune_optuna_model_catboost(x_train, y_train, x_test, y_test)
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                tune_optuna_catboost, "tune_optuna_catboost", Cap, result)
    print("====================")
    print("1. Short-term LightGBM, manually tuned / default parameters:")
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                model_lightgbm, "lightgbm", Cap, result)
    print("2. Short-term LightGBM tuned automatically with hyperopt:")
    tune_hyperopt_lightgbm = tune_hyperopt_model_lightgbm(x_train, y_train, x_test, y_test)
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                tune_hyperopt_lightgbm, "tune_hyperopt_lightgbm", Cap, result)
    print("3. Short-term LightGBM tuned automatically with optuna:")
    tune_optuna_lightgbm = tune_optuna_model_lightgbm(x_train, y_train, x_test, y_test)
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                tune_optuna_lightgbm, "tune_optuna_lightgbm", Cap, result)
    print("4. Short-term LightGBM tuned automatically with RandomizedSearchCV:")
    #tune_RandomizedSearchCV_lightgbm = tune_RandomizedSearchCV_model_lightgbm(x_train, y_train, x_test, y_test)
    #shorttime_model_make_result(x_train, y_train, x_test, y_test,
    #                            tune_RandomizedSearchCV_lightgbm, "tune_RandomizedSearchCV_lightgbm", Cap, result)
    print("5. Short-term LightGBM cross-validation result:")
    cv_lightgbm = CV_model_lightgbm(x_train, y_train, x_test, y_test)
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                cv_lightgbm, "cv_lightgbm", Cap, result)
    print("====================")
    print("1. Short-term Ridge, manually tuned / default parameters:")
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                model_ridge, "ridge", Cap, result)
    print("2. Short-term Ridge tuned automatically with GridSearchCV:")
    tune_GridSearchCV_ridge = tune_GridSearchCV_model_ridge(x_train, y_train, x_test, y_test)
    shorttime_model_make_result(x_train, y_train, x_test, y_test,
                                tune_GridSearchCV_ridge, "tune_GridSearchCV_ridge", Cap, result)
    '''
    print("=== Building the ultra-short-term forecasting dataset ===")
    # build the ultra-short-term forecasting dataset
    train_x, train_y, test_x, test_y, result_, result16 = make_ultrashorttime_data(data)
    print("=== Ultra-short-term forecast: CatBoost automatic vs. manual tuning ===")
    print("1. Ultra-short-term CatBoost, manually tuned / default parameters:")
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     catboost_model, "catboost", Cap, result_)
    print("2. Ultra-short-term CatBoost tuned automatically with hyperopt:")
    tune_hyperopt_catboost_ = tune_hyperopt_catboost_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     tune_hyperopt_catboost_, "tune_hyperopt_catboost_", Cap, result_)
    print("3. Ultra-short-term CatBoost tuned automatically with optuna:")
    tune_optuna_catboost_ = tune_optuna_catboost_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     tune_optuna_catboost_, "tune_optuna_catboost_", Cap, result_)
    print("====================")
    print("1. Ultra-short-term LightGBM, manually tuned / default parameters:")
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     lightgbm_model, "lightgbm", Cap, result_)
    print("2. Ultra-short-term LightGBM tuned automatically with hyperopt:")
    tune_hyperopt_lightgbm_ = tune_hyperopt_lightgbm_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     tune_hyperopt_lightgbm_, "tune_hyperopt_lightgbm_", Cap, result_)
    print("3. Ultra-short-term LightGBM tuned automatically with optuna:")
    tune_optuna_lightgbm_ = tune_optuna_lightgbm_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     tune_optuna_lightgbm_, "tune_optuna_lightgbm_", Cap, result_)
    print("====================")
    print("1. Ultra-short-term Ridge, manually tuned / default parameters:")
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     model_ridge, "ridge", Cap, result_)
    print("2. Ultra-short-term Ridge tuned automatically with GridSearchCV:")
    tune_GridSearchCV_ridge_ = tune_GridSearchCV_ridge_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result(train_x, train_y, test_x, test_y,
                                     tune_GridSearchCV_ridge_, "tune_GridSearchCV_ridge", Cap, result_)
    print("=== Ultra-short-term 16-point forecast: CatBoost automatic vs. manual tuning ===")
    print("1. Ultra-short-term CatBoost, manually tuned / default parameters:")
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              catboost_model, "catboost", Cap, result16)
    print("2. Ultra-short-term CatBoost tuned automatically with hyperopt:")
    # tune_hyperopt_catboost_ = tune_hyperopt_catboost_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              tune_hyperopt_catboost_, "tune_hyperopt_catboost_", Cap, result16)
    print("3. Ultra-short-term CatBoost tuned automatically with optuna:")
    # tune_optuna_catboost_ = tune_optuna_catboost_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              tune_optuna_catboost_, "tune_optuna_catboost_", Cap, result16)
    print("====================")
    print("1. Ultra-short-term LightGBM, manually tuned / default parameters:")
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              lightgbm_model, "lightgbm", Cap, result16)
    print("2. Ultra-short-term LightGBM tuned automatically with hyperopt:")
    # tune_hyperopt_lightgbm_ = tune_hyperopt_lightgbm_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              tune_hyperopt_lightgbm_, "tune_hyperopt_lightgbm_", Cap, result16)
    print("3. Ultra-short-term LightGBM tuned automatically with optuna:")
    # tune_optuna_lightgbm_ = tune_optuna_lightgbm_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              tune_optuna_lightgbm_, "tune_optuna_lightgbm_", Cap, result16)
    print("====================")
    print("1. Ultra-short-term Ridge, manually tuned / default parameters:")
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              model_ridge, "ridge", Cap, result16)
    print("2. Ultra-short-term Ridge tuned automatically with GridSearchCV:")
    # tune_GridSearchCV_ridge_ = tune_GridSearchCV_ridge_model(train_x, train_y, test_x, test_y)
    ultrashorttime_model_make_result_16output(train_x, train_y, test_x, test_y,
                                              tune_GridSearchCV_ridge_, "tune_GridSearchCV_ridge", Cap, result16)