模型選型
模型調參
1 模型選型
採用了xgboost和lightGBM以及它倆的加權模型
2 模型調參
主要對葉子節點數、學習率以及估計器數量(n_estimators)等參數進行調整
def xgb_model_fit(self,
                  X_train, X_test, y_train, y_test, alg, useTrainCV=True, cv_folds=5, early_stopping_rounds=50):
    """Fit an XGBoost regressor and report train/test MAE.

    When ``useTrainCV`` is true, the number of boosting rounds is first
    tuned with xgboost's built-in cross-validation before fitting.

    Args:
        X_train, X_test, y_train, y_test: train/test splits.
        alg: an ``xgboost.XGBRegressor``-style estimator.
        useTrainCV: tune ``n_estimators`` via ``xgb.cv`` before fitting.
        cv_folds: number of CV folds.
        early_stopping_rounds: CV early-stopping patience.
    """
    if useTrainCV:
        # Let xgb.cv stop early; the number of result rows is then the
        # effective best round count, which we write back into the estimator.
        params = alg.get_xgb_params()
        dtrain = xgb.DMatrix(X_train, label=y_train)
        cv_results = xgb.cv(params, dtrain,
                            num_boost_round=alg.get_params()['n_estimators'],
                            nfold=cv_folds, metrics='mae',
                            early_stopping_rounds=early_stopping_rounds)
        alg.set_params(n_estimators=cv_results.shape[0])
    # Fit the (possibly re-tuned) model on the training split.
    alg.fit(X_train, y_train, eval_metric='mae')
    # Predict both splits.
    pred_train = alg.predict(X_train)
    pred_test = alg.predict(X_test)
    # Regression metric on the training split.
    print("training mean_absolute_error is : ")
    print(mean_absolute_error(y_train, pred_train))
    # ...and on the held-out test split.
    print("test mean_absolute_error is : ")
    print(mean_absolute_error(y_test, pred_test))
    # Feature-importance plot.
    plt.ylabel('Feature Importance is')
    plot_importance(alg)
    plt.show()
def bak_log(self, x_list):
    """Invert a natural-log transform: return ``[e**x for x in x_list]``.

    The targets were log-transformed upstream, so this maps predictions
    back to the original scale.  The previous version used the truncated
    constant ``2.72`` instead of Euler's number, which biases every
    recovered value; ``math.exp`` is exact.

    Args:
        x_list: iterable of log-scale values.

    Returns:
        list[float]: the element-wise exponentials.
    """
    import math  # local import keeps this method self-contained
    return [math.exp(x) for x in x_list]
def light_gbm_model_fit(self, X_train, X_test, y_train, y_test):
    """Fit a LightGBM regressor, report MAE, then grid-search learning rate
    and estimator count and report MAE of the best model.

    Fixes over the previous version:
      * ``self.bak_log(...)`` is used instead of constructing throwaway
        ``DataSearch()`` instances just to call the method.
      * Feature importances of the grid-searched model are read from
        ``best_estimator_`` — ``GridSearchCV`` itself does not expose
        ``feature_importances_``.

    Args:
        X_train, X_test, y_train, y_test: train/test splits; the targets
            are assumed to be log-transformed (hence ``bak_log`` below).
    """
    gbm = lgb.LGBMRegressor(objective='regression', num_leaves=175, learning_rate=0.05, n_estimators=20)  # num_leaves=31
    # NOTE(review): `early_stopping_rounds` in fit() was removed in
    # lightgbm 4.x (use callbacks) — confirm the pinned version.
    gbm.fit(X_train, y_train, eval_set=[(X_test, y_test)], eval_metric='l1', early_stopping_rounds=5)
    print('Start lightgbm predicting...')
    # Predict both splits at the early-stopped best iteration.
    y_train_pred = gbm.predict(X_train, num_iteration=gbm.best_iteration_)
    y_test_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration_)
    # Evaluate on the original scale by undoing the log transform.
    print('The y_train mae of test prediction is:', mean_absolute_error(self.bak_log(y_train), self.bak_log(y_train_pred)))
    print('The y_test mae of test prediction is:', mean_absolute_error(self.bak_log(y_test), self.bak_log(y_test_pred)))
    # feature importances
    print('Feature importances:', list(gbm.feature_importances_))
    # Grid search over learning rate and number of estimators.
    estimator = lgb.LGBMRegressor(num_leaves=175, metrics='mae', max_depth=7, min_child_samples=1000)  # num_leaves=64
    param_grid = {
        'learning_rate': [0.01, 0.1, 1],
        'n_estimators': [20, 40]
    }
    gbm_grid = GridSearchCV(estimator, param_grid)
    gbm_grid = gbm_grid.fit(X_train, y_train)
    print("用網格搜索的方式開始進行預測")
    print('Best parameters found by grid search are:', gbm_grid.best_params_)
    # Predict with the refitted best estimator (GridSearchCV delegates predict).
    y_train_pred = gbm_grid.predict(X_train)
    y_test_pred = gbm_grid.predict(X_test)
    # Evaluate the grid-searched model (log scale, matching the original).
    print('grid search cv The y_train mae of test prediction is:', mean_absolute_error(y_train, y_train_pred))
    print('grid search cv The y_test mae of test prediction is:', mean_absolute_error(y_test, y_test_pred))
    # feature importances of the best refitted model
    print('Feature importances:', list(gbm_grid.best_estimator_.feature_importances_))
結果
反對數(bak_log)還原後,測試集預測的誤差為:776.484369
待續