[Kaggle Competition] Ames House Price Regression, Part 2: House Price Prediction with Multi-Model Stacking

Kaggle getting-started competition on house price prediction: House Prices: Advanced Regression Techniques

For Part 1, see: [Kaggle Competition] Part 1: Feature Engineering + House Price Prediction with XGBoost
It covers the initial exploration of the data, the feature-engineering steps, and a simple XGBoost prediction. The stacking in this post builds directly on the analysis and preprocessing done in Part 1, so if you are interested in the whole problem, please read Part 1 first.
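The code below starts from the DataFrames produced in Part 1. For readers starting here, the following is a minimal sketch of the assumed context (an illustrative reconstruction, not the actual Part 1 code): the raw competition CSVs are loaded, SalePrice is log1p-transformed, and df1_train / df1_test are the feature-engineered train and test frames.

# Assumed context from Part 1 (illustrative reconstruction only)
import numpy as np
import pandas as pd

df_train = pd.read_csv('train.csv')   # raw competition training data
df_test = pd.read_csv('test.csv')     # raw competition test data
test_ID = df_test['Id']               # kept for the submission file
df_train['SalePrice'] = np.log1p(df_train['SalePrice'])  # log-transform the target
# df1_train / df1_test: the feature-engineered train/test frames built in Part 1
# (missing-value imputation, categorical encoding, etc.)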

# Separate the (log-transformed) target from the feature matrix built in Part 1
ydata_train = df_train.SalePrice.values
xdata_train = df1_train.drop("SalePrice", axis=1)
df1_test.drop("SalePrice", axis=1, inplace=True)
# Keep working copies of the datasets
xtrain = xdata_train
xtest = df1_test

Stacking

The packages used in what follows:

from sklearn.linear_model import ElasticNet, Lasso,  BayesianRidge, LassoLarsIC
from sklearn.ensemble import RandomForestRegressor,  GradientBoostingRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin, clone
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import mean_squared_error
import xgboost as xgb
import lightgbm as lgb
# Helper: 5-fold cross-validation returning the RMSE on the log prices
n_folds = 5

def rmsle_cv(model):
    kf = KFold(n_folds, shuffle=True, random_state=42)  # shuffled fold split of the training set
    # Score on the feature matrix (SalePrice removed) with the competition's usual RMSE-on-log-price metric
    rmse = np.sqrt(-cross_val_score(model, xdata_train.values, ydata_train,
                                    scoring="neg_mean_squared_error", cv=kf))
    return rmse

Next, build the base learners. Six are used here: Lasso Regression, Elastic Net Regression, Kernel Ridge Regression, Gradient Boosting Regression, XGBoost, and LightGBM. Their hyperparameters can be set to sensible initial values and then tuned with GridSearch (rather slow), or with Bayesian optimization; this post uses Bayesian optimization.
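The tuning code itself is not shown in this post; the following is a minimal sketch of what Bayesian tuning could look like, assuming the bayes_opt package. The choice of tuning the LightGBM learner, the parameter bounds, and the iteration counts are all illustrative, not the settings actually used.

# Hypothetical Bayesian-optimization sketch (bayes_opt package assumed);
# bounds and iteration counts are illustrative only
from bayes_opt import BayesianOptimization

def lgb_cv_score(num_leaves, learning_rate, n_estimators):
    model = lgb.LGBMRegressor(objective='regression',
                              num_leaves=int(num_leaves),
                              learning_rate=learning_rate,
                              n_estimators=int(n_estimators))
    return -rmsle_cv(model).mean()  # bayes_opt maximizes, so negate the RMSE

optimizer = BayesianOptimization(
    f=lgb_cv_score,
    pbounds={'num_leaves': (4, 60),
             'learning_rate': (0.01, 0.2),
             'n_estimators': (200, 2000)},
    random_state=42)
optimizer.maximize(init_points=5, n_iter=25)
print(optimizer.max)  # best score found and the corresponding parameters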

# Base learners: the linear models are wrapped in RobustScaler to reduce the influence of outliers
lasso = make_pipeline(RobustScaler(), Lasso(alpha =0.0005, random_state=1))
ENet = make_pipeline(RobustScaler(), ElasticNet(alpha=0.0005, l1_ratio=.9, random_state=3))
KRR = KernelRidge(alpha=0.6, kernel='polynomial', degree=2, coef0=2.5)
GBoost = GradientBoostingRegressor(n_estimators=3000, learning_rate=0.05,
                                   max_depth=4, max_features='sqrt',
                                   min_samples_leaf=15, min_samples_split=10, 
                                   loss='huber', random_state =5)
model_xgb = xgb.XGBRegressor(colsample_bytree=0.4603, gamma=0.0468, 
                             learning_rate=0.05, max_depth=3, 
                             min_child_weight=1.7817, n_estimators=2200,
                             reg_alpha=0.4640, reg_lambda=0.8571,
                             subsample=0.5213, silent=1,
                             random_state =7, nthread = -1)
model_lgb = lgb.LGBMRegressor(objective='regression',num_leaves=5,
                              learning_rate=0.05, n_estimators=720,
                              max_bin = 55, bagging_fraction = 0.8,
                              bagging_freq = 5, feature_fraction = 0.2319,
                              feature_fraction_seed=9, bagging_seed=9,
                              min_data_in_leaf =6, min_sum_hessian_in_leaf = 11)
score = rmsle_cv(lasso)
print("\nLasso score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(ENet)
print("ElasticNet score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(KRR)
print("Kernel Ridge score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(GBoost)
print("Gradient Boosting score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_xgb)
print("Xgboost score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))
score = rmsle_cv(model_lgb)
print("LGBM score: {:.4f} ({:.4f})\n" .format(score.mean(), score.std()))

Output:

Lasso score: 0.0165 (0.0012)
ElasticNet score: 0.0164 (0.0011)
Kernel Ridge score: 0.0174 (0.0023)
Gradient Boosting score: 0.0762 (0.0080)
Xgboost score: 0.0521 (0.0028)
LGBM score: 0.0647 (0.0074)

As a first, simple ensemble, take the mean of the base learners' predictions as the final prediction, and cross-validate it to see how this plain averaging performs.

class AveragingModels(BaseEstimator, RegressorMixin, TransformerMixin):
    def __init__(self, models):
        self.models = models
        
    # we define clones of the original models to fit the data in
    def fit(self, X, y):
        self.models_ = [clone(x) for x in self.models]
        
        # Train cloned base models
        for model in self.models_:
            model.fit(X, y)
        return self
    
    #Now we do the predictions for cloned models and average them
    def predict(self, X):
        predictions = np.column_stack([model.predict(X) for model in self.models_])
        return np.mean(predictions, axis=1) 
averaged_models = AveragingModels(models = (ENet, GBoost, KRR, lasso))  # average the predictions of several base learners
score = rmsle_cv(averaged_models)  # cross-validate the simple averaging ensemble
print(" Averaged base models score: {:.4f} ({:.4f})\n".format(score.mean(), score.std()))

Output:

Averaged base models score: 0.0228 (0.0019)

Next, a proper stacking ensemble: each base learner is trained on out-of-fold splits of the training data, its out-of-fold predictions are collected as meta-features, and a meta-model (Lasso here) is then fitted on those meta-features.
class StackingAveragedModels(BaseEstimator, RegressorMixin, TransformerMixin):
    def __init__(self, base_models, meta_model, n_folds=5):
        self.base_models = base_models
        self.meta_model = meta_model
        self.n_folds = n_folds
   
    # We again fit the data on clones of the original models
    def fit(self, X, y):
        self.base_models_ = [list() for x in self.base_models]
        self.meta_model_ = clone(self.meta_model)
        kfold = KFold(n_splits=self.n_folds, shuffle=True, random_state=156)
        
        # Train cloned base models then create out-of-fold predictions
        # that are needed to train the cloned meta-model
        out_of_fold_predictions = np.zeros((X.shape[0], len(self.base_models)))
        for i, model in enumerate(self.base_models):
            for train_index, holdout_index in kfold.split(X, y):
                instance = clone(model)
                self.base_models_[i].append(instance)
                instance.fit(X[train_index], y[train_index])
                y_pred = instance.predict(X[holdout_index])
                out_of_fold_predictions[holdout_index, i] = y_pred
                
        # Now train the cloned  meta-model using the out-of-fold predictions as new feature
        self.meta_model_.fit(out_of_fold_predictions, y)
        return self
   
    #Do the predictions of all base models on the test data and use the averaged predictions as 
    #meta-features for the final prediction which is done by the meta-model
    def predict(self, X):
        meta_features = np.column_stack([
            np.column_stack([model.predict(X) for model in base_models]).mean(axis=1)
            for base_models in self.base_models_ ])
        return self.meta_model_.predict(meta_features)
    
stacked_averaged_models = StackingAveragedModels(base_models = (ENet, GBoost, KRR), meta_model = lasso)

score = rmsle_cv(stacked_averaged_models)
print("Stacking Averaged models score: {:.4f} ({:.4f})".format(score.mean(), score.std()))

Output:

Stacking Averaged models score: 0.0142 (0.0011)

The stacked ensemble scores better than the individual base learners and the simple average, so it is used for the final submission, blended with XGBoost and LightGBM.
def rmsle(y, y_pred):
    # The target is already log1p-transformed, so RMSE here is the RMSLE on actual prices
    return np.sqrt(mean_squared_error(y, y_pred))
# Final model training and prediction
stacked_averaged_models.fit(xtrain.values, ydata_train)
stacked_train_pred = stacked_averaged_models.predict(xtrain.values)
stacked_pred = np.expm1(stacked_averaged_models.predict(xtest.values))
print(rmsle(ydata_train, stacked_train_pred))

model_xgb.fit(xtrain, ydata_train)
xgb_train_pred = model_xgb.predict(xtrain)
xgb_pred = np.expm1(model_xgb.predict(xtest))  # invert the log1p transform on SalePrice with expm1
print(rmsle(ydata_train, xgb_train_pred))

model_lgb.fit(xtrain, ydata_train)
lgb_train_pred = model_lgb.predict(xtrain)
lgb_pred = np.expm1(model_lgb.predict(xtest.values))
print(rmsle(ydata_train, lgb_train_pred))
# RMSLE on the full training data for the weighted blend

print('RMSLE score on train data:')
print(rmsle(ydata_train,stacked_train_pred*0.70 +
               xgb_train_pred*0.15 + lgb_train_pred*0.15 ))
ensemble = stacked_pred*0.70 + xgb_pred*0.15 + lgb_pred*0.15  # final blend: 70% stacking, 15% XGBoost, 15% LightGBM
submission = pd.DataFrame({'Id':test_ID,'SalePrice':ensemble})
submission.to_csv('C:\\Users\\rinnko\\Submission.csv',index=False,sep=',')

Output:

0.08048048781965618
0.07821477199548352
0.07385849940712184
RMSLE score on train data:
0.07660642433392315
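The 0.70 / 0.15 / 0.15 blend weights above are fixed by hand. Purely as an illustration (not the procedure used in this post, and keeping in mind that scores on training predictions are optimistic), one could sweep a small grid of weights over the training predictions:

# Hypothetical grid sweep over blend weights (illustration only, not from the original post)
best_weights, best_err = None, float('inf')
for w1 in np.arange(0.50, 0.91, 0.05):                 # weight for the stacked model
    for w2 in np.arange(0.0, 1.0 - w1 + 1e-9, 0.05):   # weight for XGBoost
        w3 = 1.0 - w1 - w2                             # remainder goes to LightGBM
        blend = w1*stacked_train_pred + w2*xgb_train_pred + w3*lgb_train_pred
        err = rmsle(ydata_train, blend)
        if err < best_err:
            best_weights, best_err = (round(w1, 2), round(w2, 2), round(w3, 2)), err
print(best_weights, best_err)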

The resulting submission scores considerably better than the result obtained in Part 1 by predicting with XGBoost alone.
[Figure: submission score, "try2"]
