Kaggle House Price Prediction

Problem description
Code

A classic regression problem; this post walks through the full pipeline once.

1) Import packages

	import pandas as pd
	import numpy as np 
	import matplotlib.pyplot as plt 
	%matplotlib inline

2) Load the training and test sets

	train_df = pd.read_csv('house_price_data/train.csv', index_col=0)
	test_df = pd.read_csv('house_price_data/test.csv', index_col=0)

	# Inspect the data
	train_df.head()

	# Check for zero (missing) labels
	(train_df['SalePrice'] == 0).sum()

	# Smooth the training labels with a log transform
	y_train = np.log1p(train_df.pop('SalePrice'))
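
SalePrice is right-skewed, which is why the labels are log-transformed. A quick sketch (not in the original; it rebuilds the raw prices with expm1 since SalePrice was popped above) to compare the two distributions:

	# Compare the label distribution before and after log1p
	fig, axes = plt.subplots(1, 2, figsize=(10, 4))
	axes[0].hist(np.expm1(y_train), bins=50)
	axes[0].set_title("SalePrice (raw, right-skewed)")
	axes[1].hist(y_train, bins=50)
	axes[1].set_title("log1p(SalePrice)")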

3) Feature engineering

	# Process the features of the training and test data together
	all_df = pd.concat((train_df, test_df), axis=0)

	# Selectively convert numeric features that are really category codes:
	# MSSubClass is a label, not a quantity, so cast it to string
	all_df['MSSubClass'] = all_df['MSSubClass'].astype(str)

	# One-hot encode the categorical features
	all_df_dummy = pd.get_dummies(all_df)

	# Fill missing values with the column means
	mean_cols = all_df_dummy.mean()
	all_df_dummy = all_df_dummy.fillna(mean_cols)

	# Standardize the numeric features
	numeric_cols = all_df.columns[all_df.dtypes != 'object']
	numeric_cols_mean = all_df_dummy.loc[:, numeric_cols].mean()
	numeric_cols_std = all_df_dummy.loc[:, numeric_cols].std()
	all_df_dummy.loc[:, numeric_cols] = (all_df_dummy.loc[:, numeric_cols] - numeric_cols_mean) / numeric_cols_std
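
As a quick sanity check (not part of the original code), the mean-fill should leave no NaNs, and the standardized numeric columns should now have unit standard deviation while the 0/1 dummy columns stay untouched:

	# Any remaining NaNs after mean imputation? (expect 0)
	print(all_df_dummy.isnull().sum().sum())
	# Standardized numeric columns should now have std close to 1
	print(all_df_dummy.loc[:, numeric_cols].std().round(2).head())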

4) Build the models

	train_df_dummy = all_df_dummy.loc[train_df.index]
	test_df_dummy = all_df_dummy.loc[test_df.index]

	# Convert the DataFrames to numpy arrays
	X_train = train_df_dummy.values
	X_test = test_df_dummy.values
	X_train.shape, X_test.shape
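
A small check (assumed, not in the original) that splitting the combined frame back apart preserved the original row counts:

	# Row counts should match the original train/test splits
	assert X_train.shape[0] == len(y_train)
	assert X_test.shape[0] == len(test_df)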

	# 4.1 Ridge regression
	from sklearn.linear_model import Ridge
	from sklearn.model_selection import cross_val_score

	# Grid search over the regularization strength alpha
	alphas = np.logspace(-3, 2, 50)
	test_scores = []
	for alpha in alphas:
	    clf = Ridge(alpha=alpha)
	    # 10-fold cross validation; average the RMSE across folds
	    test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=10, scoring='neg_mean_squared_error'))
	    test_scores.append(np.mean(test_score))

	plt.plot(alphas, test_scores)
	plt.title("Alpha vs Cross Validation Error")  # From the plot, alpha ≈ 15 gives the lowest error

	# 4.2 Random forest
	from sklearn.ensemble import RandomForestRegressor

	# Grid search over max_features (the fraction of features considered at each split)
	max_features = [.1, .3, .5, .7, .9, .99]
	test_scores = []
	for max_feat in max_features:
	    clf = RandomForestRegressor(n_estimators=200, max_features=max_feat)  # n_estimators is the number of trees
	    test_score = np.sqrt(-cross_val_score(clf, X_train, y_train, cv=5, scoring='neg_mean_squared_error'))
	    test_scores.append(np.mean(test_score))

	plt.plot(max_features, test_scores)
	plt.title("max_features vs Cross Validation Error")  # From the plot, max_features = 0.3 works best

5) Model ensembling

	# Define the models with the tuned hyperparameters
	ridge = Ridge(alpha=15)
	rf = RandomForestRegressor(n_estimators=500, max_features=0.3)

	# Fit on the full training set
	ridge.fit(X_train, y_train)
	rf.fit(X_train, y_train)

	# Predict, then undo the log1p applied to the training labels
	y_ridge = np.expm1(ridge.predict(X_test))
	y_rf = np.expm1(rf.predict(X_test))

	# For a regression problem, ensemble by averaging the predictions
	y_final = (y_ridge + y_rf) / 2
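
A plain mean weights both models equally. A hypothetical variant (not in the original) is an unequal weighting, for instance if cross validation suggested one model is more reliable; the 0.6/0.4 split below is purely illustrative:

	# Illustrative weighted average; the weights are not tuned here
	y_final_weighted = 0.6 * y_ridge + 0.4 * y_rf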

6) Submit the results (as a DataFrame)

	submission_df = pd.DataFrame(data={'SalePrice': y_final}, index=test_df.index)
	submission_df.to_csv("house_price_submission.csv")
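
A final look at the file layout before uploading (not in the original): because test.csv was read with index_col=0, the index carries the Id column, so to_csv writes an Id,SalePrice header, matching the Kaggle submission format.

	# Inspect the first few rows of the submission
	print(submission_df.head())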