Kaggle_Titanic: Analysis of an Ensemble-Method Sample Program

This is an introductory hands-on kernel from Kaggle; my notes and analysis are written in the code comments.

# -*- coding: utf-8 -*-

import pandas as pd
import numpy as np
import re
import sklearn
import xgboost as xgb
import seaborn as sns
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

import plotly.offline as py
py.init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.tools as tls

import warnings
warnings.filterwarnings('ignore')
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier)
from sklearn.svm import SVC
from sklearn.cross_validation import KFold  # legacy API: moved to sklearn.model_selection in 0.18 and removed in 0.20
from sklearn.preprocessing import Imputer   # imported by the original kernel but never actually used below

train = pd.read_csv('/home/wrg/kaggle/titanic/train.csv')
test = pd.read_csv('/home/wrg/kaggle/titanic/test.csv')
PassengerId = test['PassengerId']

train.head(3)

full_data = [train, test]


train['Name_length'] = train['Name'].apply(len)
test['Name_length'] = test['Name'].apply(len)
train['Has_Cabin'] = train['Cabin'].apply(lambda x: 0 if type(x) == float else 1)
test['Has_Cabin'] = test['Cabin'].apply(lambda x: 0 if type(x) == float else 1)  # I originally wrote train instead of test here, which broke the run and cost two hours of bug hunting


for dataset in full_data:  # full_data has exactly two elements: train and test
    dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1

for dataset in full_data:
    dataset['IsAlone'] = 0
    dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1

for dataset in full_data:
    dataset['Embarked'] = dataset['Embarked'].fillna('S')

for dataset in full_data:
    dataset['Fare'] = dataset['Fare'].fillna(train['Fare'].median())
train['CategoricalFare'] = pd.qcut(train['Fare'], 4)  # used only to derive the quartile edges for discretising Fare below; dropped later

for dataset in full_data:
    age_avg = dataset['Age'].mean()
    age_std = dataset['Age'].std()
    age_null_count = dataset['Age'].isnull().sum()
    age_null_random_list = np.random.randint(age_avg - age_std,
                                             age_avg + age_std,
                                             size = age_null_count)
    dataset['Age'][np.isnan(dataset['Age'])] = age_null_random_list  # boolean-mask assignment: fill the missing ages with random integers drawn from [mean - std, mean + std)
    dataset['Age'] = dataset['Age'].astype(int)
train['CategoricalAge'] = pd.cut(train['Age'], 5)  # used only to derive the equal-width age bins below; dropped later
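# A quick sanity check (a hedged sketch: the exact bin edges depend on the data) shows where
# the hard-coded thresholds used further below come from. pd.qcut splits Fare into quartiles
# (edges around 7.91, 14.454 and 31 on the standard train.csv), while pd.cut splits Age into
# five equal-width bins (width 16 on the 0-80 range).
# print(train['CategoricalFare'].value_counts())
# print(train['CategoricalAge'].value_counts())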

def get_title(name):
    title_search = re.search(' ([A-Za-z]+)\.', name)  # regex: capture the word preceded by a space and followed by a period, i.e. the title such as "Mr." or "Miss."
    if title_search:
        return title_search.group(1)  # group(0) is the whole match (e.g. " Mr."), group(1) is just the captured title (e.g. "Mr")
    return ""
for dataset in full_data:
    dataset['Title'] = dataset['Name'].apply(get_title)  # apply maps get_title over every Name

for dataset in full_data:
    dataset['Title'] = dataset['Title'].replace(['Lady', 'Countess','Capt',
                                                 'Col','Don', 'Dr', 'Major', 
                                                 'Rev', 'Sir', 'Jonkheer', 'Dona'],
                                                'Rare')
    dataset['Title'] = dataset['Title'].replace('Mlle','Miss')
    dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
    dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs')

for dataset in full_data:
    dataset['Sex'] = dataset['Sex'].map({'female': 0, 'male': 1}).astype(int)

    title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}  # the key must be "Rare" (capital R) to match the replacement above, otherwise rare titles fall through to fillna(0)
    dataset['Title'] = dataset['Title'].map(title_mapping)
    dataset['Title'] = dataset['Title'].fillna(0)

    dataset['Embarked'] = dataset['Embarked'].map({'S': 0, 'C': 1, 'Q': 2}).astype(int)

    dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
    dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1
    dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2
    dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
    dataset['Fare'] = dataset['Fare'].astype(int)

    dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
    dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
    dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
    dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
    dataset.loc[ dataset['Age'] > 64, 'Age'] = 4

drop_elements = ['PassengerId', 'Name', 'Ticket', 'Cabin', 'SibSp']
train = train.drop(drop_elements, axis = 1)
train = train.drop(['CategoricalAge', 'CategoricalFare'], axis = 1)
test = test.drop(drop_elements, axis = 1)

train.head(3)

colormap = plt.cm.RdBu  # cm: colormap; RdBu: red-white-blue diverging map
plt.figure(figsize=(14,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)  # y: offset of the title above the axes; size: font size
sns.heatmap(train.astype(float).corr(), linewidths=0.1, vmax=1.0,
            square=True, cmap=colormap, linecolor='white', annot=True)
# linewidths: width of the lines dividing the cells; vmax: upper end of the colorbar range
# square: if True, set the axes aspect to "equal" so every cell is square
# cmap: the mapping from data values to colour space; if not provided, the default depends on whether center is set
# annot: if True, write the data value in each cell; an array of the same shape as the data can be passed to annotate with other values
# Takeaway from the plot: there is not much redundant or highly correlated information in the training features (by Pearson correlation)

# The pairplot below did not render for me because of a Python/seaborn version issue
#g = sns.pairplot(train[['Survived', 'Pclass', 'Sex', 'Age', 'Parch', 'Fare', 'Embarked',
#      'FamilySize', 'Title']], hue='Survived', palette = 'seismic',size=1.2,diag_kind = 'kde',diag_kws=dict(shade=True),plot_kws=dict(s=10))
#g.set(xticklabels=[])

ntrain = train.shape[0]
ntest = test.shape[0]
SEED = 0
NFOLDS = 5
kf = KFold(ntrain, n_folds= NFOLDS, random_state=SEED)

# KFold documentation: http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.KFold.html#sklearn.cross_validation.KFold
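# A minimal sketch of how this legacy KFold behaves (it is directly iterable and yields
# (train_index, test_index) pairs over range(n)), which is exactly what get_oof below relies on:
# for i, (train_index, test_index) in enumerate(KFold(10, n_folds=5)):
#     print(i, train_index, test_index)
# 0 [2 3 4 5 6 7 8 9] [0 1]
# 1 [0 1 4 5 6 7 8 9] [2 3]
# ... and so on for the remaining folds.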

# Helper class that wraps a scikit-learn classifier so all base models share one interface
class SklearnHelper(object):
    def __init__(self, clf, seed=0, params=None):
        params['random_state'] = seed
        self.clf = clf(**params)  # clf is the estimator class; **params unpacks the parameter dict into keyword arguments

    def train(self, x_train, y_train):
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        return self.clf.predict(x)

    def fit(self,x,y):
        return self.clf.fit(x,y)

    def feature_importances(self,x,y):
        print(self.clf.fit(x,y).feature_importances_)

# Out-of-fold (OOF) prediction function -- the heart of the stacking scheme (this part confused me at first)
def get_oof(clf, x_train, y_train, x_test):  # x_train, y_train: training features/labels; x_test: test features whose labels we want to predict
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    oof_test_skf = np.empty((NFOLDS, ntest))  # one row of test-set predictions per fold

    for i, (train_index, test_index) in enumerate(kf):  # the legacy KFold object is iterable and yields (train_index, test_index) pairs; i is the fold number
        x_tr = x_train[train_index]  # fold i training features
        y_tr = y_train[train_index]  # fold i training labels
        x_te = x_train[test_index]   # fold i held-out (validation) features
        clf.train(x_tr, y_tr)        # fit the model on the k-1 training folds
        oof_train[test_index] = clf.predict(x_te)  # predict the held-out fold, so every training row gets exactly one out-of-fold prediction
        oof_test_skf[i, :] = clf.predict(x_test)   # also predict the real test set with this fold's model

    oof_test[:] = oof_test_skf.mean(axis=0)  # average the NFOLDS test-set predictions (axis=0 averages over folds)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)  # reshape to column vectors so they can be stacked side by side later
# reshape(-1, 1) example:
# a = np.array([1, 2, 3, 4])
# a.reshape((1, -1))  -> array([[1, 2, 3, 4]])
# a.reshape((-1, 1))  -> array([[1],
#                               [2],
#                               [3],
#                               [4]])
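# Shape summary (follows directly from the code above): each call to get_oof returns
# oof_train with shape (ntrain, 1) and oof_test with shape (ntest, 1). Stacking the five
# base classifiers' columns side by side later gives the (n_samples, 5) arrays that the
# second-level XGBoost model is trained and evaluated on.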

# Build the base (first-level) classifiers:
# Random Forest, Extra Trees, AdaBoost, Gradient Boosting, Support Vector Machine
# First, the parameters for each classifier
rf_params = {
    'n_jobs': -1,
    'n_estimators': 500,
     'warm_start': True, 
     #'max_features': 0.2,
    'max_depth': 6,
    'min_samples_leaf': 2,
    'max_features' : 'sqrt',
    'verbose': 0
}

et_params = {
    'n_jobs': -1,
    'n_estimators':500,
    #'max_features': 0.5,
    'max_depth': 8,
    'min_samples_leaf': 2,
    'verbose': 0
}

ada_params = {
    'n_estimators': 500,
    'learning_rate' : 0.75
}

gb_params = {
    'n_estimators': 500,
     #'max_features': 0.2,
    'max_depth': 5,
    'min_samples_leaf': 2,
    'verbose': 0
}

svc_params = {
    'kernel' : 'linear',
    'C' : 0.025
    }


# Instantiate the five base models through the SklearnHelper class defined above
rf = SklearnHelper(clf=RandomForestClassifier, seed=SEED, params=rf_params)
et = SklearnHelper(clf=ExtraTreesClassifier, seed=SEED, params=et_params)
ada = SklearnHelper(clf=AdaBoostClassifier, seed=SEED, params=ada_params)
gb = SklearnHelper(clf=GradientBoostingClassifier, seed=SEED, params=gb_params)
svc = SklearnHelper(clf=SVC, seed=SEED, params=svc_params)

# Create NumPy arrays to feed the models
y_train = train['Survived'].ravel()  # ravel vs flatten (ravel returns a view when possible, flatten always copies): http://blog.csdn.net/lanchunhui/article/details/50354978
train = train.drop(['Survived'], axis=1)
x_train = train.values 
x_test = test.values


# Use get_oof defined above to produce the first-level (base classifier) outputs
et_oof_train, et_oof_test = get_oof(et, x_train, y_train, x_test) # Extra Trees
rf_oof_train, rf_oof_test = get_oof(rf,x_train, y_train, x_test) # Random Forest
ada_oof_train, ada_oof_test = get_oof(ada, x_train, y_train, x_test) # AdaBoost 
gb_oof_train, gb_oof_test = get_oof(gb,x_train, y_train, x_test) # Gradient Boost
svc_oof_train, svc_oof_test = get_oof(svc,x_train, y_train, x_test) # Support Vector Classifier

print("Training is complete")

# Show each base classifier's feature importances; almost every tree-based sklearn estimator exposes a feature_importances_ attribute (SVC does not, which is why it is missing below)
rf_feature = rf.feature_importances(x_train,y_train)
et_feature = et.feature_importances(x_train, y_train)
ada_feature = ada.feature_importances(x_train, y_train)
gb_feature = gb.feature_importances(x_train,y_train)
# feature_importances() above only prints the values, so they are copied and pasted into lists by hand
rf_features = [ 0.13489756, 0.18166321, 0.02758151, 0.02169647, 0.06888994, 0.02290583, 
               0.10921789, 0.06505552, 0.06656376, 0.01332795, 0.28820037]
et_features = [ 0.12543729, 0.36809979, 0.0264444, 0.01642862, 0.05474128, 0.0273463, 
               0.04570301, 0.08072946, 0.04706416, 0.01955174, 0.18845394]
ada_features = [ 0.038, 0.01, 0.014, 0.066, 0.036, 0.008, 0.75, 0.002, 0.046, 0.006, 0.024]
gb_features = [ 0.07536723, 0.04250414, 0.12307408, 0.03138359, 0.09732762, 0.03925738, 
               0.39045526, 0.01596467, 0.07150022, 0.02626842, 0.08689739]

cols = train.columns.values
# Create a dataframe with features
feature_dataframe = pd.DataFrame( {'features': cols,
     'Random Forest feature importances': rf_features,
     'Extra Trees  feature importances': et_features,
      'AdaBoost feature importances': ada_features,
    'Gradient Boost feature importances': gb_features
    })
feature_dataframe.head()
# Scatter plots of the feature importances reported by the different classifiers
#import plotly.graph_objs as go

trace = go.Scatter(
    y = feature_dataframe['Random Forest feature importances'].values,
    x = feature_dataframe['features'].values,
    mode='markers',
    marker=dict(
        sizemode = 'diameter',
        sizeref = 1,
        size = 25,
#       size= feature_dataframe['AdaBoost feature importances'].values,
        #color = np.random.randn(500), #set color equal to a variable
        color = feature_dataframe['Random Forest feature importances'].values,
        colorscale='Portland',
        showscale=True
    ),
    text = feature_dataframe['features'].values
)
data = [trace]

layout= go.Layout(
    autosize= True,
    title= 'Random Forest Feature Importance',
    hovermode= 'closest',
#     xaxis= dict(
#         title= 'Pop',
#         ticklen= 5,
#         zeroline= False,
#         gridwidth= 2,
#     ),
    yaxis=dict(
        title= 'Feature Importance',
        ticklen= 5,
        gridwidth= 2
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='scatter2010')  # see the plotly documentation for the marker/layout options used above

trace = go.Scatter(
    y = feature_dataframe['Extra Trees  feature importances'].values,
    x = feature_dataframe['features'].values,
    mode='markers',
    marker=dict(
        sizemode = 'diameter',
        sizeref = 1,
        size = 25,
#       size= feature_dataframe['AdaBoost feature importances'].values,
        #color = np.random.randn(500), #set color equal to a variable
        color = feature_dataframe['Extra Trees  feature importances'].values,
        colorscale='Portland',
        showscale=True
    ),
    text = feature_dataframe['features'].values
)
data = [trace]

layout= go.Layout(
    autosize= True,
    title= 'Extra Trees Feature Importance',
    hovermode= 'closest',
#     xaxis= dict(
#         title= 'Pop',
#         ticklen= 5,
#         zeroline= False,
#         gridwidth= 2,
#     ),
    yaxis=dict(
        title= 'Feature Importance',
        ticklen= 5,
        gridwidth= 2
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')

# Scatter plot 
trace = go.Scatter(
    y = feature_dataframe['AdaBoost feature importances'].values,
    x = feature_dataframe['features'].values,
    mode='markers',
    marker=dict(
        sizemode = 'diameter',
        sizeref = 1,
        size = 25,
#       size= feature_dataframe['AdaBoost feature importances'].values,
        #color = np.random.randn(500), #set color equal to a variable
        color = feature_dataframe['AdaBoost feature importances'].values,
        colorscale='Portland',
        showscale=True
    ),
    text = feature_dataframe['features'].values
)
data = [trace]

layout= go.Layout(
    autosize= True,
    title= 'AdaBoost Feature Importance',
    hovermode= 'closest',
#     xaxis= dict(
#         title= 'Pop',
#         ticklen= 5,
#         zeroline= False,
#         gridwidth= 2,
#     ),
    yaxis=dict(
        title= 'Feature Importance',
        ticklen= 5,
        gridwidth= 2
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')

# Scatter plot 
trace = go.Scatter(
    y = feature_dataframe['Gradient Boost feature importances'].values,
    x = feature_dataframe['features'].values,
    mode='markers',
    marker=dict(
        sizemode = 'diameter',
        sizeref = 1,
        size = 25,
#       size= feature_dataframe['AdaBoost feature importances'].values,
        #color = np.random.randn(500), #set color equal to a variable
        color = feature_dataframe['Gradient Boost feature importances'].values,
        colorscale='Portland',
        showscale=True
    ),
    text = feature_dataframe['features'].values
)
data = [trace]

layout= go.Layout(
    autosize= True,
    title= 'Gradient Boosting Feature Importance',
    hovermode= 'closest',
#     xaxis= dict(
#         title= 'Pop',
#         ticklen= 5,
#         zeroline= False,
#         gridwidth= 2,
#     ),
    yaxis=dict(
        title= 'Feature Importance',
        ticklen= 5,
        gridwidth= 2
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig,filename='scatter2010')

# Add a new column holding the row-wise mean of the importances
feature_dataframe['mean'] = feature_dataframe.mean(axis= 1)  # axis=1: mean of each row; axis=0 would be the mean of each column

# Plot the mean feature importances
y = feature_dataframe['mean'].values
x = feature_dataframe['features'].values
data = [go.Bar(
            x= x,
             y= y,
            width = 0.5,
            marker=dict(
               color = feature_dataframe['mean'].values,
            colorscale='Portland',
            showscale=True,
            reversescale = False
            ),
            opacity=0.6
        )]

layout= go.Layout(
    autosize= True,
    title= 'Barplots of Mean Feature Importance',
    hovermode= 'closest',
#     xaxis= dict(
#         title= 'Pop',
#         ticklen= 5,
#         zeroline= False,
#         gridwidth= 2,
#     ),
    yaxis=dict(
        title= 'Feature Importance',
        ticklen= 5,
        gridwidth= 2
    ),
    showlegend= False
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='bar-direct-labels')

# Store the base classifiers' out-of-fold predictions for the second-level model
base_predictions_train = pd.DataFrame( {'RandomForest': rf_oof_train.ravel(),
     'ExtraTrees': et_oof_train.ravel(),
     'AdaBoost': ada_oof_train.ravel(),
      'GradientBoost': gb_oof_train.ravel()
    })

# Correlation heatmap of the base models' predictions
data = [
    go.Heatmap(
        z= base_predictions_train.astype(float).corr().values ,
        x=base_predictions_train.columns.values,
        y= base_predictions_train.columns.values,
          colorscale='Viridis',
            showscale=True,
            reversescale = True
    )
]
py.iplot(data, filename='labelled-heatmap')

x_train = np.concatenate(( et_oof_train, rf_oof_train, ada_oof_train, gb_oof_train, svc_oof_train), axis=1)
x_test = np.concatenate(( et_oof_test, rf_oof_test, ada_oof_test, gb_oof_test, svc_oof_test), axis=1)
# The first-level train/test predictions are now concatenated column-wise and become the features for the second-level learner

# Second-level learning model via XGBoost
gbm = xgb.XGBClassifier(
    #learning_rate = 0.02,
 n_estimators= 2000,
 max_depth= 4,
 min_child_weight= 2,
 #gamma=1,
 gamma=0.9,                        
 subsample=0.8,
 colsample_bytree=0.8,
 objective= 'binary:logistic',
 nthread= -1,
 scale_pos_weight=1).fit(x_train, y_train)  # I am not yet very familiar with XGBoost, so see the official documentation for what each parameter does
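# A rough gloss of the main parameters (based on the XGBoost documentation; worth
# double-checking against the installed version):
#   n_estimators      - number of boosting rounds (trees)
#   max_depth         - maximum depth of each tree
#   min_child_weight  - minimum sum of instance weight (hessian) needed in a child node
#   gamma             - minimum loss reduction required to make a further split
#   subsample         - fraction of training rows sampled for each tree
#   colsample_bytree  - fraction of features sampled for each tree
#   objective         - 'binary:logistic' for binary classification
#   scale_pos_weight  - weighting factor to balance positive/negative classes
#   nthread           - number of parallel threads (-1 = use all cores)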
predictions = gbm.predict(x_test)

# Generate a properly formatted submission file: with the first- and second-level models trained and fitted, write the predictions in the format required by the Titanic competition
StackingSubmission = pd.DataFrame({ 'PassengerId': PassengerId,
                            'Survived': predictions })
StackingSubmission.to_csv("StackingSubmission.csv", index=False)
