cross_validation.KFold與model_selection.KFold的區別

KFold是sklearn中用來做交叉檢驗的,在sklearn 的版本升級中,KFold被挪了地方。

在sklearn 0.18及以上的版本中,sklearn.cross_validation包被廢棄,KFold被挪到了sklearn.model_selection中,本來以爲挪就挪了,用法沒變就行,結果,誰用誰知道。

cross_validation.KFold與model_selection.KFold的不同用法

cross_validation.KFold做交叉驗證

from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import KFold, cross_val_score
from sklearn.metrics import confusion_matrix,recall_score,classification_report

def printing_Kfold_scores(x_train_data, y_train_data):
    """Grid-search the regularization strength C for an L1 logistic regression
    using 5-fold cross-validation (old ``sklearn.cross_validation`` API) and
    return the C value with the highest mean recall.

    Parameters
    ----------
    x_train_data : pandas.DataFrame
        Feature matrix of the training set.
    y_train_data : pandas.DataFrame
        Single-column label frame aligned with ``x_train_data``.

    Returns
    -------
    float
        The C value from ``c_param_range`` with the best mean recall score.
    """
    # Old API: first argument is the number of samples, second the fold count.
    fold = KFold(len(y_train_data), 5, shuffle=False)

    # Regularization penalty coefficients to evaluate.
    c_param_range = [0.01, 0.1, 1, 10, 100]

    # BUG FIX: the original used range(len(c_param_range), 2), i.e. range(5, 2),
    # which is EMPTY — the DataFrame then has zero rows and the next assignment
    # of a length-5 column raises a ValueError. One row per candidate C is needed.
    results_table = pd.DataFrame(index=range(len(c_param_range)),
                                 columns=['C_parameter', 'Mean recall score'])
    results_table['C_parameter'] = c_param_range

    # enumerate replaces the hand-maintained j counter of the original.
    for j, c_param in enumerate(c_param_range):
        print('-------------------------------------------')
        print('C parameter: ', c_param)
        print('-------------------------------------------')
        print('')

        recall_accs = []
        # Old API: iterating `fold` yields (train_indices, test_indices) pairs;
        # enumerate only supplies the 1-based iteration number for the printout.
        for iteration, indices in enumerate(fold, start=1):
            # L1-penalized logistic regression with the current C.
            lr = LogisticRegression(C=c_param, penalty='l1')

            # indices[0] -> training rows, indices[1] -> validation rows.
            lr.fit(x_train_data.iloc[indices[0], :],
                   y_train_data.iloc[indices[0], :].values.ravel())

            y_pred_undersample = lr.predict(x_train_data.iloc[indices[1], :].values)

            # Recall on the held-out fold.
            recall_acc = recall_score(y_train_data.iloc[indices[1], :].values,
                                      y_pred_undersample)

            recall_accs.append(recall_acc)

            print('Iteration ', iteration, ': recall score = ', recall_acc)

        # BUG FIX: DataFrame.ix was deprecated (pandas 0.20) and removed
        # (pandas 1.0); .loc is the supported label-based accessor.
        results_table.loc[j, 'Mean recall score'] = np.mean(recall_accs)
        print('')
        print('Mean recall score ', np.mean(recall_accs))
        print('')
    results_table['Mean recall score'] = results_table['Mean recall score'].astype('float64')
    best_c = results_table.loc[results_table['Mean recall score'].idxmax()]['C_parameter']

    # Finally, we can check which C parameter is the best amongst the chosen.
    print('*********************************************************************************')
    print('Best model to choose from cross validation is with C parameter = ', best_c)
    print('*********************************************************************************')

    return best_c

model_selection.KFold做交叉驗證

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold, cross_val_score
from sklearn.metrics import confusion_matrix,recall_score,classification_report 

def printing_Kfold_scores(x_train_data, y_train_data):
    """Grid-search the regularization strength C for an L1 logistic regression
    using 5-fold cross-validation (new ``sklearn.model_selection`` API) and
    return the C value with the highest mean recall.

    Parameters
    ----------
    x_train_data : pandas.DataFrame
        Feature matrix of the training set.
    y_train_data : pandas.DataFrame
        Single-column label frame aligned with ``x_train_data``.

    Returns
    -------
    float
        The C value from ``c_param_range`` with the best mean recall score.
    """
    # New API: only the fold count is given; the data is passed to .split().
    kf = KFold(n_splits=5, shuffle=False)
    kf.get_n_splits(x_train_data)

    # Regularization penalty coefficients to evaluate.
    c_param_range = [0.01, 0.1, 1, 10, 100]

    # BUG FIX: the original used range(len(c_param_range), 2), i.e. range(5, 2),
    # which is EMPTY — the DataFrame then has zero rows and the next assignment
    # of a length-5 column raises a ValueError. One row per candidate C is needed.
    results_table = pd.DataFrame(index=range(len(c_param_range)),
                                 columns=['C_parameter', 'Mean recall score'])
    results_table['C_parameter'] = c_param_range

    # enumerate replaces the hand-maintained j counter of the original.
    for j, c_param in enumerate(c_param_range):
        print('-------------------------------------------')
        print('C parameter: ', c_param)
        print('-------------------------------------------')
        print('')

        recall_accs = []
        # kf.split yields (train_index, test_index) pairs — the original's
        # names `iteration, indices` were misleading; these are index arrays.
        for train_index, test_index in kf.split(x_train_data):

            # liblinear is required for the L1 penalty in newer sklearn.
            lr = LogisticRegression(C=c_param, penalty='l1', solver='liblinear')

            lr.fit(x_train_data.iloc[train_index, :],
                   y_train_data.iloc[train_index, :].values.ravel())

            y_pred_undersample = lr.predict(x_train_data.iloc[test_index, :].values)

            # Recall on the held-out fold.
            recall_acc = recall_score(y_train_data.iloc[test_index, :].values,
                                      y_pred_undersample)
            recall_accs.append(recall_acc)

            print('recall score = ', recall_acc)

        # BUG FIX: DataFrame.ix was deprecated (pandas 0.20) and removed
        # (pandas 1.0); .loc is the supported label-based accessor.
        results_table.loc[j, 'Mean recall score'] = np.mean(recall_accs)
        print('')
        print('Mean recall score ', np.mean(recall_accs))
        print('')
    results_table['Mean recall score'] = results_table['Mean recall score'].astype('float64')
    best_c = results_table.loc[results_table['Mean recall score'].idxmax()]['C_parameter']

    # Finally, we can check which C parameter is the best amongst the chosen.
    print('*********************************************************************************')
    print('Best model to choose from cross validation is with C parameter = ', best_c)
    print('*********************************************************************************')

    return best_c

在新版中,將數據切分需要兩行代碼:kf = KFold(n_splits=5,shuffle=False)  、 kf.get_n_splits(x_train_data),用for iteration, indices in kf.split(x_train_data):取出,看到iteration和indices裝的是兩段index值,iteration裝了五分之四,indices裝的是五分之一,如下圖

 在舊版本中,將數據切分成n份就是一句代碼:fold = KFold(len(y_train_data),5,shuffle=False),並且切分後用:for iteration, indices in enumerate(fold,start=1):,取出的iteration是1、2、3、4、5這幾個數,indices是上圖中兩部分的合集

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章