Exploring Optimal Hyperparameters for Five Machine Learning Models (on a Financial Dataset)

1. The grid-search step itself can overfit: while tuning, GridSearchCV has already seen the entire training set when picking the best parameters, and those parameters are then used to refit the very same training data (effectively peeking at the data before modeling).

2. A better protocol is to split the data three ways, into a training set, a validation set, and a test set; tune parameters against the validation set and reserve the test set for the final model (a minimal sketch follows below).
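A minimal sketch of that three-way split, assuming the X_cl and y variables prepared later in this post; the 60/20/20 ratio and random_state are arbitrary choices:

from sklearn.model_selection import train_test_split

# Hold out a final test set (20% of the data)...
X_tmp, X_test, y_tmp, y_test = train_test_split(X_cl, y, test_size=0.2, random_state=1118)
# ...then split the remainder into train (60% of all data) and validation (20%)
X_train, X_val, y_train, y_val = train_test_split(X_tmp, y_tmp, test_size=0.25, random_state=1118)
# Tune hyperparameters by fitting on (X_train, y_train) and scoring on (X_val, y_val);
# touch (X_test, y_test) only once, for the final chosen model.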

Import packages

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score,precision_score,recall_score,f1_score,roc_auc_score,roc_curve,auc
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost.sklearn import XGBClassifier
from lightgbm.sklearn import LGBMClassifier

Load the data

data=pd.read_csv('./data.csv',index_col=0,encoding='gbk')

Understand the data

# Extract the target column y; the remaining 88 columns form the feature matrix X
y=data['status']
X=data.drop('status',axis=1)
# Shape of X, and the class distribution of y
print('X.shape:',X.shape)
print('y distribution:',y.value_counts())
X.shape: (4754, 88)
y distribution: 0    3561
1    1193
Name: status, dtype: int64

Prepare the data

# First drop features that are clearly uninformative identifiers: id_name, custid, trade_no, bank_card_no
X.drop(['id_name','custid','trade_no','bank_card_no'],axis=1,inplace=True)
print(X.shape)
# Keep only the numeric features
X_num=X.select_dtypes('number').copy()
print(X_num.shape)
type(X_num.mean())  # sanity check: the per-column means form a Series
# Fill missing values with the column means
X_num.fillna(X_num.mean(),inplace=True)
# Inspect the remaining non-numeric variables
X_str=X.select_dtypes(exclude='number').copy()
X_str.describe()
# Encode reg_preference_for_trad as dummy variables (missing values filled with the mode); the other three string columns are discarded
X_str['reg_preference_for_trad'] = X_str['reg_preference_for_trad'].fillna(X_str['reg_preference_for_trad'].mode()[0])
X_str_dummy = pd.get_dummies(X_str['reg_preference_for_trad'])
X_str_dummy.head()
# Merge the numeric variables with the encoded nominal (string) variable
X_cl = pd.concat([X_num,X_str_dummy],axis=1,sort=False)
#X_cl.shape
print(X_cl.head())

# Standardization and min-max normalization
from sklearn import preprocessing

# Min-max scaling to [0, 1] (computed for comparison; not used further below)
min_max_scale = preprocessing.MinMaxScaler()
min_max_data = min_max_scale.fit_transform(X_cl)

# Z-score standardization; this is the version the models are trained on
X_cl = preprocessing.scale(X_cl)

Output of the shape checks in the preparation step:
(4754, 84)
(4754, 80)
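Note that preprocessing.scale above is fit on the full dataset before the train/test split, so the test rows influence the scaling statistics; this is a milder form of the peeking described in note 1. A minimal leak-free sketch, assuming the X_train/X_test split defined below (StandardScaler is the estimator form of preprocessing.scale):

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# Learn the mean and standard deviation from the training rows only...
X_train = scaler.fit_transform(X_train)
# ...and apply those same statistics to the test rows
X_test = scaler.transform(X_test)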

Modeling and evaluation (hyperparameter tuning via grid search)

# Split into training and test sets at a 70/30 ratio
random_state = 1118
X_train,X_test,y_train,y_test = train_test_split(X_cl,y,test_size=0.3,random_state=random_state)
print(X_train.shape)
print(X_test.shape)
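Since y is imbalanced (roughly 25% positive), a stratified split keeps the class ratio identical in both parts. A minimal variant; note that the results below were produced with the unstratified split above:

# Stratified variant of the same split: preserves the ~3:1 class ratio in both sets
X_train, X_test, y_train, y_test = train_test_split(
    X_cl, y, test_size=0.3, random_state=random_state, stratify=y)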

def gridSearch_vali(model,param_grid,cv=5):
    # Cross-validated grid search scored by ROC-AUC; returns the best parameter combination
    print("parameters:{}".format(param_grid))
    grid_search = GridSearchCV(estimator=model,param_grid=param_grid,cv=cv,scoring='roc_auc')
    grid_search.fit(X_train,y_train)
    print(grid_search.best_params_)
    return grid_search.best_params_
    


"""
logistic regression
"""
lr_param_temp = {'C':[0.05,0.1,0.5,1],'penalty':['l1','l2']}
lr = LogisticRegression(solver='liblinear')  # liblinear supports both the l1 and l2 penalties
lr.set_params(**gridSearch_vali(lr,lr_param_temp))
lr.fit(X_train,y_train)
y_train_pred = lr.predict(X_train)
y_test_pred = lr.predict(X_test)
print('Train:{:.4f}'.format(roc_auc_score(y_train, y_train_pred)))
print('Test:{:.4f}'.format(roc_auc_score(y_test, y_test_pred)))
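ROC-AUC is normally computed from predicted probabilities rather than hard 0/1 labels; the hard-label version above reduces the ROC curve to a single operating point. A minimal probability-based variant (the scores reported below still come from the hard-label version):

# Column 1 of predict_proba is the probability of the positive class
y_test_proba = lr.predict_proba(X_test)[:, 1]
print('Test AUC (proba): {:.4f}'.format(roc_auc_score(y_test, y_test_proba)))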

"""
decision tree
"""
dtc_param_temp = {'max_depth':[3,4,5,6]}
dtc = DecisionTreeClassifier()
dtc.set_params(**gridSearch_vali(dtc,dtc_param_temp))
dtc.fit(X_train,y_train)
y_train_pred = dtc.predict(X_train)
y_test_pred = dtc.predict(X_test)
print('Train:{:.4f}'.format(roc_auc_score(y_train, y_train_pred)))
print('Test:{:.4f}'.format(roc_auc_score(y_test, y_test_pred)))

"""
svm
"""
svm_param_temp = {"gamma":[0.01,0.1],"C":[0.01,1]}
# Note: gamma has no effect with a linear kernel, so this grid effectively tunes C only
svm = SVC(kernel='linear',probability=True)
svm.set_params(**gridSearch_vali(svm,svm_param_temp))
svm.fit(X_train,y_train)
y_train_pred = svm.predict(X_train)
y_test_pred = svm.predict(X_test)
print('Train:{:.4f}'.format(roc_auc_score(y_train, y_train_pred)))
print('Test:{:.4f}'.format(roc_auc_score(y_test, y_test_pred)))

"""
xgboost
"""
xgbc_param_temp = {'max_depth':[5,10],'learning_rate':[0.1,1]}
xgbc = XGBClassifier()
xgbc.set_params(**gridSearch_vali(xgbc,xgbc_param_temp))
xgbc.fit(X_train,y_train)
y_train_pred = xgbc.predict(X_train)
y_test_pred = xgbc.predict(X_test)
print('Train:{:.4f}'.format(roc_auc_score(y_train, y_train_pred)))
print('Test:{:.4f}'.format(roc_auc_score(y_test, y_test_pred)))

"""
lightgbm
"""
lgbc_param_temp = {'max_depth':[5,10],'num_leaves':[20,50]}
lgbc = LGBMClassifier()
lgbc.set_params(**gridSearch_vali(lgbc,lgbc_param_temp))
lgbc.fit(X_train,y_train)
y_train_pred = lgbc.predict(X_train)
y_test_pred = lgbc.predict(X_test)
print('Train:{:.4f}'.format(roc_auc_score(y_train, y_train_pred)))
print('Test:{:.4f}'.format(roc_auc_score(y_test, y_test_pred)))
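roc_curve, auc, and matplotlib.pyplot are imported at the top but never used. A minimal sketch that overlays the test-set ROC curves of the five fitted models, reusing the variable names defined above:

# Plot test-set ROC curves for all five fitted models
for name, model in [('LR', lr), ('DT', dtc), ('SVM', svm), ('XGB', xgbc), ('LGB', lgbc)]:
    proba = model.predict_proba(X_test)[:, 1]
    fpr, tpr, _ = roc_curve(y_test, proba)
    plt.plot(fpr, tpr, label='{} (AUC={:.3f})'.format(name, auc(fpr, tpr)))
plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.legend()
plt.show()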
Output (train/test split shapes, then the grid-search result for each model):
(3327, 85)
(1427, 85)

parameters:{'C': [0.05, 0.1, 0.5, 1], 'penalty': ['l1', 'l2']}
{'C': 0.05, 'penalty': 'l1'}
Train:0.6524
Test:0.6120


parameters:{'max_depth': [3, 4, 5, 6]}
{'max_depth': 4}
Train:0.6598
Test:0.6029


parameters:{'gamma': [0.01, 0.1], 'C': [0.01, 1]}
{'C': 0.01, 'gamma': 0.01}
Train:0.6108
Test:0.5935


parameters:{'max_depth': [5, 10], 'learning_rate': [0.1, 1]}
{'learning_rate': 0.1, 'max_depth': 5}
Train:0.8898
Test:0.6352


parameters:{'max_depth': [5, 10], 'num_leaves': [20, 50]}
{'max_depth': 5, 'num_leaves': 20}
Train:0.8732
Test:0.6315