XGBoost in Practice

#!/usr/bin/env python
# -*- coding:utf-8 -*- 
# Author: Jia ShiLin

import pandas as pd
import xgboost as xgb
from sklearn import preprocessing
import numpy as np

# modeling / evaluation imports
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from xgboost import XGBClassifier

path_value = 'data/xlnet_emb_pca_768.npy'
path_label = 'data/signal.npy'
x = np.load(path_value)
y = np.load(path_label)
# standardize the features (zero mean, unit variance)
scaler = StandardScaler().fit(x)
x = scaler.transform(x)


print(x)
print(y)
# train/test split, 15% held out
X_train,X_test,y_train,y_test = train_test_split(x,y,test_size=0.15)
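# Note: the scaler above is fit on the full dataset before splitting, which
# leaks test-set statistics into training. A stricter, reproducible variant
# (a sketch, not part of the original script): split first with stratify=y
# and a fixed random_state, then fit StandardScaler on X_train only and
# transform both splits.
# X_train, X_test, y_train, y_test = train_test_split(
#     x, y, test_size=0.15, stratify=y, random_state=27)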

# eval_set = [(X_test, y_test)]
# model = XGBClassifier()
# model.fit(X_train, y_train, eval_set=eval_set, verbose=False)
# # make predictions for test data
# y_pre = model.predict(X_test)
# predictions = [round(value) for value in y_pre]
# # evaluate predictions
# accuracy = accuracy_score(y_test, predictions)
# print("Accuracy: %.2f%%" % (accuracy * 100.0))

print(X_train.shape, X_test.shape)
#
# # Model parameters for the native xgb.train API (kept for reference):
# params = {'learning_rate': 0.1,
#           'max_depth': 5,
#           'objective': 'multi:softmax',
#           'num_class': 4,
#           'seed': 27,  # the native API uses 'seed', not 'random_state'
#           }
# dtrain = xgb.DMatrix(X_train, label=y_train)
# model = xgb.train(params, dtrain, num_boost_round=20)
# y_pred = model.predict(xgb.DMatrix(X_test))



#XGBClassifier(max_depth=3, learning_rate=0.1, n_estimators=100, silent=True, objective='binary:logistic',
#              booster='gbtree', n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0, subsample=1,
#              colsample_bytree=1, colsample_bylevel=1, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
#              base_score=0.5, random_state=0, seed=None, missing=None, **kwargs)


clf = XGBClassifier(
#    n_estimators=10,  # earlier trial: number of boosting iterations
#    learning_rate=0.03,  # earlier trial: step size
#    max_depth=6,  # earlier trial: maximum tree depth

    n_estimators=300,  # number of boosted trees to fit
    learning_rate=0.01,  # step size, xgb's "eta"
    max_depth=30,  # maximum tree depth for base learners

    min_child_weight=1,  # minimum sum of instance weight needed in a child
    subsample=0.9,  # fraction of samples used to grow each tree (row subsampling)
    colsample_bytree=0.6,  # fraction of features sampled per tree; typical values 0.5-1
    objective='multi:softmax',  # multi-class classification
    num_class=3,
#    nthread=50,  # legacy alias of n_jobs
    n_jobs=50,
    max_delta_step=10,
    reg_lambda=1,  # L2 regularization
    reg_alpha=0,  # L1 regularization
    seed=27)  # deprecated alias of random_state in the sklearn API
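
# The values above are hand-picked; a grid-search sketch for tuning them
# (assumes only the standard sklearn API; not part of the original script):
# from sklearn.model_selection import GridSearchCV
# grid = GridSearchCV(XGBClassifier(objective='multi:softmax', num_class=3),
#                     param_grid={'max_depth': [6, 10, 30],
#                                 'learning_rate': [0.01, 0.1]},
#                     cv=3, scoring='accuracy')
# grid.fit(X_train, y_train)
# print(grid.best_params_)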

print ("training...")
clf.fit(X_train, y_train,  verbose=True)
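
# To watch validation loss and stop early, fit can take an eval_set
# (a sketch; note that in xgboost >= 2.0 early_stopping_rounds moves to
# the XGBClassifier constructor):
# clf.fit(X_train, y_train,
#         eval_set=[(X_test, y_test)],
#         early_stopping_rounds=30, verbose=True)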

clf.save_model('tree300.model')

print('training is ok')
fit_pred = clf.predict(X_test)
print (fit_pred)
count = 0
for i in range(len(fit_pred)):
    if fit_pred[i] == y_test[i]:
        count += 1
print("accuracy:", count / len(y_test))

'''
# Reloading the saved booster later with the native API:
tar = xgb.Booster(model_file='tree300.model')
x_test1 = xgb.DMatrix(X_test)
fit_pred1 = tar.predict(x_test1)
count = 0
for i in range(len(fit_pred1)):
    # argmax applies when the stored objective is 'multi:softprob' (rows of
    # class probabilities); with 'multi:softmax', predict() returns labels directly
    arg = np.argmax(fit_pred1[i])
    if arg == y_test[i]:
        count += 1
print("accuracy:", count / len(y_test))
'''
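
# The sklearn wrapper can reload the same file directly in recent xgboost
# versions (a sketch, not part of the original script):
# clf2 = XGBClassifier()
# clf2.load_model('tree300.model')
# print(clf2.predict(X_test[:5]))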

# Feature importances (the features here form a bare numpy array, so the
# default integer index stands in for column names):
#
# import matplotlib.pyplot as plt
# ww = clf.feature_importances_
# print(ww)
# feat_imp = pd.Series(ww).sort_values(ascending=False)
# feat_imp.to_excel('feature_importance.xlsx')
# feat_imp.plot(kind='bar', title='Feature Importances')
# plt.ylabel('Feature Importance Score')
# plt.show()

 
