【python】Case study: building a decision tree model with sklearn

import matplotlib.pyplot as plt
import pandas as pd

from sklearn.datasets import fetch_california_housing  # the old sklearn.datasets.california_housing path is private and removed in newer versions
housing = fetch_california_housing()
print(housing.DESCR)

print(housing.data.shape)
print(housing.data[0])
from sklearn import tree
dtr = tree.DecisionTreeRegressor(max_depth = 2)
dtr.fit(housing.data[:, [6, 7]], housing.target)  # fit builds the dtr model from the two selected features; general pattern: model.fit(X, y)

# To visualize the tree you first need to install graphviz:   http://www.graphviz.org/Download..php
dot_data = \
    tree.export_graphviz(
        dtr,
        out_file = None,
        feature_names = housing.feature_names[6:8],
        filled = True,
        impurity = False,
        rounded = True
    )
#pip install pydotplus
import pydotplus
graph = pydotplus.graph_from_dot_data(dot_data)
graph.get_nodes()[7].set_fillcolor("#FFF2DD")
from IPython.display import Image
#Image(graph.create_png())  # rendering the tree inline raised an error here; unresolved
#graph.write_png("dtr_white_background.png")  # alternatively, write the rendered tree to a PNG file

from sklearn.model_selection import train_test_split  # split the data into a training set and a test set
data_train, data_test, target_train, target_test = \
    train_test_split(housing.data, housing.target, test_size=0.1, random_state=42)  # random_state fixes the random seed so the split is reproducible
dtr = tree.DecisionTreeRegressor(random_state=42)  # (1) decision tree regressor
dtr.fit(data_train, target_train)  # model.fit(X, y) trains the tree on the training set
dtr.score(data_test, target_test)  # score returns the R^2 of the predictions on the test set

from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor(random_state=42)  # (2) RandomForestRegressor: a random forest of decision trees
rfr.fit(data_train, target_train)
rfr.score(data_test, target_test)

from sklearn.model_selection import GridSearchCV  # GridSearchCV does an exhaustive grid search: give it the candidate parameters and it returns the best combination and its score. It is only practical on small data sets; once the data gets large the search becomes very slow. (The old sklearn.grid_search module has been removed.)
tree_param_grid = {'min_samples_split': [3, 6, 9], 'n_estimators': [10, 50, 100]}
grid = GridSearchCV(RandomForestRegressor(), param_grid=tree_param_grid, cv=5)  # cv=5 means 5-fold cross-validation for every parameter combination
grid.fit(data_train, target_train)
print(grid.cv_results_, grid.best_params_, grid.best_score_)  # grid_scores_ was removed from newer sklearn; cv_results_ holds the per-combination scores

rfr = RandomForestRegressor( min_samples_split=3,n_estimators = 100,random_state = 42)
rfr.fit(data_train, target_train)
rfr.score(data_test, target_test)
pd.Series(rfr.feature_importances_, index = housing.feature_names).sort_values(ascending = False)
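
Since matplotlib is already imported, the sorted importances can also be drawn as a bar chart. This is a minimal sketch, not part of the original notebook; it reuses the rfr fitted above.

importances = pd.Series(rfr.feature_importances_, index=housing.feature_names).sort_values()
importances.plot(kind='barh')  # horizontal bars, most important feature at the top
plt.xlabel('feature importance')
plt.tight_layout()
plt.show()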




1. Parameters

The most commonly tuned parameters are max_depth (the core of pre-pruning; try a range of values and compare) and min_samples_split, which limits overfitting by refusing to split nodes that contain too few samples. A sketch of such a sweep follows below.
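
A minimal sketch (not in the original post) of sweeping max_depth on the train/test split created above; the depth values are illustrative only.

for depth in [2, 4, 6, 8, 10, None]:  # None lets the tree grow until the leaves are pure
    model = tree.DecisionTreeRegressor(max_depth=depth, random_state=42)
    model.fit(data_train, target_train)
    print(depth,
          round(model.score(data_train, target_train), 3),  # train R^2 keeps rising with depth
          round(model.score(data_test, target_test), 3))    # test R^2 peaks, then overfitting sets in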

2. Visualization

The tree is exported with tree.export_graphviz and rendered through pydotplus, as in the code above; graphviz itself has to be installed separately. If something goes wrong, check the sklearn API documentation first.
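
As an aside not in the original post: if the graphviz/pydotplus route keeps failing (as with the unresolved Image error above), sklearn 0.21+ ships tree.plot_tree, which draws the tree with matplotlib alone. A minimal sketch, refitting a shallow tree on the same two features:

shallow = tree.DecisionTreeRegressor(max_depth=2)
shallow.fit(housing.data[:, [6, 7]], housing.target)
plt.figure(figsize=(12, 6))
tree.plot_tree(shallow, feature_names=housing.feature_names[6:8], filled=True, rounded=True)
plt.show()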

 

3. Parameter selection

Cross-validation:

The training set is used to build the model and the test set to evaluate it. Cross-validation is used for parameter selection: the training data is split into k folds, each fold takes a turn as the validation set while the rest is used for fitting, and the scores are averaged. A sketch follows below.
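
A minimal sketch (not in the original post) of doing this by hand with cross_val_score instead of GridSearchCV; the candidate values are illustrative.

from sklearn.model_selection import cross_val_score
for split in [3, 6, 9]:
    model = RandomForestRegressor(min_samples_split=split, n_estimators=100, random_state=42)
    scores = cross_val_score(model, data_train, target_train, cv=5)  # 5-fold CV on the training data only
    print(split, scores.mean())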

 
