注:本博客數據仍採用決策樹調參的泰坦尼克號數據,前奏(數據預處理)請參考☞
決策樹R&Python調參對比☜
一、R語言
方法一、手動調參
PS.僅使用常規包:randomForest和循環編寫。
1-建模
# Baseline random forest: 100 trees, default mtry, fixed seed.
set.seed(6)
rf <- randomForest(Survived ~ ., data = train, ntree = 100)

# Score on the hold-out set via a confusion matrix.
y_pred <- predict(rf, test)
A <- as.matrix(table(y_pred, test$Survived))
acc <- sum(diag(A)) / sum(A)
acc
未做任何處理時模型精度達到0.8345865。(甚至超過決策樹python調參後的結果)
2-1調參——特徵數
# Tune mtry (candidate features per split) by mean out-of-bag error.
# BUG FIX: the original took mean(mtry_n$err.rate), which averages the whole
# err.rate matrix — the OOB column mixed with the per-class error columns.
# Only the "OOB" column measures overall out-of-bag error, so use just that.
# Also preallocate `err` instead of growing it with append() in the loop.
n_features <- ncol(train) - 1
err <- numeric(n_features)
for (i in seq_len(n_features)) {
  set.seed(6)
  mtry_n <- randomForest(Survived ~ ., data = train, mtry = i)
  err[i] <- mean(mtry_n$err.rate[, "OOB"])
}
print(err)
mtry <- which.min(err)
mtry
2-2調參——樹的個數
# Fit with the chosen mtry and a generous ntree, then plot the OOB error
# curve to see where it flattens out.
set.seed(6)
ntree_fit <- randomForest(Survived ~ ., data = train,
                          mtry = mtry, ntree = 400)
plot(ntree_fit)
# 10-fold cross-validation over ntree in 40:100, keeping the tree count
# with the best mean fold accuracy.
# NOTE(review): createFolds() comes from caret, which is only loaded further
# down in the original post — make sure library(caret) runs before this.
set.seed(0219)
fold <- createFolds(y = data$Survived, k = 10)
ntree_grid <- 40:100
right <- numeric(length(ntree_grid))
for (i in seq_along(ntree_grid)) {
  accuracy <- numeric(10)
  for (j in 1:10) {
    fold_test <- data[fold[[j]], ]
    fold_train <- data[-fold[[j]], ]
    set.seed(1234)
    fold_fit <- randomForest(Survived ~ ., data = fold_train, mtry = mtry,
                             ntree = ntree_grid[i])
    fold_pred <- predict(fold_fit, fold_test)
    confumat <- as.matrix(table(fold_pred, fold_test$Survived))
    accuracy[j] <- sum(diag(confumat)) / sum(confumat)
  }
  right[i] <- mean(accuracy)
}
print(max(right))
# BUG FIX: right[1] corresponds to ntree = 40, so the best ntree is
# which.max(right) + 39; the original printed which.max(right) + 40,
# over-reporting the tuned tree count by one.
print(ntree_grid[which.max(right)])
本段結合交叉驗證的隨機森林樹個數的調參爲博主自行編寫,若有問題請私信討論,轉摘請註明出處,謝謝!
3-最優模型預測
# Refit with the tuned hyper-parameters (mtry = 3, ntree = 58) and
# evaluate on the hold-out set.
set.seed(6)
rf_best <- randomForest(Survived ~ ., data = train, mtry = 3, ntree = 58)
pred <- predict(rf_best, test)
A <- as.matrix(table(pred, test$Survived))
acc <- sum(diag(A)) / sum(A)
acc
特徵數和基分類器的個數調整後模型精度達到0.8609023!!!比未調整結果提高約2.5%!!!
方法二、網格調參
PS.使用強大的caret包和trainControl、tunegrid函數。
但隨機森林網格調參只有一個參數mtry,且爲隨機調整。
library(caret)

# caret tuning for method = "rf" — the only tunable parameter is mtry.
# BUG FIX: `search` is an argument of trainControl(), not train(); passing
# search = "random" to train() lets it fall into `...` and be ignored, so
# the random search never actually ran. It now lives in trainControl().
# Also: use <- for assignment and actually pass the `metric` variable.
metric <- "Accuracy"
control <- trainControl(method = "repeatedcv", number = 10, repeats = 10,
                        search = "random")
set.seed(6)
rf_carte <- train(Survived ~ ., data = train, method = "rf",
                  metric = metric, trControl = control)
modelLookup(model = "rf")
rf_carte

# Score the caret-tuned model on the hold-out set.
y <- predict(rf_carte, test)  # add type = "prob" for class probabilities
A <- as.matrix(table(y, test$Survived))
acc <- sum(diag(A)) / sum(A)
acc  # 0.8345865
特徵數減少,模型複雜度下降,泛化效果變差,此時模型誤差上升,出現欠擬合(而非過擬合)。
PS.若小夥伴有關於隨機森林網格調參更好的辦法或更多關於caret包的資料,歡迎一起學習討論!
二、python
0-導入所需庫(決策樹調參已導入的不再導入)
from sklearn.ensemble import RandomForestClassifier
1-建模
# Baseline random forest: 100 trees, fixed seed for reproducibility.
rf = RandomForestClassifier(n_estimators=100, random_state=19)

# FIX: store the CV score in a plain variable instead of attaching an
# ad-hoc attribute to the estimator (`rf.cv`) — custom attributes on
# sklearn estimators risk clashing with the library's own conventions.
rf_cv = cross_val_score(rf, x, y, cv=10).mean()
print(rf_cv)

# Hold-out accuracy on the train/test split.
rf0 = rf.fit(xtrain, ytrain)
score = rf0.score(xtest, ytest)
print(score)
2-調參
2-1 n_estimators
# Coarse scan of n_estimators over 1, 11, 21, ..., 191 using 10-fold CV.
ntree_range = range(1, 201, 10)
best_ntree = []
for i in ntree_range:
    rf = RandomForestClassifier(n_estimators=i,
                                n_jobs=-1,
                                random_state=19)
    score = cross_val_score(rf, x, y, cv=10).mean()
    best_ntree.append(score)
# BUG FIX: index k corresponds to n_estimators = 1 + 10*k, so the original
# `np.argmax(best_ntree)*10` under-reported the best tree count by one.
print(max(best_ntree), ntree_range[int(np.argmax(best_ntree))])
plt.figure()
plt.plot(ntree_range, best_ntree)
plt.show()
# 縮短區間查看:
# Zoom in on the promising region 30..59 with the same 10-fold CV;
# index k of the score list corresponds to n_estimators = 30 + k.
ntree = [
    cross_val_score(
        RandomForestClassifier(n_estimators=k, random_state=19, n_jobs=-1),
        x, y, cv=10,
    ).mean()
    for k in range(30, 60)
]
print(max(ntree), np.argmax(ntree) + 30)  # best n_estimators = 55
plt.plot(range(30, 60), ntree)
plt.show()
6-調參(2)max_depth
from sklearn.model_selection import GridSearchCV

# Grid-search max_depth in 1..8 with the tuned tree count held fixed.
param_grid = {'max_depth': list(range(1, 9))}
rf = RandomForestClassifier(n_estimators=55, random_state=19)
GS = GridSearchCV(rf, param_grid, cv=10)
GS.fit(x, y)
GS.best_score_   # 0.8335208098987626
GS.best_params_  # max_depth=8
6-調參(3)max_features
# Grid-search max_features in 1..3, keeping the earlier choices fixed.
param_grid = {'max_features': list(range(1, 4))}
rf = RandomForestClassifier(n_estimators=55, random_state=19, max_depth=8)
GS = GridSearchCV(rf, param_grid, cv=10)
GS.fit(x, y)
GS.best_score_   # 0.8335208098987626
GS.best_params_  # max_features=3
6-調參(4)min_samples_leaf,min_samples_split
# Jointly grid-search the two leaf/split pruning parameters, with every
# previously tuned parameter held at its best value.
param_grid = {
    'min_samples_leaf': list(range(1, 11)),
    'min_samples_split': list(range(2, 22)),
}
rf = RandomForestClassifier(n_estimators=55, random_state=19,
                            max_depth=8, max_features=3)
GS = GridSearchCV(rf, param_grid, cv=10)
GS.fit(x, y)
GS.best_score_   # 0.8368953880764904
GS.best_params_  # min_samples_leaf=1, min_samples_split=4
本人也試了所有參數整體調參,但費時很長,有興趣的小夥伴可以試試。
# 整體調參
# Joint search over all four pruning parameters at once
# (8 * 8 * 10 * 20 = 12800 candidates x 10 folds — slow).
param_grid = {
    'max_depth': list(range(1, 9)),
    'max_features': list(range(1, 9)),
    'min_samples_leaf': list(range(1, 11)),
    'min_samples_split': list(range(2, 22)),
}
rf = RandomForestClassifier(n_estimators=55, random_state=19)
GS = GridSearchCV(rf, param_grid, cv=10)
GS.fit(x, y)
GS.best_score_  # 0.8402699662542182
綜上,此時單獨調參的訓練結果得到的最優模型交叉驗證的準確率約爲0.8369;
整體調參可達到約0.8403。
R網格調參結果約爲:0.8346;
R手動調參結果約爲:0.8609,在各方案中最優。而R網格調參可調整的參數有限,僅能達到0.8346,低於python網格調參;相比之下,python的sklearn庫調參的可操作空間更大。
PS.R語言中重要參數(需要調節的)爲ntree和mtry,分別爲所建森林中樹的個數和建立每棵樹需要的特徵數。Python中的重要參數爲:n_estimators、max_depths和max_features。R中沒有樹的最大深度我猜想是因爲默認讓每棵樹最大限度生長,但是mtry同樣可以限制樹的深度,試想,若mtry控制的小,即使樹完全生長,它的深度也不會很大的!因此python中的控制樹過擬合的剪枝參數都是相互有聯繫的,也可以只調節某一些即可。