Python Random Forest Algorithm and Its Optimization (RandomForest)

Optimizing the random forest algorithm raises accuracy by roughly 1%~5% (the baseline is already above 90%; pushing much higher tends to cause overfitting).

The ideas are, of course, drawn from published papers; algorithms this old have been studied to death, and essentially every optimization has already been tried. Humanity's greatest strength is knowing how to use the experience and tools left behind by our predecessors (all of which is just my excuse for being lazy, hhhh).

Optimization approach

1. Compute the accuracy of the baseline model.
2. For the chosen number of trees, find the best tree depth, then rebuild the random forest at that depth.
3. Compute the AUC of every tree in the new forest and keep the top percentage of trees by AUC.
4. Compute the pairwise data similarity between trees and drop any tree whose similarity exceeds the threshold and whose AUC is lower.
5. Compute the final accuracy (a condensed sketch of the whole pipeline follows this list).
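As a quick orientation before the full listing, the sketch below strings these steps together using the names defined in the code that follows (randomForest, caculateAUC_1.caculateRFAUC, auc_optimization, similarity_optimization). The thresholds shown are the defaults used later in this post, and step 2 is shown as optional:

# Condensed pipeline sketch; every helper is defined in the full listing below
myRF = randomForest(trees_num=20, max_depth=15, leaf_min_size=1,
                    sample_ratio=1, feature_ratio=0.3)
myRF.build_randomforest(traindata)                             # step 1: baseline forest + OOB score
# step 2 (optional): max_depth = getBestDepth(...) before building the forest
auc_list = caculateAUC_1.caculateRFAUC(testdata, myRF.trees)   # per-tree AUC
newTempForest = auc_optimization(auc_list, 20, myRF.trees)     # step 3: keep the top-AUC trees
myRF.trees = similarity_optimization(newTempForest, 20, 0.82)  # step 4: drop near-duplicate trees
acc = myRF.accuracy_metric(testdata[:-1])                      # step 5: final accuracy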

The main code is pasted below (the comments are fairly detailed, so I won't walk through the code line by line).

#-*- coding: utf-8 -*-
import time
from csv import reader
from random import randint
from random import seed

import numpy as np
from numpy import mat

from group_11 import caculateAUC_1, plotTree

# Build a single CART tree
'''Trial split: partition the dataset on a feature index and threshold value'''
def data_split(index, value, dataset):
    left, right = list(), list()
    for row in dataset:
        if row[index] < value:
            left.append(row)
        else:
            right.append(row)
    return left, right

'''Compute the Gini index of a candidate split'''
def calc_gini(groups, class_values):
    gini = 0.0
    total_size = 0
    for group in groups:
        total_size += len(group)
    for group in groups:
        size = len(group)
        if size == 0:
            continue
        for class_value in class_values:
            proportion = [row[-1] for row in group].count(class_value) / float(size)
            gini += (size / float(total_size)) * (proportion * (1.0 - proportion))  # for binary labels this inner loop runs twice, effectively doubling the term
    return gini
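# calc_gini above accumulates the size-weighted Gini impurity of a split:
#   Gini(D, A) = sum over groups g of (|g|/|D|) * sum over classes c of p_c * (1 - p_c)
# Worked example: a group of 4 rows with labels [0, 0, 1, 1] contributes
#   (4/|D|) * (0.5*0.5 + 0.5*0.5) = (4/|D|) * 0.5 to the total.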

'''Find the best split point'''
def get_split(dataset, n_features):
    class_values = list(set(row[-1] for row in dataset))  # set of class labels
    b_index, b_value, b_score, b_groups = 999, 999, 999, None

    # Randomly pick a feature subset containing n_features features
    features = list()
    while len(features) < n_features:
        # random feature index; the last column is the label, so it is excluded
        index = randint(0, len(dataset[0]) - 2)  # typically n_features is about the square root of the feature count
        if index not in features:
            features.append(index)
    for index in features:      # for each candidate feature
        # split the dataset on each row's value of this feature and
        # compute Gini(D, A), keeping the split with the smallest Gini
        for row in dataset:
            groups = data_split(index, row[index], dataset)
            gini = calc_gini(groups, class_values)
            if gini < b_score:
                b_index, b_value, b_score, b_groups = index, row[index], gini, groups
    return {'index': b_index, 'value': b_value, 'groups': b_groups}  # each node is represented as a dict
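# Example return value (hypothetical numbers): a chosen split on feature 2
# at threshold 3.5 yields {'index': 2, 'value': 3.5, 'groups': (left, right)},
# where left/right are the two row lists produced by data_split.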

'''Majority vote'''
def to_terminal(group):
    outcomes = [row[-1] for row in group]
    return max(set(outcomes), key=outcomes.count)

'''Recursive splitting'''
def split(node, max_depth, min_size, n_features, depth):
    left, right = node['groups']  # unpack the two groups
    del (node['groups'])
    if not left or not right:    # one side is empty: collapse to a single leaf
        node['left'] = node['right'] = to_terminal(left + right)  # majority vote over all rows
        return

    if depth >= max_depth:
        node['left'], node['right'] = to_terminal(left), to_terminal(right)
        return
    # left subtree
    if len(left) <= min_size:
        node['left'] = to_terminal(left)
    else:
        node['left'] = get_split(left, n_features)
        split(node['left'], max_depth, min_size, n_features, depth + 1)
    # right subtree
    if len(right) <= min_size:   # min_size: minimum number of samples required to keep splitting
        node['right'] = to_terminal(right)
    else:
        node['right'] = get_split(right, n_features)
        split(node['right'], max_depth, min_size, n_features, depth + 1)

'''Build one tree'''
def build_one_tree(train, max_depth, min_size, n_features):
    # find the best split to serve as the root
    root = get_split(train, n_features)
    split(root, max_depth, min_size, n_features, 1)
    return root

'''Predict with a single tree from the forest'''
def predict(node, row):
    if row[node['index']] < node['value']:
        if isinstance(node['left'], dict):
            return predict(node['left'], row)
        else:
            return node['left']
    else:
        if isinstance(node['right'], dict):
            return predict(node['right'], row)
        else:
            return node['right']
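# Usage: predict(tree, row) walks down from the root, going left whenever
# row[node['index']] < node['value'], until it reaches a non-dict value,
# which is the leaf's class label.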


# Random forest class
class randomForest:
    def __init__(self, trees_num, max_depth, leaf_min_size, sample_ratio, feature_ratio):
        self.trees_num = trees_num                # number of trees in the forest
        self.max_depth = max_depth                # tree depth
        self.leaf_min_size = leaf_min_size        # minimum samples needed to keep splitting
        self.samples_split_ratio = sample_ratio   # row-sampling ratio for each bootstrap sample
        self.feature_ratio = feature_ratio        # column-sampling (feature) ratio
        self.trees = list()                       # the forest

    '''Sample with replacement to create a bootstrap data subset'''
    def sample_split(self, dataset):
        sample = list()
        n_sample = round(len(dataset) * self.samples_split_ratio)  # sample size per tree
        while len(sample) < n_sample:
            index = randint(0, len(dataset) - 1)  # random sampling with replacement
            sample.append(dataset[index])
        return sample
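    # With sample_ratio = 1, a bootstrap sample of size n leaves roughly
    # (1 - 1/n)^n ≈ 1/e ≈ 36.8% of the training rows out of the bag;
    # those out-of-bag rows are what OOB() below uses for its estimate.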

    ##############***Out-of-Bag***################################
    # Out-of-bag (OOB) estimation. Note that not every training sample is
    # guaranteed to appear in the OOB data of some tree, so the estimate
    # must be taken over the samples that actually received OOB votes.
    def OOB(self, oobdata, train, trees):
        '''oobdata: dict mapping tree index -> that tree's bootstrap sample
        train: training set; trees: tree list
        returns the OOB accuracy (%)'''
        count = 0
        n_oob = 0
        n_trees = len(trees)  # number of trees in the forest

        # For every training row, collect votes from all trees for which
        # the row was out of bag, then take the majority vote
        for row in train:
            pre = []
            for i in range(n_trees):
                if row not in oobdata[i]:
                    pre.append(predict(trees[i], row))
            if len(pre) > 0:
                n_oob += 1
                label = max(set(pre), key=pre.count)
                if label == row[-1]:
                    count += 1

        return (float(count) / n_oob) * 100 if n_oob > 0 else 0.0

    '''Build the random forest'''
    def build_randomforest(self, train):
        max_depth = self.max_depth           # tree depth
        min_size = self.leaf_min_size        # minimum samples needed to keep splitting
        n_trees = self.trees_num             # number of trees in the forest
        n_features = int(self.feature_ratio * (len(train[0]) - 1))  # column sampling: pick m of M features (m << M)
        oobs = {}                            # bootstrap sample per tree, for OOB estimation
        for i in range(n_trees):             # build n_trees decision trees
            sample = self.sample_split(train)  # bootstrap sample for this tree
            oobs[i] = sample
            tree = build_one_tree(sample, max_depth, min_size, n_features)  # build one decision tree
            self.trees.append(tree)
        oob_score = self.OOB(oobs, train, self.trees)  # OOB accuracy
        print("oob_score is ", oob_score)
        return self.trees

    '''Majority vote over the forest's per-tree predictions'''
    def bagging_predict(self, onetestdata):
        predictions = [predict(tree, onetestdata) for tree in self.trees]
        return max(set(predictions), key=predictions.count)

    '''Compute the accuracy of the built forest'''
    def accuracy_metric(self, testdata):
        correct = 0
        for i in range(len(testdata)):
            predicted = self.bagging_predict(testdata[i])
            if testdata[i][-1] == predicted:
                correct += 1
        return correct / float(len(testdata)) * 100.0


# Data handling
'''Load the CSV data'''
def load_csv(filename):
    dataset = list()
    with open(filename, 'r') as file:
        csv_reader = reader(file)
        for row in csv_reader:
            if not row:
                continue
            dataset.append(row[:-1])  # drop the trailing column
    # the first row holds the feature names
    return dataset[1:], dataset[0]

'''Split the data into training and test sets'''
def split_train_test(dataset, ratio=0.3):
    # ratio: fraction of the data held out as the test set
    num = len(dataset)
    train_num = int((1-ratio) * num)
    dataset_copy = list(dataset)
    traindata = list()
    while len(traindata) < train_num:
        index = randint(0,len(dataset_copy)-1)
        traindata.append(dataset_copy.pop(index))
    testdata = dataset_copy
    return traindata, testdata
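# Example: with 1000 rows and ratio=0.3, traindata receives 700 rows drawn
# randomly without replacement and testdata keeps the remaining 300.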

'''Traverse a tree, writing the parent-child vector inner products into a list'''
def analyListTree(node, tag, result):
    # For each internal child, record the inner product of this node's
    # (index, value) vector with that child's, then recurse into the child.
    # (Both branches must be visited; an if/elif here would silently skip
    # the right subtree whenever the left child is internal.)
    if isinstance(node['left'], dict):
        result.append(Inner_product(node, 'left'))
        analyListTree(node['left'], 'left', result)
    if isinstance(node['right'], dict):
        result.append(Inner_product(node, 'right'))
        analyListTree(node['right'], 'right', result)

'''Inner product of two (index, value) vectors'''
# computes the inner product of node's vector with that of its child node[tag]
def Inner_product(node, tag):
    a = mat([[float(node['index'])], [float(node['value'])]])
    b = mat([[float(node[tag]['index'])], [float(node[tag]['value'])]])
    return (a.T * b)[0,0]
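# Worked example (hypothetical node values): a node with index=1, value=4.0
# whose left child has index=2, value=3.0 gives
#   a = [1, 4]^T, b = [2, 3]^T, a.T * b = 1*2 + 4*3 = 14.0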

'''Similarity optimization'''
''' same_value = 20      # max difference between inner products to count as similar
    same_rate = 0.63     # similarity ratio above which two trees count as similar
    Returns the new forest (highly similar trees removed)'''
def similarity_optimization(newforest, samevalue, samerate):
    res = list()                # one list of inner products per tree
    for tree in newforest:
        # traverse the tree and collect its parent-child inner products
        result = list()
        analyListTree(tree, None, result)
        res.append(result)
    # Compare every inner product of one tree against every inner product
    # of each later tree and measure the fraction of close pairs
    for i in range(0, len(res) - 1):
        # skip trees that have already been removed
        if newforest[i] is None:
            continue
        for k in range(i + 1, len(res)):
            if newforest[k] is None or not res[i] or not res[k]:
                continue
            same_count = 0  # number of close inner-product pairs for this tree pair
            all_contrast = len(res[i]) * len(res[k])  # total number of comparisons
            for j in range(0, len(res[i])):
                for l in range(0, len(res[k])):
                    # count the pair as similar if the inner products are close
                    if abs(res[i][j] - res[k][l]) < samevalue:
                        same_count += 1
            real_same_rate = same_count / all_contrast
            # if the similarity exceeds the threshold, drop the later tree
            # (the AUC filter ran first, so the kept tree has the higher AUC)
            if real_same_rate > samerate:
                newforest[k] = None
    result_forest = [tree for tree in newforest if tree is not None]
    return result_forest


'''AUC optimization'''
def auc_optimization(auclist, trees_num, trees):
    # sort by AUC in descending order and get the matching tree indices
    b = sorted(enumerate(auclist), key=lambda x: x[1], reverse=True)
    index_list = [x[0] for x in b]
    auc_num = int(trees_num * 2 / 3)
    # keep the auc_num trees with the highest AUC
    print('auc: ', auc_num, index_list)
    newTempForest = list()
    for i in range(auc_num):
        newTempForest.append(trees[index_list[i]])
    return newTempForest
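# Worked example (hypothetical AUCs): auclist = [0.71, 0.93, 0.85] sorts to
# index_list = [1, 2, 0]; with trees_num = 3, auc_num = int(3 * 2 / 3) = 2,
# so the trees at indices 1 and 2 are kept, in descending AUC order.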

'''Find the best depth for the forest's decision trees'''
def getBestDepth(min_size, sample_ratio, trees_num, feature_ratio, traindata, testdata):
    max_depth = np.linspace(1, 15, 15, endpoint=True)  # candidate depths 1..15
    scores_final = []
    for depth in max_depth:
        # build a forest at this depth and evaluate it
        myRF_ = randomForest(trees_num, depth, min_size, sample_ratio, feature_ratio)
        myRF_.build_randomforest(traindata)
        acc = myRF_.accuracy_metric(testdata[:-1])
        scores_final.append(acc * 0.01)
    # pick the smallest depth that reaches the highest accuracy
    best_depth = 0
    temp_score = 0
    for i in range(len(scores_final)):
        if scores_final[i] > temp_score:
            temp_score = scores_final[i]
            best_depth = max_depth[i]
    # (optionally plot max_depth vs. scores_final here to inspect the curve)
    return best_depth


'''Compare model accuracy across different numbers of trees'''
def getMyRFAcclist(treenum_list):
    seed(1)  # make every run of this file reproducible
    filename = 'DataSet3.csv'            # data already preprocessed with SMOTE
    min_size = 1
    sample_ratio = 1
    feature_ratio = 0.3  # keep as small as possible, but int(self.feature_ratio * (len(train[0])-1)) must stay above 1
    same_value = 20   # max difference between inner products to count as similar
    same_rate = 0.63  # similarity ratio above which two trees count as similar

    # load the data and hold out 30% as the test set
    dataset, features = load_csv(filename)
    traindata, testdata = split_train_test(dataset, 0.3)
    # compare forests with different numbers of trees, e.g. treenum_list = [20, 30, 40, 50, 60]
    acc_num_list = list()
    acc_list = list()
    for trees_num in treenum_list:
        # optimization 1: find the best depth
        max_depth = getBestDepth(min_size, sample_ratio, trees_num, feature_ratio, traindata, testdata)
        print('max_depth is ', max_depth)

        # initialize and build the random forest
        myRF = randomForest(trees_num, max_depth, min_size, sample_ratio, feature_ratio)
        myRF.build_randomforest(traindata)

        print('Tree_number: ', len(myRF.trees))
        # compute the AUC of every tree in the forest
        auc_list = caculateAUC_1.caculateRFAUC(testdata, myRF.trees)
        # keep the trees with the highest AUC to form a new forest (AUC optimization)
        newTempForest = auc_optimization(auc_list, trees_num, myRF.trees)
        # similarity optimization
        myRF.trees = similarity_optimization(newTempForest, same_value, same_rate)
        # evaluate
        acc = myRF.accuracy_metric(testdata[:-1])
        print('myRF1 model accuracy:', acc, '%')
        acc_num_list.append([len(myRF.trees), acc])
        acc_list.append(acc)
    print('trees_num from 20 to 60: ', acc_num_list)
    return acc_list


if __name__ == '__main__':
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    seed(1)  # make every run of this file reproducible
    filename = 'DataSet3.csv'    # dataset already preprocessed with SMOTE
    max_depth = 15  # tunable; trees that are too deep easily overfit
    min_size = 1
    sample_ratio = 1
    trees_num = 20

    feature_ratio = 0.3  # keep as small as possible, but int(self.feature_ratio * (len(train[0])-1)) must stay above 1
    same_value = 20   # max difference between inner products to count as similar
    same_rate = 0.82  # similarity ratio above which two trees count as similar
    # load the data and hold out 30% as the test set
    dataset, features = load_csv(filename)
    traindata, testdata = split_train_test(dataset, 0.3)

    # optimization 1: find the best depth
    # max_depth = getBestDepth(min_size, sample_ratio, trees_num, feature_ratio, traindata, testdata)
    # print('max_depth is ', max_depth)

    # initialize the random forest
    myRF = randomForest(trees_num, max_depth, min_size, sample_ratio, feature_ratio)
    # build the random forest
    myRF.build_randomforest(traindata)

    print('Tree_number: ', len(myRF.trees))
    acc = myRF.accuracy_metric(testdata[:-1])
    print('Baseline RF accuracy:', acc, '%')

    # plot one tree for visual inspection (here, the first tree)
    # plotTree.creatPlot(myRF.trees[0], features)
    # compute the AUC of every tree in the forest
    auc_list = caculateAUC_1.caculateRFAUC(testdata, myRF.trees)
    # bar chart of each tree's AUC
    # plotTree.plotAUCbar(len(auc_list), auc_list)

    # keep the trees with the highest AUC to form a new forest (AUC optimization)
    newTempForest = auc_optimization(auc_list, trees_num, myRF.trees)
    # similarity optimization
    myRF.trees = similarity_optimization(newTempForest, same_value, same_rate)

    print('Optimized Tree_number: ', len(myRF.trees))
    # evaluate
    acc = myRF.accuracy_metric(testdata[:-1])
    print('Optimized RF accuracy:', acc, '%')
    # plot one tree for visual inspection (here, the first tree)
    # plotTree.creatPlot(myRF.trees[0], features)
    # compute the AUC of every tree in the optimized forest
    auc_list = caculateAUC_1.caculateRFAUC(testdata, myRF.trees)
    # bar chart of each tree's AUC
    plotTree.plotAUCbar(len(auc_list), auc_list)
    end = time.perf_counter()
    print('The end!')
    print(end - start)