Implementing a Decision Tree in Python with the ID3 Algorithm

This is another simple experiment from my study of the book *統計學習方法* (Statistical Learning Methods). While writing the code I drew on many other blog posts, for which I am grateful. The code implements the book's worked example:
[Figure: the loan-applicant sample dataset from the book; the same data is encoded in CreateDataSet below]

import math
import operator

# Build the loan-applicant dataset from the book.
# Each row is [age, job, building, credit, class]: 'job' and 'building'
# are binary (0 = no, 1 = yes), and the last column is the class label.
def CreateDataSet():
    dataset = [ [1, 0,0,0,'no'],
                [1, 0,0,1,'no'],
                [1, 1,0,1,'yes'],
                [1, 1,1,0,'yes'],
                [1, 0,0,0,'no'],

                [2, 0,0,0,'no'],
                [2, 0,0,1,'no'],
                [2, 1,1,1,'yes'],
                [2, 0,1,2,'yes'],
                [2, 0,1,2,'yes'],

                [3, 0,1,2,'yes'],
                [3, 0,1,1,'yes'],
                [3, 1,0,1,'yes'],
                [3, 1,0,2,'yes'],
                [3, 0,0,0,'no'] ]
    labels = ['age', 'job','building','credit']
    return dataset, labels

# Compute the Shannon (empirical) entropy of the class labels in dataSet.
def calcShannonEnt(dataSet):
    Ent = 0.0
    numEntries = len(dataSet)
    labelCounts = {}
    # Count how many samples fall into each class.
    for feaVec in dataSet:
        currentLabel = feaVec[-1]
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    # Ent = -sum over classes k of p_k * log2(p_k)
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        Ent -= prob * math.log(prob, 2)
    return Ent
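
As a quick sanity check (a usage sketch on the dataset above): the 15 samples split into 9 'yes' and 6 'no', so the empirical entropy should be -(9/15)·log2(9/15) - (6/15)·log2(6/15) ≈ 0.971, matching the worked example in the book:

>>> dataSet, labels = CreateDataSet()
>>> calcShannonEnt(dataSet)
0.9709505944546686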

# Majority vote: return the most common class label in classList.
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

# Return the subset of rows whose feature at index `axis` equals `value`,
# with that feature column removed.
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reduceFeatVec = featVec[:axis]
            reduceFeatVec.extend(featVec[axis+1:])
            retDataSet.append(reduceFeatVec)
    return retDataSet
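
For example, with dataSet as above, splitting on the 'building' column (index 2) with value 1 keeps the six home-owning rows and drops that column:

>>> splitDataSet(dataSet, 2, 1)
[[1, 1, 0, 'yes'], [2, 1, 1, 'yes'], [2, 0, 2, 'yes'], [2, 0, 2, 'yes'], [3, 0, 2, 'yes'], [3, 0, 1, 'yes']]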

# Choose the feature with the largest information gain (the ID3 criterion).
def choiceBestFea(dataSet):
    baseEntropy = calcShannonEnt(dataSet)
    numberFeatures = len(dataSet[0]) - 1   # last column is the class label
    bestFeatureId = -1
    bestInfoGain = 0.0
    for i in range(numberFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0   # conditional entropy H(D | feature i)
        for value in uniqueVals:
            subFea = splitDataSet(dataSet, i, value)
            prob = len(subFea) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subFea)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeatureId = i
    return bestFeatureId
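
On the full dataset this returns 2, the 'building' column: the book computes the information gains as g(D,A1) = 0.083, g(D,A2) = 0.324, g(D,A3) = 0.420 and g(D,A4) = 0.363, so owning a house is the most informative feature:

>>> choiceBestFea(dataSet)
2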


def createDTree(dataSet, labels):
    # Step 1: if all samples belong to one class, return that class
    # as a single-node (leaf) tree.
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]

    # Step 2: if the feature set is empty (only the class column is left),
    # return a leaf whose class is decided by majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)

    # Step 3: pick the best feature, i.e. the one with the largest information gain.
    bestFeaId = choiceBestFea(dataSet)
    # Step 4: use that feature to split the data.
    bestFeatLabel = labels[bestFeaId]
    del labels[bestFeaId]
    featValues = [example[bestFeaId] for example in dataSet]
    uniqueVals = set(featValues)

    myTree = {bestFeatLabel: {}}
    # Step 5: recursively build a subtree for each value of the chosen feature.
    for value in uniqueVals:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createDTree(splitDataSet(dataSet, bestFeaId, value), subLabels)
    return myTree
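
Run on the full dataset, the tree splits first on 'building' and then on 'job', exactly as the book derives (a sketch of the expected output; dict key order may vary across Python versions):

>>> myDat, labels = CreateDataSet()
>>> createDTree(myDat, labels)
{'building': {0: {'job': {0: 'no', 1: 'yes'}}, 1: 'yes'}}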


# Classify a test vector with the decision tree.
# The test vector is ordered by tree depth: testVec[0] answers the root
# feature, testVec[1] the feature tested one level down, and so on.
def classify(inputTree, testVec):
    firstStr = list(inputTree.keys())[0]   # feature tested at this node
    secondDict = inputTree[firstStr]       # the branches below this node
    classLabel = ""
    for key in secondDict.keys():
        if testVec[0] == key:
            if isinstance(secondDict[key], dict):
                # internal node: recurse with the remaining answers
                classLabel = classify(secondDict[key], testVec[1:])
            else:
                # leaf node: this is the predicted class
                classLabel = secondDict[key]
            break
    return classLabel
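
With tree bound to the dict built above, [1, 0] is approved at the root (owns a house), while [0, 0] descends into the 'job' subtree and is rejected:

>>> tree = {'building': {0: {'job': {0: 'no', 1: 'yes'}}, 1: 'yes'}}
>>> classify(tree, [1, 0])
'yes'
>>> classify(tree, [0, 0])
'no'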

# Serialize the tree to disk with pickle.
def storeTree(inputTree, filename):
    import pickle
    with open(filename, 'wb') as fw:  # pickle writes bytes, so open in 'wb' mode
        pickle.dump(inputTree, fw)

# Load a previously pickled tree from disk.
def reStoreTree(filename):
    import pickle
    with open(filename, 'rb') as fr:  # read back in binary ('rb') mode
        return pickle.load(fr)


def train():
    # Build the decision tree and save it to dtree.txt.
    myDat, labels = CreateDataSet()
    tree = createDTree(myDat, labels)
    print(tree)
    storeTree(tree, "dtree.txt")

def test():
    # Reload the saved tree and classify a sample.
    tree = reStoreTree("dtree.txt")
    return classify(tree, [0, 0])

train()
result = test()
print(result)

The train() method builds the decision tree and saves it to the file dtree.txt.
The test() method loads the saved tree and classifies a sample.
The generated tree has only two decision nodes: the first tests whether the applicant owns a house, the second whether the applicant has a job. So a test vector of length 2, such as [0, 0] or [1, 0], is all that is needed; its entries answer the tree's questions from the root down.
