Statistical Learning Methods, Exercise 5.1: a C4.5 implementation

Problem statement: given the training data set, generate a decision tree using the information gain ratio (the C4.5 algorithm).


The information gain ratio criterion is an improvement over the ID3 algorithm: plain information gain is biased toward features with many distinct values, and C4.5 corrects this by dividing the gain by the entropy of the feature itself.


Definition of the information gain ratio:
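Restating the standard definition in the book's notation: the information gain ratio of feature A with respect to training set D is

$$g_R(D, A) = \frac{g(D, A)}{H_A(D)}, \qquad H_A(D) = -\sum_{i=1}^{n} \frac{|D_i|}{|D|} \log_2 \frac{|D_i|}{|D|}$$

where D_1, ..., D_n are the subsets of D induced by the n distinct values of feature A.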


Supplement: how the information gain is computed:
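For reference, the information gain (the quantity ID3 uses directly) is the reduction in class entropy brought by feature A, again following the book's standard definitions:

$$g(D, A) = H(D) - H(D \mid A)$$
$$H(D) = -\sum_{k=1}^{K} \frac{|C_k|}{|D|} \log_2 \frac{|C_k|}{|D|}, \qquad H(D \mid A) = \sum_{i=1}^{n} \frac{|D_i|}{|D|} H(D_i)$$

where C_1, ..., C_K are the classes and D_1, ..., D_n are the subsets of D split by the values of feature A.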



Code implementation (adapted from Machine Learning in Action; save it as tree.py):

from math import log
import operator
def createDataSet():
    # Loan-application training data used in the book:
    # columns are age, job, house, credit, class
    dataSet = [[1, 0, 0, 1, 0],
               [1, 0, 0, 2, 0],
               [1, 1, 0, 2, 1],
               [1, 1, 1, 1, 1],
               [1, 0, 0, 1, 0],
               [2, 0, 0, 1, 0],
               [2, 0, 0, 2, 0],
               [2, 1, 1, 2, 1],
               [2, 0, 1, 3, 1],
               [2, 0, 1, 3, 1],
               [3, 0, 1, 3, 1],
               [3, 0, 1, 2, 1],
               [3, 1, 0, 2, 1],
               [3, 1, 0, 3, 1],
               [3, 0, 0, 1, 0]]
    labels = ['age', 'job', 'house', 'credit']
    return dataSet, labels

def calcShannonEnt(dataSet):
    # Shannon entropy H(D) of the class labels (the last column of each row)
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt
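# Quick sanity check (sketch, using the loan data above): the 15 samples contain
# 9 positive and 6 negative labels, so calcShannonEnt(dataSet) should return
#     -(9/15)*log2(9/15) - (6/15)*log2(6/15) ≈ 0.971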

def splitDataSet(dataSet, axis, value):
    # Return the subset of rows whose feature `axis` equals `value`,
    # with that feature column removed
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis + 1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet

def chooseBestFeatureToSplit(dataSet):
    # Choose the feature with the largest information gain ratio (C4.5)
    numFeatures = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGainRatio = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        newEntropy = 0.0   # conditional entropy H(D|A)
        splitInfo = 0.0    # entropy of D with respect to feature A, i.e. H_A(D)
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
            splitInfo -= prob * log(prob, 2)
        if splitInfo == 0.0:
            continue   # feature takes only one value here; it cannot split the data
        infoGainRatio = (baseEntropy - newEntropy) / splitInfo
        if infoGainRatio > bestInfoGainRatio:
            bestInfoGainRatio = infoGainRatio
            bestFeature = i
    return bestFeature
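# Worked example (sketch, computed from the loan data above): for the 'house' feature,
# g(D, house) = 0.971 - 0.551 ≈ 0.420 and H_house(D) = -(6/15)*log2(6/15) - (9/15)*log2(9/15) ≈ 0.971,
# giving a gain ratio of about 0.420 / 0.971 ≈ 0.433 -- the largest of the four features,
# so 'house' is chosen as the root split.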

def majorityCnt(classList):
    # Majority vote among the remaining class labels
    classCount = {}
    for vote in classList:
        if vote not in classCount.keys():
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]

def createTree(dataSet, labels):
    # Recursively build the decision tree as nested dicts
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]            # all samples share one class: leaf node
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)  # no features left: take the majority class
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del labels[bestFeat]               # note: this mutates the caller's label list
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree

Usage:

import tree
mydat,mylab = tree.createDataSet()
mytree = tree.createTree(mydat,mylab)
print(mytree)
The result is the same as what the ID3 algorithm produces:
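A minimal sanity check (assuming the tree.py above is used unchanged): on this data set the root split is on 'house' and the only remaining split is on 'job', so the printed tree should look roughly like

{'house': {0: {'job': {0: 0, 1: 1}}, 1: 1}}

which is exactly the tree ID3 builds on the same data.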

