Decision Trees

  • Pros: low computational cost; results are easy to interpret; insensitive to missing intermediate values; can handle data with irrelevant features
  • Cons: prone to overfitting
  • Suitable data types: numeric and nominal values

Decision trees can be used to extract the knowledge hidden in data, deriving rules from an unfamiliar data set, e.g. for use in expert systems.

Procedure:

Try splitting on every feature and compute the information gain of each candidate split; split on the feature with the largest gain, then repeat the process on each subset, until either all features have been used or every instance under a branch belongs to the same class. If all features are exhausted but the class labels are still not unique, the class label is usually decided by majority vote.

Taken as a whole, this looks like a greedy algorithm...

Key terms to look up: information theory, Shannon entropy, information entropy, information gain.

Searching for the terms above is a good way to pick up the background concepts.

Gini impurity is also worth knowing as an alternative impurity measure; the standard definitions follow below.
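
For reference, the standard textbook definitions, with $p_k$ the fraction of records belonging to class $k$ and $D_v$ the subset of $D$ where feature $A$ takes value $v$:

$$H(D) = -\sum_{k=1}^{K} p_k \log_2 p_k$$

$$\mathrm{Gain}(D, A) = H(D) - \sum_{v} \frac{|D_v|}{|D|}\, H(D_v)$$

$$\mathrm{Gini}(D) = 1 - \sum_{k=1}^{K} p_k^2$$

calcShannonEnt below computes $H(D)$ directly from the class-label column of the data set.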


In [1]:
from math import log

def calcShannonEnt(dataSet):
    """Compute the Shannon entropy of the class labels in dataSet."""
    numEntries = len(dataSet)
    labelCounts = {}
    # Count how often each class label (last column) appears
    for featVec in dataSet:
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    # H = -sum(p * log2(p)) over all class labels
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt

In [2]:
def createDataSet():
    # Toy data: [can survive without surfacing?, has flippers?, is a fish?]
    dataset = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    return dataset, labels

In [3]:
myDat, labels = createDataSet()

In [4]:
myDat


Out[4]:
[[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]

In [5]:
calcShannonEnt(myDat)


Out[5]:
0.9709505944546686
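
This matches a hand calculation: with two 'yes' and three 'no' labels, H = −(2/5)·log2(2/5) − (3/5)·log2(3/5) ≈ 0.971.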

In [6]:
myDat[0][-1] = 'maybe'

In [7]:
myDat


Out[7]:
[[1, 1, 'maybe'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]

In [8]:
calcShannonEnt(myDat)


Out[8]:
1.3709505944546687
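
Entropy rises from 0.971 to 1.371 because a third class ('maybe') was introduced: H = −2·(1/5)·log2(1/5) − (3/5)·log2(3/5) ≈ 1.371. The more mixed the class labels, the higher the entropy.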

In [9]:
def splitDataSet(dataSet, axis, value):
    """Return the records whose feature `axis` equals `value`, with that feature removed."""
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            # Copy the record minus the column we split on
            reducedFeatVec = featVec[:axis]
            reducedFeatVec.extend(featVec[axis+1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet

In [10]:
myDat, labels = createDataSet()

In [11]:
myDat


Out[11]:
[[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]

In [12]:
splitDataSet(myDat, 0, 1)


Out[12]:
[[1, 'yes'], [1, 'yes'], [0, 'no']]

In [13]:
splitDataSet(myDat, 0, 0)


Out[13]:
[[1, 'no'], [1, 'no']]
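
Note that splitDataSet drops the column it splits on, so each returned record is one feature shorter than the input; this is what lets createTree (below) detect that all features are used up when len(dataSet[0]) == 1.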

In [14]:
def chooseBestFeatureToSplit(dataSet):
    """Return the index of the feature whose split yields the largest information gain."""
    numFeatures = len(dataSet[0]) - 1      # last column is the class label
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)
        # Conditional entropy of the class labels after splitting on feature i
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature

In [15]:
chooseBestFeatureToSplit(myDat)


Out[15]:
0
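
Feature 0 ('no surfacing') is chosen. Checking by hand: splitting on it yields subsets {yes, yes, no} (H ≈ 0.918) and {no, no} (H = 0), so the conditional entropy is (3/5)·0.918 ≈ 0.551 and the gain is 0.971 − 0.551 ≈ 0.420. Splitting on 'flippers' instead leaves (4/5)·1.0 = 0.8, a gain of only ≈ 0.171.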

In [16]:
import operator

def majorityCnt(classList):
    """Return the most frequent class label in classList (majority vote)."""
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
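
As an aside, the same majority vote can be written more compactly with the standard library; a minimal sketch (majorityCnt2 is just an illustrative name, assuming classList is non-empty):

from collections import Counter

def majorityCnt2(classList):
    # most_common(1) returns [(label, count)] for the most frequent label
    return Counter(classList).most_common(1)[0][0]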

In [17]:
def createTree(dataSet, labels):
    classList = [example[-1] for example in dataSet]
    # Stop if every instance under this branch has the same class
    if classList.count(classList[0]) == len(classList):
        return classList[0]
    # Stop if all features are used up; fall back to majority vote
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    del labels[bestFeat]   # NB: mutates the caller's list, so labels must be re-created before reuse
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy so sibling branches don't interfere
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree

In [18]:
myTree = createTree(myDat, labels)

In [19]:
myTree


Out[19]:
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}
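
Read the nested dict as the tree itself: each outer key names the feature tested at a node, each inner key is one of that feature's values, and a value that is another dict is a subtree while a plain string is a leaf (class label).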

In [20]:
%matplotlib inline

In [21]:
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']   # render the Chinese node labels correctly
plt.rcParams['axes.unicode_minus'] = False     # render minus signs correctly with this font
decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")

def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    # Draw a boxed node at centerPt with an arrow coming from parentPt
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords="axes fraction",
                            xytext=centerPt, textcoords="axes fraction",
                            va="center", ha="center", bbox=nodeType, arrowprops=arrow_args)

def createPlot():
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111, frameon=False)
    plotNode(u'决策节点', (0.5, 0.1), (0.1, 0.5), decisionNode)   # 决策节点 = decision node
    plotNode(u'叶节点', (0.8, 0.1), (0.3, 0.8), leafNode)          # 叶节点 = leaf node
    plt.show()

In [22]:
createPlot()



In [23]:
def getNumLeafs(myTree):
    """Count the leaf nodes of a tree built by createTree."""
    numLeafs = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        # A dict value is a subtree; anything else is a leaf
        if type(secondDict[key]).__name__ == 'dict':
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs

def getTreeDepth(myTree):
    """Return the number of feature tests along the longest path."""
    maxDepth = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth

In [24]:
def retrieveTree(i):
    # Stub: the book's version returns one of several predefined trees by index;
    # here we ignore i and reuse the tree built above.
    return myTree

In [25]:
getNumLeafs(myTree)


Out[25]:
3

In [26]:
getTreeDepth(myTree)


Out[26]:
2
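
For myTree this is consistent with the structure above: three leaves ('no', 'no', 'yes') and a depth of two feature tests along the longest path.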

In [27]:
def plotMidText(cntrPt, parentPt, txtString):
    # Label the branch between a parent and child with the feature value
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString)

def plotTree(myTree, parentPt, nodeTxt):
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = list(myTree.keys())[0]
    # Center this node horizontally over the leaves it spans
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD   # descend one level
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            plotTree(secondDict[key], cntrPt, str(key))     # recurse into subtree
        else:
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD   # back up after drawing children

def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5 / plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()
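
A note on the layout arithmetic: all coordinates are axes fractions. totalW and totalD hold the leaf count and tree depth, leaves are spaced 1/totalW apart horizontally and levels 1/totalD apart vertically, xOff/yOff track the last position drawn, and a parent node is centered over its leaves via the (1 + numLeafs)/2/totalW offset. Starting xOff at -0.5/totalW centers each leaf within its horizontal slot.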

In [28]:
createPlot(myTree)



In [29]:
def myCreatePlot(inTree):
    # Experiment: like createPlot, but with one extra slot of width,
    # xOff starting at 0 instead of -0.5 / totalW, and the axes frame left visible
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    createPlot.ax1 = plt.subplot(111, frameon=True)
    plotTree.totalW = float(getNumLeafs(inTree)) + 1
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = 0.0
    print(plotTree.totalW)
    print(plotTree.xOff)
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()

In [30]:
myCreatePlot(myTree)


4.0
0.0
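
Compared with createPlot, this variant reserves one extra slot of width (totalW + 1) and starts xOff at 0 rather than -0.5/totalW, so the whole tree is drawn shifted right and slightly compressed; the printed 4.0 and 0.0 confirm the changed parameters.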

In [31]:
newTree = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}, 3: 'maybe'}}

In [32]:
myCreatePlot(newTree)


5.0
0.0

In [33]:
createPlot(newTree)



In [34]:
newNewTree = {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: {'haha': {0: 'yes', 1: {'hey': {0: 'yes', 1: {'aha': {0: 'yes', 1: 'no'}}}}}}}}, 3: 'maybe'}}

In [35]:
myCreatePlot(newNewTree)


8.0
0.0

In [36]:
createPlot(newNewTree)



In [37]:
def classify(inputTree, featLabels, testVec):
    """Walk the tree, testing the feature at each node against testVec."""
    firstStr = list(inputTree.keys())[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)   # map feature name back to column index
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                classLabel = secondDict[key]
    return classLabel   # NB: raises UnboundLocalError if testVec has a feature value unseen in training

In [38]:
myDat, labels = createDataSet()

In [39]:
labels


Out[39]:
['no surfacing', 'flippers']

In [40]:
myTree


Out[40]:
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}

In [41]:
classify(myTree, labels, [1, 0])


Out[41]:
'no'

In [42]:
classify(myTree, labels, [1, 1])


Out[42]:
'yes'

In [43]:
def storeTree(inputTree, filename):
    """Serialize the tree to disk with pickle."""
    import pickle
    with open(filename, 'wb') as fw:   # binary mode is required for pickle in Python 3
        pickle.dump(inputTree, fw)

def grabTree(filename):
    """Load a pickled tree from disk."""
    import pickle
    with open(filename, 'rb') as fr:
        return pickle.load(fr)

In [44]:
storeTree(myTree, 'myTree')

In [45]:
grabTree('myTree')


Out[45]:
{'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}

In [46]:
with open('Ch03/lenses.txt') as fr:
    lenses = [inst.strip().split('\t') for inst in fr]   # tab-separated contact-lens records

In [47]:
lensesLabels = ['age', 'prescript', 'astigmatic', 'tearRate']

In [48]:
lensesTree = createTree(lenses, lensesLabels)

In [49]:
lensesTree


Out[49]:
{'tearRate': {'normal': {'astigmatic': {'no': {'age': {'pre': 'soft',
      'presbyopic': {'prescript': {'hyper': 'soft', 'myope': 'no lenses'}},
      'young': 'soft'}},
    'yes': {'prescript': {'hyper': {'age': {'pre': 'no lenses',
        'presbyopic': 'no lenses',
        'young': 'hard'}},
      'myope': 'hard'}}}},
  'reduced': 'no lenses'}}
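
The tree fits the training data exactly. As the 'cons' bullet at the top warns, a tree like this is likely overfitting; pruning the least informative branches (not covered in this notebook) is the usual remedy.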

In [50]:
createPlot(lensesTree)


