Machine Learning: Decision Trees (the ID3 Algorithm)



How Decision Trees Work

A decision tree is a tree-structured model learned from training data; it belongs to the family of discriminative models. The tree partitions the data by making a sequence of decisions, much like answering a series of questions. To classify an item, the decision process starts at the root node, tests the item's value for the feature attribute at that node, follows the output branch matching that value, and repeats until it reaches a leaf node; the class stored at the leaf is the decision result.
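As a minimal sketch of that walk (the tree and sample below are made up for illustration; the nested-dict representation, {feature: {value: subtree-or-class}}, is the same one the code later in this post builds):

from math import inf  # no external deps; stdlib only

# A made-up weather tree and a made-up sample.
tree = {'outlook': {'sunny': {'windy': {'yes': 'stay in', 'no': 'go out'}},
                    'rainy': 'stay in'}}

def decide(tree, sample):
    if not isinstance(tree, dict):            # leaf reached: it stores the class
        return tree
    feature = next(iter(tree))                # feature tested at this node
    branch = tree[feature][sample[feature]]   # follow the branch for the sample's value
    return decide(branch, sample)

print(decide(tree, {'outlook': 'sunny', 'windy': 'no'}))  # -> go out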

A decision tree algorithm is an inductive classification algorithm: by learning from the training set, it mines useful rules and applies them to predict on new data. The basic algorithm for decision tree induction is a greedy algorithm that builds the tree top-down, making the choice that looks best in the current state at every step. During tree construction, the attribute selection measure is the key ingredient.

A Decision Tree Algorithm: ID3

ID3 builds a classification model whose tree structure is a multi-way tree, and selects features by information gain. It does not handle continuous values or missing values, does not prune, and never uses a feature attribute more than once.

The rough flow of the ID3 algorithm is as follows:

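In the standard formulation, one pass of ID3 works like this:

1. Compute the empirical entropy of the dataset $D$, where $p_k$ is the proportion of samples in class $k$:

$$H(D) = -\sum_{k} p_k \log_2 p_k$$

2. For every candidate feature $A$, compute the conditional entropy left after splitting $D$ into subsets $D_v$ by the values $v$ of $A$:

$$H(D \mid A) = \sum_{v} \frac{|D_v|}{|D|}\, H(D_v)$$

3. Select the feature with the largest information gain,

$$g(D, A) = H(D) - H(D \mid A),$$

as the split at the current node.

4. Recurse on each branch with the remaining features; stop when a node is pure or no features remain (falling back to the majority class).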

ID3 also has drawbacks: it has no pruning strategy, so it overfits easily; the information gain criterion is biased toward features with many distinct values (an "ID"-like feature that is unique per sample gets an information gain close to the maximum, as the snippet below illustrates); it can only handle discretely distributed features; and it does not account for missing values.
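To see the bias concretely, here is a tiny self-contained check (the labels are made up for illustration): an ID-like feature puts every sample in its own pure subset, so the conditional entropy is 0 and the gain equals the full dataset entropy, the maximum any feature can achieve.

from math import log2

# Made-up toy labels: two classes, perfectly balanced.
class_list = ['yes', 'yes', 'no', 'no']

def entropy(ys):
    n = len(ys)
    return -sum((ys.count(c) / n) * log2(ys.count(c) / n)
                for c in set(ys))

base = entropy(class_list)   # H(D) = 1.0 for a 50/50 split
# Splitting on a unique "ID" column yields one-sample subsets, each pure:
cond = sum((1 / len(class_list)) * entropy([c]) for c in class_list)  # H(D|ID) = 0
print(base - cond)           # gain(ID) = 1.0 = H(D), the maximum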

Implementing ID3 in Code

The chosen dataset records whether to go out today depending on the weather; after training completes, the resulting decision tree is drawn.

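The loading code below only assumes that the feature columns come first and that the last column is the class label, so that labels.pop() can drop the class-column name. A made-up file of that shape, for illustration only:

outlook,windy,go_out
sunny,no,yes
sunny,yes,no
rainy,no,no
rainy,yes,no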

from math import log
import pandas as pd
import numpy as np
import operator
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt

path = r'D:\MachineL\TreeTest.csv'  # raw string so the backslashes are not escape sequences
df = pd.read_csv(path)
dataSet = np.array(df.loc[:, :]).tolist()  # rows as plain Python lists
print(dataSet)
labels = list(df.columns.values)           # all column names
labels.pop()                               # drop the class-label column name
print(labels)


def calcShannonEnt(dataSet):
    """Compute the empirical (Shannon) entropy of the class labels in dataSet."""
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:
        currentLabel = featVec[-1]          # class label is the last column
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1      # count occurrences of each class

    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries  # class proportion p_k
        shannonEnt -= prob * log(prob, 2)            # -sum of p_k * log2(p_k)
    return shannonEnt  # the empirical entropy
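# Sanity check for calcShannonEnt: a 50/50 two-class dataset has entropy
# -0.5*log2(0.5) - 0.5*log2(0.5) = 1.0 bit, the maximum for two classes,
# while a pure dataset has entropy 0.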


def splitDataSet(dataSet, axis, value):
    """Return the rows whose feature `axis` equals `value`, with that
    feature column removed (ID3 uses each feature only once)."""
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]            # columns before the feature
            reducedFeatVec.extend(featVec[axis + 1:])  # columns after it
            retDataSet.append(reducedFeatVec)
    return retDataSet


def chooseBestFeatureToSplit(dataSet):
    """Return the index of the feature with the largest information gain."""
    numFeatures = len(dataSet[0]) - 1          # last column is the class label
    baseEntropy = calcShannonEnt(dataSet)      # H(D)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)             # distinct values of feature i
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)  # H(D|A)
        infoGain = baseEntropy - newEntropy    # g(D, A) = H(D) - H(D|A)
        print("Information gain of feature %d: %.3f" % (i, infoGain))
        if infoGain > bestInfoGain:
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature


def majorityCnt(classList):
    """Return the most common class label in classList."""
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1  # count every vote, not only first occurrences
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]


def createTree(dataSet, labels, featLabels):
    """Recursively build the ID3 tree as nested dicts: {feature: {value: subtree}}."""
    # Collect the class labels (yes or no)
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]            # all samples agree: return that class
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)  # no features left: majority vote
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    featLabels.append(bestFeatLabel)   # record the order in which features are used
    myTree = {bestFeatLabel: {}}
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    # splitDataSet drops the chosen column, so drop its name too (on a copy)
    # to keep the label list aligned with the remaining columns
    subLabels = labels[:bestFeat] + labels[bestFeat + 1:]
    for value in uniqueVals:
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value),
                                                  subLabels, featLabels)
    return myTree


def getNumLeafs(myTree):
    """Count the leaf nodes of the tree (used to size the plot horizontally)."""
    numLeafs = 0
    firstStr = next(iter(myTree))      # feature tested at this node
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':  # subtree: recurse
            numLeafs += getNumLeafs(secondDict[key])
        else:                                         # leaf node
            numLeafs += 1
    return numLeafs




def getTreeDepth(myTree):
    """Return the depth of the decision tree (used to size the plot vertically)."""
    maxDepth = 0
    firstStr = next(iter(myTree))
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':  # subtree: recurse
            thisDepth = 1 + getTreeDepth(secondDict[key])
        else:                                         # leaf node
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth




def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    """Draw one tree node with an arrow from its parent."""
    arrow_args = dict(arrowstyle="<-")
    # SimSun so Chinese feature names render; adjust the font path as needed
    font = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType,
                            arrowprops=arrow_args, fontproperties=font)



def plotMidText(cntrPt, parentPt, txtString):
    """Label the edge between parent and child with the branch value."""
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString, va="center", ha="center", rotation=30)




def plotTree(myTree, parentPt, nodeTxt):
    """Recursively lay out and draw the decision tree."""
    decisionNode = dict(boxstyle="sawtooth", fc="0.8")  # style for internal nodes
    leafNode = dict(boxstyle="round4", fc="0.8")        # style for leaves
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = next(iter(myTree))
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD  # descend one level
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            plotTree(secondDict[key], cntrPt, str(key))    # draw the subtree
        else:
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalW
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD  # back up a level



def createPlot(inTree):
    """Set up the figure and draw the whole decision tree."""
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    plotTree.totalW = float(getNumLeafs(inTree))   # total leaf count
    plotTree.totalD = float(getTreeDepth(inTree))  # total depth
    plotTree.xOff = -0.5 / plotTree.totalW
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()  # display the plot
    
    
featLabels = []                                   # filled with features in the order used
myTree = createTree(dataSet, labels, featLabels)  # train the tree
print(myTree)
createPlot(myTree)                                # draw it

The final decision tree visualization:

(Figure: the plotted decision tree.)

The information gain computed for each feature is printed as follows:

(Figure: console output of the per-feature gain values.)
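The code above trains and draws the tree but never uses it to classify a new sample. For completeness, here is a minimal sketch of that step (not part of the original code; it assumes `labels` still holds the original feature-column names, which holds here because createTree copies the list instead of mutating it):

def classify(inputTree, featureNames, testVec):
    # Walk the nested dict: find the feature tested at this node, follow
    # the branch matching the sample's value, and recurse until a leaf.
    firstStr = next(iter(inputTree))
    secondDict = inputTree[firstStr]
    featIndex = featureNames.index(firstStr)
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':
                return classify(secondDict[key], featureNames, testVec)
            return secondDict[key]

# Hypothetical usage: a test vector listing feature values in column order,
# e.g. the first training row without its class label.
# print(classify(myTree, labels, dataSet[0][:-1]))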