Implementing C4.5 (Information Gain Ratio) in Python

Runtime Environment

  • Python 3
  • the treePlotter module (needed only for plotting; optional if you skip the plot)
  • matplotlib (required if the module above is used)

Computation Flow

st=>start: Start
e=>end
op1=>operation: Read the data
op2=>operation: Format the data
cond=>condition: Tree construction finished?
su=>subroutine: Build the tree recursively
op3=>operation: Choose the feature with the largest gain ratio as the decision node
op4=>operation: Test the resulting decisions
op5=>operation: Partition into subtrees under the decision node

st->op1->op2->cond
cond(no)->su->op5->op3->su
cond(yes)->op4->e
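
For reference, the quantities the code computes are the standard C4.5 definitions (spelled out here; they are not in the original post). With D the data set, a a candidate feature, and D_v the subset of D where a takes its v-th value:

\mathrm{Ent}(D) = -\sum_{k} p_k \log_2 p_k

\mathrm{Gain}(D, a) = \mathrm{Ent}(D) - \sum_{v} \frac{|D_v|}{|D|} \, \mathrm{Ent}(D_v)

\mathrm{SplitInfo}(D, a) = -\sum_{v} \frac{|D_v|}{|D|} \log_2 \frac{|D_v|}{|D|}

\mathrm{GainRatio}(D, a) = \frac{\mathrm{Gain}(D, a)}{\mathrm{SplitInfo}(D, a)}

These map directly to calcShannonEnt, infoGain, splitInfo, and infoGainRatio in the code below.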

Sample Input

/* Dataset.txt */
Training set: outlook temperature humidity windy
---------------------------------------------------------
sunny hot high false N
sunny hot high true N
overcast hot high false Y
rain mild high false Y
rain cool normal false Y
rain cool normal true N
overcast cool normal true Y

Test set: outlook temperature humidity windy
---------------------------------------------------------
sunny mild high false
sunny cool normal false
rain mild normal false
sunny mild normal true
overcast mild high true
overcast hot normal false
rain mild high true
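
As a quick sanity check on the arithmetic (my addition, not from the original post), here is the gain ratio of outlook on this training set, computed by hand; outlook ends up with the largest gain ratio of the four features, which is why it becomes the root of the tree in the sample output:

from math import log

# Label counts in the 7-row training set above: 4 "Y", 3 "N"
base = -(4/7) * log(4/7, 2) - (3/7) * log(3/7, 2)   # ~0.9852, calcShannonEnt of the whole set

# outlook splits the rows into sunny {N, N}, overcast {Y, Y}, rain {Y, Y, N};
# sunny and overcast are pure (entropy 0), so only rain contributes
ent_rain = -(2/3) * log(2/3, 2) - (1/3) * log(1/3, 2)   # ~0.9183
gain = base - (3/7) * ent_rain                           # ~0.5917, the information gain

# the split information penalizes features with many values
split_info = -(2/7) * log(2/7, 2) * 2 - (3/7) * log(3/7, 2)   # ~1.5567

print(gain / split_info)   # ~0.38, the gain ratio of outlook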

Implementation

# -*- coding: utf-8 -*-
__author__ = 'Wsine'

from math import log
import operator
import treePlotter

def calcShannonEnt(dataSet):
"""
输入:数据集
输出:数据集的香农熵
描述:计算给定数据集的香农熵;熵越大,数据集的混乱程度越大
"""
numEntries = len(dataSet)
labelCounts = {}
for featVec in dataSet:
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key])/numEntries
shannonEnt -= prob * log(prob, 2)
return shannonEnt def splitDataSet(dataSet, axis, value):
"""
输入:数据集,选择维度,选择值
输出:划分数据集
描述:按照给定特征划分数据集;去除选择维度中等于选择值的项
"""
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reduceFeatVec = featVec[:axis]
reduceFeatVec.extend(featVec[axis+1:])
retDataSet.append(reduceFeatVec)
return retDataSet def chooseBestFeatureToSplit(dataSet):
"""
输入:数据集
输出:最好的划分维度
描述:选择最好的数据集划分维度
"""
numFeatures = len(dataSet[0]) - 1
baseEntropy = calcShannonEnt(dataSet)
bestInfoGainRatio = 0.0
bestFeature = -1
for i in range(numFeatures):
featList = [example[i] for example in dataSet]
uniqueVals = set(featList)
newEntropy = 0.0
splitInfo = 0.0
for value in uniqueVals:
subDataSet = splitDataSet(dataSet, i, value)
prob = len(subDataSet)/float(len(dataSet))
newEntropy += prob * calcShannonEnt(subDataSet)
splitInfo += -prob * log(prob, 2)
infoGain = baseEntropy - newEntropy
if (splitInfo == 0): # fix the overflow bug
continue
infoGainRatio = infoGain / splitInfo
if (infoGainRatio > bestInfoGainRatio):
bestInfoGainRatio = infoGainRatio
bestFeature = i
return bestFeature def majorityCnt(classList):
"""
输入:分类类别列表
输出:子节点的分类
描述:数据集已经处理了所有属性,但是类标签依然不是唯一的,
采用多数判决的方法决定该子节点的分类
"""
classCount = {}
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reversed=True)
return sortedClassCount[0][0] def createTree(dataSet, labels):
"""
输入:数据集,特征标签
输出:决策树
描述:递归构建决策树,利用上述的函数
"""
classList = [example[-1] for example in dataSet]
if classList.count(classList[0]) == len(classList):
# 类别完全相同,停止划分
return classList[0]
if len(dataSet[0]) == 1:
# 遍历完所有特征时返回出现次数最多的
return majorityCnt(classList)
bestFeat = chooseBestFeatureToSplit(dataSet)
bestFeatLabel = labels[bestFeat]
myTree = {bestFeatLabel:{}}
del(labels[bestFeat])
# 得到列表包括节点所有的属性值
featValues = [example[bestFeat] for example in dataSet]
uniqueVals = set(featValues)
for value in uniqueVals:
subLabels = labels[:]
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
return myTree def classify(inputTree, featLabels, testVec):
"""
输入:决策树,分类标签,测试数据
输出:决策结果
描述:跑决策树
"""
firstStr = list(inputTree.keys())[0]
secondDict = inputTree[firstStr]
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel def classifyAll(inputTree, featLabels, testDataSet):
"""
输入:决策树,分类标签,测试数据集
输出:决策结果
描述:跑决策树
"""
classLabelAll = []
for testVec in testDataSet:
classLabelAll.append(classify(inputTree, featLabels, testVec))
return classLabelAll def storeTree(inputTree, filename):
"""
输入:决策树,保存文件路径
输出:
描述:保存决策树到文件
"""
import pickle
fw = open(filename, 'wb')
pickle.dump(inputTree, fw)
fw.close() def grabTree(filename):
"""
输入:文件路径名
输出:决策树
描述:从文件读取决策树
"""
import pickle
fr = open(filename, 'rb')
return pickle.load(fr) def createDataSet():
"""
outlook-> 0: sunny | 1: overcast | 2: rain
temperature-> 0: hot | 1: mild | 2: cool
humidity-> 0: high | 1: normal
windy-> 0: false | 1: true
"""
dataSet = [[0, 0, 0, 0, 'N'],
[0, 0, 0, 1, 'N'],
[1, 0, 0, 0, 'Y'],
[2, 1, 0, 0, 'Y'],
[2, 2, 1, 0, 'Y'],
[2, 2, 1, 1, 'N'],
[1, 2, 1, 1, 'Y']]
labels = ['outlook', 'temperature', 'humidity', 'windy']
return dataSet, labels def createTestSet():
"""
outlook-> 0: sunny | 1: overcast | 2: rain
temperature-> 0: hot | 1: mild | 2: cool
humidity-> 0: high | 1: normal
windy-> 0: false | 1: true
"""
testSet = [[0, 1, 0, 0],
[0, 2, 1, 0],
[2, 1, 1, 0],
[0, 1, 1, 1],
[1, 1, 0, 1],
[1, 0, 1, 0],
[2, 1, 0, 1]]
return testSet def main():
    dataSet, labels = createDataSet()
    labels_tmp = labels[:]  # pass a copy: createTree mutates labels
    desicionTree = createTree(dataSet, labels_tmp)
    #storeTree(desicionTree, 'classifierStorage.txt')
    #desicionTree = grabTree('classifierStorage.txt')
    print('desicionTree:\n', desicionTree)
    treePlotter.createPlot(desicionTree)
    testSet = createTestSet()
    print('classifyResult:\n', classifyAll(desicionTree, labels, testSet))

if __name__ == '__main__':
    main()
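
storeTree and grabTree (left commented out in main) persist the tree with pickle. A minimal round-trip sketch, assuming it runs in the same module (the file name is arbitrary):

dataSet, labels = createDataSet()
tree = createTree(dataSet, labels[:])         # pass a copy: createTree mutates the label list
storeTree(tree, 'classifierStorage.txt')      # serialize the nested dict with pickle
restored = grabTree('classifierStorage.txt')  # read it back
assert restored == tree                       # plain dicts compare by content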

Sample Output

desicionTree:
{'outlook': {0: 'N', 1: 'Y', 2: {'windy': {0: 'Y', 1: 'N'}}}}
classifyResult:
['N', 'N', 'Y', 'N', 'Y', 'Y', 'N']
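
Reading the result against the tree: for example, the last test vector [2, 1, 0, 1] routes outlook=2 (rain) -> windy=1 (true) -> 'N', matching the last entry of classifyResult.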

Attached File

treePlotter.py

matplotlib must be installed and configured for this module to work.

import matplotlib.pyplot as plt

decisionNode = dict(boxstyle="sawtooth", fc="0.8")
leafNode = dict(boxstyle="round4", fc="0.8")
arrow_args = dict(arrowstyle="<-")

def plotNode(nodeTxt, centerPt, parentPt, nodeType):
    createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
                            xytext=centerPt, textcoords='axes fraction',
                            va="center", ha="center", bbox=nodeType, arrowprops=arrow_args)

def getNumLeafs(myTree):
    numLeafs = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            numLeafs += getNumLeafs(secondDict[key])
        else:
            numLeafs += 1
    return numLeafs

def getTreeDepth(myTree):
    maxDepth = 0
    firstStr = list(myTree.keys())[0]
    secondDict = myTree[firstStr]
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            thisDepth = getTreeDepth(secondDict[key]) + 1
        else:
            thisDepth = 1
        if thisDepth > maxDepth:
            maxDepth = thisDepth
    return maxDepth

def plotMidText(cntrPt, parentPt, txtString):
    xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
    yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
    createPlot.ax1.text(xMid, yMid, txtString)

def plotTree(myTree, parentPt, nodeTxt):
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = list(myTree.keys())[0]
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalw, plotTree.yOff)
    plotMidText(cntrPt, parentPt, nodeTxt)
    plotNode(firstStr, cntrPt, parentPt, decisionNode)
    secondDict = myTree[firstStr]
    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            plotTree(secondDict[key], cntrPt, str(key))
        else:
            plotTree.xOff = plotTree.xOff + 1.0 / plotTree.totalw
            plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
    plotTree.yOff = plotTree.yOff + 1.0 / plotTree.totalD

def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')
    fig.clf()
    axprops = dict(xticks=[], yticks=[])
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)
    plotTree.totalw = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5 / plotTree.totalw
    plotTree.yOff = 1.0
    plotTree(inTree, (0.5, 1.0), '')
    plt.show()
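
createPlot only needs the nested-dict form of the tree, so the figure can be reproduced without re-training. A small usage sketch, using the tree from the sample output above:

import treePlotter

# the decision tree printed in the sample output
tree = {'outlook': {0: 'N', 1: 'Y', 2: {'windy': {0: 'Y', 1: 'N'}}}}
treePlotter.createPlot(tree)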
