附件一共给出了3段决策树代码程序,请读懂实验代码,阐述三段代码的主要功能,阐述三段代码的主要适用场景。同时,分析如果将购买电脑案例和不同天气下的打棒球案例数据运用 tree.DecisionTreeClassifier()进行分析和构造,程序如何编写?
作业提交要求:1.要求给出具体的代码段---购买电脑案例和不同天气下的打棒球案例数据运用 tree.DecisionTreeClassifier()编程
2.需要显示决策树图形。
1、如果 import graphviz 不能正常使用,请先安装 Graphviz 软件并把其 bin 目录加入系统 PATH,再用 pip install graphviz 安装 Python 接口。
# 2、Demo: fit a sklearn DecisionTreeClassifier on a toy 3-bit feature table
#    and render the learned tree to PDF via graphviz.
import numpy as np
from sklearn import tree

# Training data: all 8 combinations of three binary features, with
# hand-assigned integer class labels.
X = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1],
              [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1]])
y = [0, 1, 1, 1, 2, 3, 3, 4]
clf = tree.DecisionTreeClassifier()  # create the decision-tree classifier
clf.fit(X, y)  # fit the model
'''DecisionTreeClassifier(class_weight=None, criterion='gini',
max_depth=None,
max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=2,
min_weight_fraction_leaf=0.0, presort=False,
random_state=None, splitter='best')'''
clf.predict([[1, 0, 0]])  # classify one sample
#array([2])
import graphviz
dot_data = tree.export_graphviz(clf, out_file=None)  # export the tree in DOT format
graph = graphviz.Source(dot_data)  # build the graphviz graph object
graph.render('result')  # write the rendering to result.pdf
3、购买电脑案例
from math import log
import operator
import pandas as pd
import numpy as np
def calcShannonEnt(dataSet):
    """Return the Shannon entropy (bits) of the class labels in dataSet.

    The last element of each row is treated as the class label.
    """
    total = len(dataSet)
    counts = {}
    for row in dataSet:
        label = row[-1]  # class label lives in the last column
        counts[label] = counts.get(label, 0) + 1
    # H = -sum(p * log2(p)) over every class probability p
    return -sum((c / float(total)) * log(c / float(total), 2)
                for c in counts.values())
def createDataSet1():
    """Build the classic AllElectronics 'buys_computer' training set.

    Returns:
        dataSet: 14 rows of [age, income, student, credit_rating, buys_computer];
                 the last column is the class label.
        labels:  feature names for the first four columns.

    Fix: row 12's credit_rating was 'excellent ' (stray trailing space),
    which ID3 would treat as a category distinct from 'excellent'.
    """
    dataSet = [['<=30', 'high', 'no', 'fair', 'no'],
               ['<=30', 'high', 'no', 'excellent', 'no'],
               ['31…40', 'high', 'no', 'fair', 'yes'],
               ['>40', 'medium', 'no', 'fair', 'yes'],
               ['>40', 'low', 'yes', 'fair', 'yes'],
               ['>40', 'low', 'yes', 'excellent', 'no'],
               ['31…40', 'low', 'yes', 'excellent', 'yes'],
               ['<=30', 'medium', 'no', 'fair', 'no'],
               ['<=30', 'low', 'yes', 'fair', 'yes'],
               ['>40', 'medium', 'yes', 'fair', 'yes'],
               ['<=30', 'medium', 'yes', 'excellent', 'yes'],
               ['31…40', 'medium', 'no', 'excellent', 'yes'],  # was 'excellent '
               ['31…40', 'high', 'yes', 'fair', 'yes'],
               ['>40', 'medium', 'no', 'excellent', 'no']
               ]
    labels = ['age', 'income', 'student', 'credit_rating']
    return dataSet, labels
def splitDataSet(dataSet, axis, value):
    """Return the rows whose feature at column `axis` equals `value`,
    with that column removed from each returned row."""
    return [row[:axis] + row[axis + 1:] for row in dataSet if row[axis] == value]
def chooseBestFeatureToSplit(dataSet):
    """Pick the feature index with the highest information gain (ID3).

    Returns -1 when no split improves on the base entropy.
    """
    featureCount = len(dataSet[0]) - 1
    baseEntropy = calcShannonEnt(dataSet)  # entropy before any split
    bestGain, bestIndex = 0, -1
    total = float(len(dataSet))
    for idx in range(featureCount):
        # expected (weighted) entropy after partitioning on feature idx
        splitEntropy = 0
        for val in set(row[idx] for row in dataSet):
            subset = splitDataSet(dataSet, idx, val)
            splitEntropy += (len(subset) / total) * calcShannonEnt(subset)
        gain = baseEntropy - splitEntropy
        if gain > bestGain:
            bestGain, bestIndex = gain, idx
    return bestIndex
def majorityCnt(classList):
    """Return the most frequent label in classList.

    On ties the label seen first wins, matching a stable descending sort.
    """
    votes = {}
    for label in classList:
        votes[label] = votes.get(label, 0) + 1
    return max(votes, key=votes.get)
def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    NOTE: deletes the chosen feature name from `labels` in place, so
    callers must pass a copy if they need the list afterwards.
    """
    classes = [row[-1] for row in dataSet]
    # pure node: every sample carries the same class -> leaf
    if classes.count(classes[0]) == len(classes):
        return classes[0]
    # no features left to split on -> majority-vote leaf
    if len(dataSet[0]) == 1:
        return majorityCnt(classes)
    best = chooseBestFeatureToSplit(dataSet)
    bestLabel = labels[best]
    tree = {bestLabel: {}}  # result stored as a nested dict
    del labels[best]  # consume the feature (in place, as callers expect)
    for val in set(row[best] for row in dataSet):
        branchLabels = labels[:]  # each branch gets its own copy
        tree[bestLabel][val] = createTree(splitDataSet(dataSet, best, val),
                                          branchLabels)
    return tree
if __name__ == '__main__':
    dataSet, labels = createDataSet1()  # build the sample data
    print(createTree(dataSet, labels))  # print the learned tree (nested dicts)
# Output: {'age': {'>40': {'credit_rating': {'fair': 'yes', 'excellent': 'no'}}, '31…40': 'yes', '<=30': {'student': {'no': 'no', 'yes': 'yes'}}}}
4、不同天气下的打棒球案例
from math import log
import operator
import pandas as pd
import numpy as np
def calcShannonEnt(dataSet):
    """Compute the base-2 Shannon entropy of the last-column class labels."""
    n = len(dataSet)
    tally = {}
    for record in dataSet:
        cls = record[-1]  # class label is the final field of each row
        if cls in tally:
            tally[cls] += 1
        else:
            tally[cls] = 1
    entropy = 0.0
    for count in tally.values():
        p = count / float(n)  # probability of this class
        entropy -= p * log(p, 2)  # accumulate -p*log2(p)
    return entropy
def createDataSet1():
    """Build the play-baseball-under-weather training set.

    Each row is [weather, temp, humidity, wind, play?] with Chinese
    categorical values; the last column is the class label.
    """
    rows = [
        ['晴', '热', '高', '否', '否'],
        ['晴', '热', '高', '是', '否'],
        ['阴', '热', '高', '否', '是'],
        ['雨', '温', '高', '否', '是'],
        ['雨', '凉爽', '中', '否', '是'],
        ['雨', '凉爽', '中', '是', '否'],
        ['阴', '凉爽', '中', '是', '是'],
        ['晴', '温', '高', '否', '否'],
        ['晴', '凉爽', '中', '否', '是'],
        ['雨', '温', '中', '否', '是'],
        ['晴', '温', '中', '是', '是'],
        ['阴', '温', '高', '是', '是'],
        ['阴', '热', '中', '否', '是'],
        ['雨', '温', '高', '是', '否'],
    ]
    featureNames = ['weather', 'temp', 'humidity', 'wind']
    return rows, featureNames
def splitDataSet(dataSet, axis, value):
    """Filter dataSet to rows matching `value` at column `axis`,
    dropping that column from every returned row."""
    matched = []
    for row in dataSet:
        if row[axis] != value:
            continue  # skip non-matching rows
        trimmed = row[:axis]
        trimmed.extend(row[axis + 1:])
        matched.append(trimmed)
    return matched
def chooseBestFeatureToSplit(dataSet):
    """Select the column index whose split yields maximal information gain.

    Returns -1 when every candidate split has zero (or negative) gain.
    """
    nFeat = len(dataSet[0]) - 1
    rootEntropy = calcShannonEnt(dataSet)  # entropy of the unsplit data
    best = (0, -1)  # (gain, feature index)
    total = float(len(dataSet))
    for col in range(nFeat):
        condEntropy = 0  # conditional entropy given this feature
        for val in set(r[col] for r in dataSet):
            part = splitDataSet(dataSet, col, val)
            condEntropy += (len(part) / total) * calcShannonEnt(part)
        gain = rootEntropy - condEntropy
        if gain > best[0]:
            best = (gain, col)
    return best[1]
def majorityCnt(classList):
    """Majority vote: return the label that occurs most often in classList."""
    counts = {}
    for c in classList:
        counts[c] = counts.get(c, 0) + 1
    # stable descending sort keeps first-seen label on ties
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    return ranked[0][0]
def classify(inputTree, featLabels, testVec):
    """Classify testVec by walking a nested-dict decision tree.

    Args:
        inputTree: tree produced by createTree, e.g. {'weather': {...}}.
        featLabels: feature names, in the column order of testVec.
        testVec: the feature values of one sample.

    Returns:
        The predicted class label, or None when testVec's value for the
        current node's feature matches no branch.

    Fix: the original left `classLabel` unbound (UnboundLocalError) for
    feature values absent from the tree; it is now initialized to None.
    """
    firstStr = list(inputTree.keys())[0]  # feature tested at this node
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    classLabel = None  # defined even when no branch matches
    for key in secondDict:
        if testVec[featIndex] == key:
            subtree = secondDict[key]
            if isinstance(subtree, dict):  # internal node: recurse
                classLabel = classify(subtree, featLabels, testVec)
            else:  # leaf: the class label itself
                classLabel = subtree
            break  # at most one branch can match
    return classLabel
def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    Args:
        dataSet: rows of feature values with the class label last.
        labels: feature names for the non-label columns.

    Returns:
        A class label (leaf) or {featureName: {value: subtree, ...}}.

    Fix: the original deleted the chosen feature from the caller's
    `labels` list in place, forcing callers to keep a defensive copy
    (see labels_tmp in __main__). This version works on an internal
    copy, leaving the caller's list intact; the tree built is identical.
    """
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        return classList[0]  # pure node -> leaf
    if len(dataSet[0]) == 1:
        return majorityCnt(classList)  # out of features -> majority vote
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    # copy without the consumed feature instead of del(labels[bestFeat])
    remaining = labels[:bestFeat] + labels[bestFeat + 1:]
    myTree = {bestFeatLabel: {}}  # result stored as a nested dict
    for value in set(example[bestFeat] for example in dataSet):
        myTree[bestFeatLabel][value] = createTree(
            splitDataSet(dataSet, bestFeat, value), remaining[:])
    return myTree
if __name__ == '__main__':
    dataSet, labels = createDataSet1()  # build the training data
    labels_tmp = labels[:]  # createTree mutates labels in place; keep a copy for classify
    mytree = createTree(dataSet, labels)
    # print(labels_tmp)
    print(classify(mytree, labels_tmp, ['雨', '热', '中', '否']))  # predict one sample

