转自:https://blog.csdn.net/GrinAndBearIt/article/details/79045143 侵删
本人在自学李航老师的统计学习方法,在学习朴素贝叶斯章节时,其中概念非常好理解,但是准备想把课本中的例题实战一下时却犯了难,有点无从下手的感觉,主要是因为怎么去合理的去写,提高代码的适应性以及重复利用率。
在网上找了蛮多博客,大部分都是判断情感词等,其中有篇博客就对该例题进行了剖析,代码也写得非常好,我个人仔细学习了下,并自己动手写了下。这个脚本总体来说和那个情感分析的差不多,具有非常好的适应性,都是把特征合并成一个大的tag标签,然后每个样本就变成了一个1/0数组,1代表有这个属性,0代表没有。总之非常具有学习意义。下面我附上代码。
#-*- coding:utf-8 -*-
import numpy as np
def loadDateSet():
    """Build the toy training set from Li Hang's example 4.1.

    Returns a (samples, labels) pair.  NOTE: np.array over the mixed
    int/str matrix coerces every entry to a string, so each sample
    comes back as a pair of strings such as ['1', 'S'].
    """
    features = [
        [1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3],
        ['S', 'M', 'M', 'S', 'S', 'S', 'M', 'M', 'L', 'L',
         'L', 'M', 'M', 'L', 'L'],
    ]
    labels = [-1, -1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1]
    samples = np.array(features).transpose().tolist()
    return samples, labels
def calc_label(labels, alpha):
    """Estimate the class priors with Laplace smoothing.

    P(y=c) = (count(c) + alpha) / (N + K * alpha), where K is the
    number of distinct classes.  Returns (prior dict, class list).
    """
    total = len(labels)
    classes = set(labels)
    k = len(classes)
    priors = {c: (labels.count(c) + alpha) / (total + k * alpha)
              for c in classes}
    return priors, list(classes)
# e.g. alpha=1 on the example data -> {1: 0.588..., -1: 0.411...}
def calcVocaulary(dataset):
    """Collect every distinct feature value of all samples into one flat list."""
    vocabulary = set()
    for sample in dataset:
        vocabulary.update(sample)  # union of all feature values
    return list(vocabulary)
# e.g. {'L', '3', 'M', 'S', '2', '1'} for the example data
def calcVector(voca, vector):
    """Encode one sample as a count array aligned with the vocabulary.

    Entry i counts how often voca[i] occurs in `vector`; values not in
    `voca` are silently ignored.
    """
    counts = np.zeros(len(voca))
    for value in vector:
        if value in voca:
            counts[voca.index(value)] += 1
    return np.array(counts)
# e.g. array([1., 0., 0., 1., 0., 0.])
def calcUniqueValueNum(dataset, labels, label, voca):
    """Compute the S_j denominator terms for one class.

    For the samples whose label equals `label`, map every feature value
    to the number of distinct values its column takes, returned as an
    array aligned with `voca`.  Values never seen for this class stay 0.
    If the same value shows up in several columns, the last column
    processed wins (mirrors the original behaviour).
    """
    rows = [sample for sample, y in zip(dataset, labels) if y == label]
    n_cols = len(rows[0])
    value_to_sj = {}
    for col in range(n_cols):
        column_values = {row[col] for row in rows}
        for value in column_values:
            value_to_sj[value] = len(column_values)  # S_j of this column
    result = np.zeros(len(voca))
    for value, sj in value_to_sj.items():
        result[voca.index(value)] = float(sj)
    return result
# e.g. array([3., 3., 3., 3., 3., 3.]) for the example data
def Bayes(dataset, labels, uniqueLabel, voca, alpha):
    """Estimate the smoothed conditional probabilities per class.

    For each class c returns an array aligned with `voca` whose entry for
    value v is (count(v, c) + alpha) / (count(c) + S_j * alpha).
    Result: {class: probability array}.
    """
    trainVecDict = {}
    for current in uniqueLabel:
        # Laplace alpha in the numerator, then accumulate value counts
        # over the samples belonging to this class.
        numerator = np.ones(len(voca)) * alpha
        for sample, y in zip(dataset, labels):
            if y == current:
                numerator += calcVector(voca, sample)
        denominator = (labels.count(current)
                       + calcUniqueValueNum(dataset, labels, current, voca) * alpha)
        trainVecDict[current] = numerator / denominator
    return trainVecDict
# e.g. {-1: array([0.166..., ...]), 1: array([0.444..., ...])}
def testFunction(testArray, voca, trainVecDict, labelRate): # 对测试数据计算各label的概率,返回概率最大的那个值
result = -1
maxRate = -np.inf # 负无穷大
for key in trainVecDict:
singleLabelRate = 1.0
for word in testArray:
singleLabelRate *= trainVecDict[key][voca.index(word)]
if singleLabelRate*labelRate[key] > maxRate:
result = key
maxRate = singleLabelRate*labelRate[key]
return result
def main():
    """Reproduce example 4.1: classify the sample (2, 'S')."""
    dataSet, labels = loadDateSet()
    labelRate, uniqueLabel = calc_label(labels, 0)  # alpha=0: plain MLE
    voca = calcVocaulary(dataSet)
    trainVecDict = Bayes(dataSet, labels, uniqueLabel, voca, 0)
    testArray = np.array([2, 'S'])  # np.array coerces 2 -> '2' to match voca
    print(testFunction(testArray, voca, trainVecDict, labelRate))


if __name__ == '__main__':
    main()
运行结果是 -1。