Implementing Decision Trees, Naive Bayes, and K-Nearest Neighbors with Python's scikit-learn Library

1. Decision Trees
  1.1 Decision trees on continuous-valued data (using the iris dataset bundled with scikit-learn)

from sklearn.datasets import load_iris
from sklearn import tree
iris = load_iris()
clf = tree.DecisionTreeClassifier()
clf = clf.fit(iris.data, iris.target)
# export the tree in Graphviz format using the export_graphviz exporter
with open("iris.dot", 'w') as f:
    f = tree.export_graphviz(clf, out_file=f)
# predict the class of the first sample
res = clf.predict(iris.data[:1, :])
print(res)
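The iris.dot file can be rendered with Graphviz (e.g. dot -Tpdf iris.dot -o iris.pdf). As an alternative, here is a minimal sketch, assuming scikit-learn >= 0.21 and matplotlib are installed, that continues from the block above and draws the tree without Graphviz:

import matplotlib.pyplot as plt

plt.figure(figsize=(12, 8))
# plot the fitted tree with the iris feature and class names
tree.plot_tree(clf, feature_names=iris.feature_names,
               class_names=iris.target_names, filled=True)
plt.show()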

  1.2 Decision trees on data whose features are all discrete values
  The data is structured as follows:
[Figure: sample rows of the CSV file — an ID column, several categorical feature columns, and the class label in the last column]
  It is important to understand the idea of dummy (one-hot) encoding: if an attribute has 5 possible values, encoding it produces 5 columns, one per value; a 1 in a column means the sample takes that value.
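As a minimal standalone illustration (a toy example, not part of the script below), scikit-learn's DictVectorizer performs exactly this encoding for string-valued features:

from sklearn.feature_extraction import DictVectorizer

vec = DictVectorizer()
# each distinct value of 'color' becomes its own 0/1 column
X = vec.fit_transform([{'color': 'red'}, {'color': 'blue'}]).toarray()
print(vec.get_feature_names())   # ['color=blue', 'color=red']
print(X)                         # [[0. 1.]
                                 #  [1. 0.]]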

from sklearn.feature_extraction import DictVectorizer
import csv
from sklearn import preprocessing
from sklearn import tree

# open the csv file
allElectronicsData = open('D:\\winter_python\\0119.csv')
reader = csv.reader(allElectronicsData)
headers = next(reader)   # first row holds the column names
# print(headers)
featureList = []
labelList = []

# build a list of {feature_name: value} dicts plus a parallel list of labels
for row in reader:
    labelList.append(row[len(row)-1])    # last column is the class label
    rowDict = {}
    for i in range(1, len(row)-1):       # skip column 0 (the row ID)
        rowDict[headers[i]] = row[i]
    featureList.append(rowDict)
# print(featureList)

vec = DictVectorizer()
dummyX = vec.fit_transform(featureList).toarray()   # one-hot encoded feature matrix
# print("dummyX:" + str(dummyX))
# print(vec.get_feature_names())
# print("labelList:" + str(labelList))

lb = preprocessing.LabelBinarizer()
dummyY = lb.fit_transform(labelList)                # binarize the class labels
# print("dummyY:" + str(dummyY))

clf = tree.DecisionTreeClassifier(criterion='entropy')   # split on information gain
clf = clf.fit(dummyX, dummyY)
# print("clf:" + str(clf))

with open("AllElectronics.dot", 'w') as f:
    f = tree.export_graphviz(clf, feature_names=vec.get_feature_names(), out_file=f)
print("featureNames:", vec.get_feature_names())
# predict: reuse the first (already-encoded) row as the input sample
oneRowX = dummyX[0, :]
# print("oneRowX:" + str(oneRowX))

newRowX = oneRowX
# print("newRowX:" + str(newRowX))
predictedY = clf.predict([newRowX])
print("predictedY:" + str(predictedY))

2. Naive Bayes

# toy training data
import numpy as np
features_train = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
labels_train = np.array([1, 1, 1, 2, 2, 2])
# import Gaussian Naive Bayes
from sklearn.naive_bayes import GaussianNB
# instantiate the classifier
clf = GaussianNB()
# train the model; fit is the training step
clf.fit(features_train, labels_train)
# predict a single test sample
features_test = np.array([[-0.8, -1]])
labels_test = np.array([1])
pred = clf.predict(features_test)
print(pred)
# accuracy = correct predictions / total predictions
# method 1
accuracy = clf.score(features_test, labels_test)
# method 2: accuracy_score(y_true, y_pred)
from sklearn.metrics import accuracy_score
accuracy2 = accuracy_score(labels_test, pred)
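Beyond hard class predictions, GaussianNB also exposes the posterior probability of each class; a minimal continuation of the block above:

# per-class posterior probabilities for the test sample
proba = clf.predict_proba(features_test)
print(clf.classes_)   # the class labels, here [1 2]
print(proba)          # one probability per class; each row sums to 1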

3. K-Nearest Neighbors (KNN)

from sklearn.datasets import load_iris
from sklearn import neighbors

# load the iris dataset
iris = load_iris()
knn = neighbors.KNeighborsClassifier()
# fit on the full dataset
knn.fit(iris.data, iris.target)
# predict the class of a new, made-up sample
predict = knn.predict([[0.1, 0.2, 0.3, 0.4]])
print(predict)
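Fitting and predicting on the same data says nothing about generalization. A minimal sketch of a held-out evaluation, assuming a standard scikit-learn install (n_neighbors defaults to 5; 3 is just an illustrative choice):

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

iris = load_iris()
# hold out 25% of the samples for testing
X_train, X_test, y_train, y_test = train_test_split(
    iris.data, iris.target, test_size=0.25, random_state=42)

knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
print(knn.score(X_test, y_test))   # accuracy on the held-out test set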