相关分析
函数关系:
相关关系:影响不存在方向性,比如身高越高体重越重,但不能说身高增加1cm体重增加2kg
相关分析不具有传递性,A和C相关,B和C相关,A和B不一定相关
相关系数的显著性检验
#1. Pairwise correlations (returns both the coefficient and the p value)
correlation = []
for col in car_corr[['weight', 'circle', 'horsepower']].columns:
    correlation.append(stats.pearsonr(car_corr['max_speed'], car_corr[col]))
#1.2 p values only
from sklearn.feature_selection import f_regression
F, P_value = f_regression(car_corr[['weight', 'circle', 'horsepower']], car_corr['max_speed'])
#2. Correlation matrix of the DataFrame (coefficients only, no p values)
# BUG FIX: column names made consistent with the lowercase names used above
# ('Max_speed' / 'Weight' would raise KeyError if the columns are lowercase)
car_corr[['weight', 'circle', 'horsepower', 'max_speed']].corr()
#2.2 Same matrix via numpy
np.corrcoef((car_corr['weight'], car_corr['circle'], car_corr['horsepower'], car_corr['max_speed']))
偏相关分析
发动机作为汽车的心脏,对各项指标有影响。因此,在研究其他指标和最高时速指标之间的相关关系时,会不知不觉在变量之间加入发动机相关指标,对所研究的变量产生影响,而由于相关关系的不可传递性,这种影响往往会导致错误的结论
剔除其他变量影响之后再进行相关分析
def partial_corr(x, y, partical=None):
    """First-order partial correlation between x and y, controlling for one variable.

    Parameters
    ----------
    x, y : array-like
        The two variables whose correlation is examined.
    partical : array-like
        The single control variable (original parameter name kept for
        backward compatibility with existing call sites).

    Returns
    -------
    (r, prob)
        Partial correlation coefficient and the two-sided p value from a
        t test with n - 3 degrees of freedom.
    """
    # BUG FIX: the original default was a mutable list ([]), which Python
    # shares across calls; the control variable is effectively required.
    if partical is None:
        raise ValueError('a control variable (partical) is required')
    xy, _ = stats.pearsonr(x, y)
    xp, _ = stats.pearsonr(x, partical)
    yp, _ = stats.pearsonr(y, partical)
    n = len(x)
    df = n - 3  # degrees of freedom for a first-order partial correlation
    # Standard first-order partial correlation formula.
    r = (xy - xp * yp) / (np.sqrt(1 - xp * xp) * np.sqrt(1 - yp * yp))
    if abs(r) == 1:
        prob = 0.0  # perfect correlation: the p value is exactly zero
    else:
        t = (r * np.sqrt(df)) / np.sqrt(1 - r * r)
        prob = (1 - stats.t.cdf(abs(t), df)) * 2  # two-sided p value
    return r, prob
# Partial correlation of each predictor with max_speed, controlling for horsepower.
pcorrelation = []
for col in car_corr[['weight', 'circle']].columns:
    # BUG FIX: the function is named partial_corr (was `partical_corr`),
    # the column is 'max_speed' (was 'max speed'), and the control variable
    # is passed positionally because the keyword `partial=` does not match
    # the definition's parameter name.
    pcorrelation.append(partial_corr(car_corr[col], car_corr['max_speed'], car_corr['horsepower']))
点二列相关分析
一个连续变量一个分类变量
# Point-biserial correlation: one continuous variable (score) against one
# dichotomous variable (gender).
stats.pointbiserialr(scorebygender['gender'],scorebygender['score'])
# NOTE: the first argument must be coded as 0/1 (boolean) values
非参数相关分析
Spearman相关系数、Kendall相关系数、Hoeffding相关系数
# Spearman rank correlation across all columns of the DataFrame.
rho, p = stats.spearmanr(graduate)
# Kendall's tau of each remaining column against 'Tutor'.
kt = []
# BUG FIX: `.columns` is a property, not a method, and the placeholder
# selection `graduate[[],[],[]]` was invalid. Assumes every non-'Tutor'
# column should be compared — TODO confirm the intended column subset.
for col in graduate.drop(columns='Tutor').columns:
    kt.append(stats.kendalltau(graduate[col], graduate['Tutor']))
关联分析
数据变成0,1型
sign='-->'  # separator joining antecedent and consequent in rule labels (e.g. 'A-->B')
class Apriori(object):
    """Association-rule mining with the Apriori algorithm on 0/1-encoded data.

    Parameters
    ----------
    minsupport : float
        Minimum support threshold for a frequent itemset.
    minconfidence : float
        Minimum confidence threshold for a reported rule.
    sep : str
        Separator used to join item names in rule labels (backward-compatible
        addition; defaults to the '-->' convention used by the module).
    """
    def __init__(self, minsupport=0.1, minconfidence=0.4, sep='-->'):
        self.minsupport = minsupport
        self.minconfidence = minconfidence
        self.sep = sep

    def link(self, x, sign):
        """Join frequent k-itemsets into candidate (k+1)-itemsets.

        Two itemsets are joined when they share the same (k-1)-item prefix
        and differ only in the last item.
        """
        x = list(map(lambda i: sorted(i.split(sign)), x))
        l = len(x[0])
        r = []
        for i in range(len(x)):
            for j in range(i, len(x)):
                # Same prefix, different last item -> candidate of size k+1.
                if x[i][:l - 1] == x[j][:l - 1] and x[i][l - 1] != x[j][l - 1]:
                    r.append(x[i][:l - 1] + sorted([x[j][l - 1], x[i][l - 1]]))
        return r

    def apriori(self, data):
        """Mine frequent itemsets and return rules passing both thresholds.

        Parameters
        ----------
        data : pandas.DataFrame
            0/1-encoded transactions (one column per item).

        Returns
        -------
        pandas.DataFrame
            One row per rule (index like 'A-->B'), columns
            ['support', 'confidence'], sorted descending.
        """
        sep = self.sep
        final = pd.DataFrame(index=['support', 'confidence'])
        # Support of each single item.
        support_series = 1.0 * data.sum() / len(data)
        column = list(support_series[support_series > self.minsupport].index)
        k = 0
        while len(column) > 1:
            k = k + 1
            column = self.link(column, sep)
            # Support of a candidate = mean of the product of its item columns.
            sf = lambda i: data[i].prod(axis=1, numeric_only=True)
            data_2 = pd.DataFrame(list(map(sf, column)),
                                  index=[sep.join(i) for i in column]).T
            support_series_2 = 1.0 * data_2[[sep.join(i) for i in column]].sum() / len(data)
            # Keep only the frequent candidates.
            column = list(support_series_2[support_series_2 > self.minsupport].index)
            # BUG FIX: Series.append was removed in pandas 2.0 -- use pd.concat.
            support_series = pd.concat([support_series, support_series_2])
            column2 = []
            for i in column:
                i = i.split(sep)
                for j in range(len(i)):
                    # Rotate each item into the consequent (last) position.
                    column2.append(i[:j] + i[j + 1:] + i[j:j + 1])
            # BUG FIX: was misspelled `cofidence_series`, causing a NameError.
            confidence_series = pd.Series(index=[sep.join(i) for i in column2],
                                          dtype=float)
            for i in column2:
                # BUG FIX: the original assignment was split across two lines
                # without a continuation, which is a SyntaxError.
                confidence_series[sep.join(i)] = (
                    support_series[sep.join(sorted(i))]
                    / support_series[sep.join(i[:len(i) - 1])])
            for i in confidence_series[confidence_series > self.minconfidence].index:
                final[i] = 0.0
                # BUG FIX: chained assignment (final[i]['confidence']) does not
                # reliably write back -- use .loc instead.
                final.loc['confidence', i] = confidence_series[i]
                final.loc['support', i] = support_series[sep.join(sorted(i.split(sep)))]
        # BUG FIX: DataFrame.sort was removed long ago -- use sort_values.
        final = final.T.sort_values(['confidence', 'support'], ascending=False)
        return final
# Mine association rules from the 0/1-encoded basket data `mpb`
# (thresholds default to minsupport=0.1, minconfidence=0.4).
rule=Apriori()
rule.apriori(mpb)
FP-growth要求数据格式:出现就出现为原名字,不出现这一行就NaN
from fp_growth import find_frequent_itemsets as ffi
# BUG FIX: the original call had unbalanced parentheses (SyntaxError) and
# computed len(array(mpb_fpg)*0.1) -- multiplying the array by 0.1 before
# taking its length; the intended minimum support is 10% of the row count.
for itemset in ffi(array(mpb_fpg), minimum_support=int(len(mpb_fpg) * 0.1)):
    if nan in itemset:
        pass  # skip itemsets that contain missing values
    elif len(itemset) == 1:
        pass  # skip trivial single-item sets
    else:
        print(itemset[::-1])