# 代碼實現 (code implementation)
# 2.4 Python implementation
# From Task03 - common hypotheses and distributions
## Computing the PMF and PDF of statistical distributions
# Generate random numbers that follow a particular distribution
# numpy provides a `random` module that draws samples from specific distributions
import numpy
# Draw a sample of size 1000 from the binomial distribution B(10, 0.5)
s = numpy.random.binomial(n=10,p=0.5,size=1000)
# Draw a sample of size 1000 from the uniform distribution U(0, 1); note that
# this method samples from a half-open interval [low, high)
s_u= numpy.random.uniform(low=0,high=1,size=1000)
# Draw a sample of size 1000 from the Poisson distribution P(1)
s_poisson = numpy.random.poisson(lam=1,size=1000)
# Draw a sample of size 1000 from the normal distribution N(0, 1); `normal`
# lets you choose the mean and standard deviation, while `standard_normal`
# samples N(0, 1) directly
s_normal = numpy.random.normal(loc=0,scale=1,size=1000)
s_normal= numpy.random.standard_normal(size=1000)
# Draw a sample of size 1000 from the exponential distribution E(1/2); note
# that `scale` is the reciprocal of the rate parameter lambda
s_E = numpy.random.exponential(scale=2,size=1000)
# Besides numpy, SciPy can also draw random samples
from scipy import stats as st
st.uniform.rvs(size=10)
# Computing the PMF and PDF of statistical distributions
# SciPy provides methods for computing the PMF of discrete random variables
# and the PDF of continuous random variables.
# The PMF gives the probability of each particular value of a discrete variable
# The PDF is the probability density function of a continuous variable
from scipy import stats
# PMF of the binomial distribution B(10, 0.5)
x= range(11)
x_b=stats.binom.pmf(x, n=10, p=0.5)
# PMF of the Poisson distribution P(1)
x=range(11)
x_p=stats.poisson.pmf(x,mu=1)
# PDF of the uniform distribution U(0, 1)
x = numpy.linspace(0,1,100)
x_u = stats.uniform.pdf(x,loc=0,scale=1)
# PDF of the normal distribution N(0, 1)
x = numpy.linspace(-3,3,1000)
x_N = stats.norm.pdf(x,loc=0,scale=1)
# PDF of the exponential distribution E(1)
x = numpy.linspace(0,10,1000)
x_e= stats.expon.pdf(x,loc=0,scale=1)
# Taking the normal distribution as an example, compute the CDF of N(0, 1)
x = numpy.linspace(-3,3,1000)
p = stats.norm.cdf(x,loc=0,scale=1)
# Binomial distribution
# Compare the true probability mass of B(10, 0.5) with the result of
# 10000 random draws.
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

x = range(11)
# Draw 10000 samples from B(10, 0.5); the original used size=1000, which
# contradicted the "10000 samples" comment and plot label.
t = stats.binom.rvs(10, 0.5, size=10000)
p = stats.binom.pmf(x, 10, 0.5)
fig, ax = plt.subplots(1, 1)
# distplot was removed in seaborn >= 0.14; histplot is the replacement
sns.histplot(t, bins=10, stat='density', label='Distplot from 10000 samples')
# seaborn >= 0.12 requires keyword x/y arguments
sns.scatterplot(x=x, y=p, color='purple')
sns.lineplot(x=x, y=p, color='purple', label='True mass density')
plt.title('Binomial distribution')
plt.legend()
# Poisson distribution
# Compare the true probability mass of a Poisson distribution with lambda = 2
# with the result of 10000 random draws.
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

x = range(11)
t = stats.poisson.rvs(2, size=10000)
p = stats.poisson.pmf(x, 2)
fig, ax = plt.subplots(1, 1)
# distplot was removed in seaborn >= 0.14; histplot is the replacement
sns.histplot(t, bins=10, stat='density', label='Distplot from 10000 samples')
# seaborn >= 0.12 requires keyword x/y arguments
sns.scatterplot(x=x, y=p, color='purple')
sns.lineplot(x=x, y=p, color='purple', label='True mass density')
plt.title('Poisson distribution')
plt.legend()
# Compare PMFs for different values of the rate parameter: as lambda grows,
# the Poisson distribution becomes more symmetric and spread out, approaching
# a normal distribution.
x = range(50)
fig, ax = plt.subplots()
for lam in [1, 2, 5, 10, 20]:
    # loop body restored — indentation was lost in the original
    p = stats.poisson.pmf(x, lam)
    # keyword x/y required by seaborn >= 0.12; 'lamda' label typo fixed
    sns.lineplot(x=x, y=p, label='lambda= ' + str(lam))
plt.title('Poisson distribution')
plt.legend()
# 均勻分佈
# Compare the true probability density of U(0, 1) with the result of
# 10000 random draws.
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

x = numpy.linspace(0, 1, 10000)
t = stats.uniform.rvs(0, 1, size=10000)
p = stats.uniform.pdf(x, 0, 1)
fig, ax = plt.subplots(1, 1)
# distplot was removed in seaborn >= 0.14; histplot is the replacement
sns.histplot(t, bins=10, stat='density', label='Distplot from 10000 samples')
# seaborn >= 0.12 requires keyword x/y arguments
sns.lineplot(x=x, y=p, color='purple', label='True mass density')
plt.title('Uniform distribution')  # title typo 'Uniforml' fixed
plt.legend(bbox_to_anchor=(1.05, 1))
# Normal distribution
# Compare the true probability density of N(0, 1) with the result of
# 10000 random draws.
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

x = numpy.linspace(-3, 3, 10000)
t = stats.norm.rvs(0, 1, size=10000)
p = stats.norm.pdf(x, 0, 1)
fig, ax = plt.subplots(1, 1)
# distplot was removed in seaborn >= 0.14; histplot is the replacement
sns.histplot(t, bins=100, stat='density', label='Distplot from 10000 samples')
# seaborn >= 0.12 requires keyword x/y arguments
sns.lineplot(x=x, y=p, color='purple', label='True mass density')
plt.title('Normal distribution')
plt.legend(bbox_to_anchor=(1.05, 1))
# Compare normal probability density functions for different combinations of
# mean and standard deviation.
x = numpy.linspace(-6, 6, 100)
# (removed a redundant pre-loop `p = stats.norm.pdf(x, 0, 1)` that was
# immediately overwritten inside the loop)
fig, ax = plt.subplots()
for mean, std in [(0, 1), (0, 2), (3, 1)]:
    # loop body restored — indentation was lost in the original
    p = stats.norm.pdf(x, mean, std)
    # keyword x/y required by seaborn >= 0.12
    sns.lineplot(x=x, y=p, label='Mean: ' + str(mean) + ', std: ' + str(std))
plt.title('Normal distribution')
plt.legend()
# Exponential distribution
# Compare the true probability density of E(1) with the result of
# 10000 random draws.
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns

x = numpy.linspace(0, 10, 100)
t = stats.expon.rvs(0, 1, size=10000)
p = stats.expon.pdf(x, 0, 1)
fig, ax = plt.subplots(1, 1)
# distplot was removed in seaborn >= 0.14; histplot is the replacement
sns.histplot(t, bins=100, stat='density', label='Distplot from 10000 samples')
# seaborn >= 0.12 requires keyword x/y arguments
sns.lineplot(x=x, y=p, color='purple', label='True mass density')
plt.title('Exponential distribution')
plt.legend(bbox_to_anchor=(1, 1))
# Compare exponential probability density functions for different parameters;
# note that scipy's `scale` is 1/lambda.
x = numpy.linspace(0, 10, 100)
fig, ax = plt.subplots()
for scale in [0.2, 0.5, 1, 2, 5]:
    # loop body restored — indentation was lost in the original
    p = stats.expon.pdf(x, scale=scale)
    # keyword x/y required by seaborn >= 0.12; 'lamda' label typo fixed
    sns.lineplot(x=x, y=p, label='lambda= ' + str(1 / scale))
plt.title('Exponential distribution')
plt.legend()
# Normality test
# The Shapiro-Wilk test is a classic normality test.
# H0: the sampled population follows a normal distribution
# H1: the sampled population does not follow a normal distribution
import numpy as np
from scipy.stats import shapiro

data_nonnormal = np.random.exponential(size=100)
data_normal = np.random.normal(size=100)


def normal_judge(data):
    """Run a Shapiro-Wilk test and report whether *data* looks Gaussian.

    Returns a string containing the test statistic, the p-value, and the
    verdict at the conventional 0.05 significance level.
    """
    # function body restored — indentation was lost in the original
    stat, p = shapiro(data)
    if p > 0.05:
        return 'stat={:.3f}, p = {:.3f}, probably gaussian'.format(stat, p)
    else:
        return 'stat={:.3f}, p = {:.3f}, probably not gaussian'.format(stat, p)


# output
normal_judge(data_nonnormal)
# 'stat=0.850, p = 0.000, probably not gaussian'
normal_judge(data_normal)
# 'stat=0.987, p = 0.415, probably gaussian'
# Chi-square test
# Purpose: test whether two categorical variables are related or independent.
# H0: the two samples are independent
# H1: the two samples are not independent
from scipy.stats import chi2_contingency

# 2x3 contingency table of observed counts
table = [[10, 20, 30], [6, 9, 17]]
stat, p, dof, expected = chi2_contingency(table)
print('stat=%.3f, p=%.3f' % (stat, p))
# if/else bodies restored — indentation was lost in the original
if p > 0.05:
    print('Probably independent')
else:
    print('Probably dependent')
# output
# stat=0.272, p=0.873
# Probably independent
# T-test
# Purpose: test whether the means of two independent samples differ
# significantly.
# H0: the means are equal
# H1: the means are not equal
from scipy.stats import ttest_ind
import numpy as np

data1 = np.random.normal(size=10)
data2 = np.random.normal(size=10)
stat, p = ttest_ind(data1, data2)
print('stat=%.3f, p=%.3f' % (stat, p))
# if/else bodies restored — indentation was lost in the original
if p > 0.05:
    print('Probably the same distribution')
else:
    print('Probably different distributions')
# example output (varies between runs because the data are random)
# stat=-1.382, p=0.184
# Probably the same distribution
# ANOVA test
# Purpose: like the t-test, ANOVA tests whether the means of two or more
# independent samples differ significantly.
# H0: the means are equal
# H1: the means are not equal
from scipy.stats import f_oneway
import numpy as np

data1 = np.random.normal(size=10)
data2 = np.random.normal(size=10)
data3 = np.random.normal(size=10)
stat, p = f_oneway(data1, data2, data3)
print('stat=%.3f, p=%.3f' % (stat, p))
# if/else bodies restored — indentation was lost in the original
if p > 0.05:
    print('Probably the same distribution')
else:
    print('Probably different distributions')
# example output (varies between runs because the data are random)
# stat=0.189, p=0.829
# Probably the same distribution
# Mann-Whitney U Test
# Purpose: test whether two samples come from the same distribution.
# H0: the two samples have the same distribution
# H1: the two samples have different distributions
from scipy.stats import mannwhitneyu

data1 = [0.873, 2.817, 0.121, -0.945, -0.055, -1.436, 0.360, -1.478, -1.637, -1.869]
data2 = [1.142, -0.432, -0.938, -0.729, -0.846, -0.157, 0.500, 1.183, -1.075, -0.169]
stat, p = mannwhitneyu(data1, data2)
print('stat=%.3f, p=%.3f' % (stat, p))
# if/else bodies restored — indentation was lost in the original
if p > 0.05:
    print('Probably the same distribution')
else:
    print('Probably different distributions')
# output
# stat=40.000, p=0.236
# Probably the same distribution