In [2]:
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
df = pd.read_csv('/Users/xiha/github/cjc-gh-pages/data/tianya_bbs_threads_list.txt', sep = "\t", header=None)
df[:2]
Out[2]:
0 1 2 3 4 5 6
0 【民间语文第161期】宁波px启示:船进港湾人应上岸 /post-free-2849477-1.shtml 贾也 http://www.tianya.cn/50499450 194675 2703 2012-10-29 07:59
1 宁波镇海PX项目引发群体上访 当地政府发布说明(转载) /post-free-2839539-1.shtml 无上卫士ABC http://www.tianya.cn/74341835 88244 1041 2012-10-24 12:41
In [3]:
df=df.rename(columns = {0:'title', 1:'link', 2:'author',3:'author_page', 4:'click', 5:'reply', 6:'time'})
df[:5]  # inspect the renamed columns
Out[3]:
title link author author_page click reply time
0 【民间语文第161期】宁波px启示:船进港湾人应上岸 /post-free-2849477-1.shtml 贾也 http://www.tianya.cn/50499450 194675 2703 2012-10-29 07:59
1 宁波镇海PX项目引发群体上访 当地政府发布说明(转载) /post-free-2839539-1.shtml 无上卫士ABC http://www.tianya.cn/74341835 88244 1041 2012-10-24 12:41
2 宁波准备停止PX项目了,元芳,你怎么看? /post-free-2848797-1.shtml 牧阳光 http://www.tianya.cn/36535656 82779 625 2012-10-28 19:11
3 【吴钩一言堂】漳州PX成"二踢响",说明老百姓科学素质不低 /post-free-5043260-1.shtml 挑灯看吴钩 http://www.tianya.cn/36959960 45304 219 2015-04-07 21:30
4 PX是否有毒?宁波镇海事件谁在推波助澜(转载) /post-free-2848995-1.shtml zzjzzpgg12 http://www.tianya.cn/53134970 38132 835 2012-10-28 21:08
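As an aside, the rename step above could have been folded into the read itself by passing names= to read_csv; a minimal sketch, using the same file:

df = pd.read_csv('/Users/xiha/github/cjc-gh-pages/data/tianya_bbs_threads_list.txt',
                 sep='\t', header=None,
                 names=['title', 'link', 'author', 'author_page',
                        'click', 'reply', 'time'])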
In [4]:
da = pd.read_csv('/Users/xiha/github/cjc-gh-pages/data/tianya_bbs_threads_author_info.txt', sep = "\t", header=None)
da[:2]
Out[4]:
0 1 2 3 4
0 http://www.tianya.cn/50499450 152 27452 1020 1341
1 http://www.tianya.cn/74341835 0 1 2 5
In [5]:
da=da.rename(columns = {0:'author_page', 1:'followed_num', 2:'fans_num',3:'post_num', 4:'comment_num'})
da[:5]
Out[5]:
author_page followed_num fans_num post_num comment_num
0 http://www.tianya.cn/50499450 152 27452 1020 1341
1 http://www.tianya.cn/74341835 0 1 2 5
2 http://www.tianya.cn/36535656 19 28 816 1268
3 http://www.tianya.cn/36959960 25 307 513 1237
4 http://www.tianya.cn/53134970 17 22 79 3256
In [6]:
data = pd.concat([df,da], axis=1)  # merge the two datasets column-wise, by row position
len(data)
Out[6]:
467
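Note that pd.concat(axis=1) pastes the two frames together purely by row position, which is only safe because both files were scraped in the same order. A more defensive alternative, sketched here under the assumption that author_page identifies the matching rows, would join on the shared key:

# key-based join instead of positional concat
data = pd.merge(df, da, on='author_page', how='left')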

Time

In [8]:
type(data.time[0])  # the time column is still plain strings
Out[8]:
str
In [9]:
# extract date from datetime
date = map(lambda x: x[:10], data.time)  # keep only the YYYY-MM-DD part of each timestamp
data['date'] = pd.to_datetime(date)  # convert the date strings to datetime objects
In [10]:
# convert str to datetime format
data.time = pd.to_datetime(data.time)
data['month'] = data.time.dt.month
data['year'] = data.time.dt.year
data['day'] = data.time.dt.day
type(data.time[0])
Out[10]:
pandas.tslib.Timestamp
In [11]:
data[:3]  # show the first three records
data.describe()
Out[11]:
click reply month year day
count 467.000000 467.000000 467.000000 467.000000 467.000000
mean 1534.957173 18.907923 7.432548 2012.620985 17.961456
std 11099.249834 144.869921 3.084860 1.795269 9.491730
min 11.000000 0.000000 1.000000 2006.000000 1.000000
25% 42.500000 0.000000 5.000000 2013.000000 8.000000
50% 84.000000 0.000000 6.000000 2013.000000 23.000000
75% 322.000000 4.000000 11.000000 2013.000000 25.000000
max 194675.000000 2703.000000 12.000000 2015.000000 31.000000

Statsmodels

In [12]:
import statsmodels.api as sm
In [13]:
'   '.join(dir(sm.stats))
Out[13]:
'CompareCox   CompareJ   CompareMeans   DescrStatsW   Describe   FTestAnovaPower   FTestPower   GofChisquarePower   HetGoldfeldQuandt   NormalIndPower   Runs   TTestIndPower   TTestPower   __builtins__   __doc__   __file__   __name__   __package__   acorr_breush_godfrey   acorr_ljungbox   anova_lm   binom_test   binom_test_reject_interval   binom_tost   binom_tost_reject_interval   breaks_cusumolsresid   breaks_hansen   chisquare_effectsize   cochrans_q   compare_cox   compare_j   corr_clipped   corr_nearest   cov_cluster   cov_cluster_2groups   cov_hac   cov_hc0   cov_hc1   cov_hc2   cov_hc3   cov_nearest   cov_nw_panel   cov_white_simple   diagnostic   durbin_watson   fdrcorrection   fdrcorrection_twostage   gof   gof_chisquare_discrete   het_arch   het_breushpagan   het_goldfeldquandt   het_white   jarque_bera   lillifors   linear_harvey_collier   linear_lm   linear_rainbow   mcnemar   moment_helpers   multicomp   multipletests   normal_ad   omni_normtest   power_binom_tost   power_ztost_prop   powerdiscrepancy   proportion_confint   proportion_effectsize   proportions_chisquare   proportions_chisquare_allpairs   proportions_chisquare_pairscontrol   proportions_ztest   proportions_ztost   recursive_olsresiduals   runstest_1samp   runstest_2samp   sandwich_covariance   se_cov   stattools   symmetry_bowker   tt_ind_solve_power   tt_solve_power   ttest_ind   ttost_ind   ttost_paired   tukeyhsd   unitroot_adf   zconfint   zt_ind_solve_power   ztest   ztost'

Describe

In [14]:
data.describe()
Out[14]:
click reply month year day
count 467.000000 467.000000 467.000000 467.000000 467.000000
mean 1534.957173 18.907923 7.432548 2012.620985 17.961456
std 11099.249834 144.869921 3.084860 1.795269 9.491730
min 11.000000 0.000000 1.000000 2006.000000 1.000000
25% 42.500000 0.000000 5.000000 2013.000000 8.000000
50% 84.000000 0.000000 6.000000 2013.000000 23.000000
75% 322.000000 4.000000 11.000000 2013.000000 25.000000
max 194675.000000 2703.000000 12.000000 2015.000000 31.000000
In [15]:
import numpy as np

np.mean(data.click), np.std(data.click), np.sum(data.click)
Out[15]:
(1534.9571734475376, 11087.35990002894, 716825)
In [16]:
# unweighted descriptive statistics (every weight set to 1)
d1 = sm.stats.DescrStatsW(data.click, weights=[1 for i in data.click])
d1.mean, d1.var, d1.std, d1.sum
Out[16]:
(1534.9571734475376, 122929549.55276975, 11087.35990002894, 716825.0)
In [17]:
# descriptive statistics weighted by reply count
d1 = sm.stats.DescrStatsW(data.click, weights=data.reply)
d1.mean, d1.var, d1.std, d1.sum
Out[17]:
(83335.963986409959, 6297145701.6868114, 79354.556905617035, 735856562.0)
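The weighted mean above can be checked by hand; by definition it is sum(w·x)/sum(w):

# manual check of the reply-weighted mean of clicks
w = data.reply.astype(float)
print np.sum(w * data.click) / np.sum(w)  # should reproduce d1.mean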
In [18]:
np.median(data.click)
Out[18]:
84.0
In [19]:
plt.hist(data.click)
plt.show()
In [23]:
plt.hist(data.reply, color = 'purple')
plt.show()
In [26]:
plt.hist(np.log(data.click+1), color='green')
plt.hist(np.log(data.reply+1), color='purple')
plt.show()
In [27]:
# Boxplot of the log-transformed click counts
plt.boxplot([np.log(data.click+1)])
plt.show()
In [28]:
# Boxplot of raw clicks and replies (heavily right-skewed)
plt.boxplot([data.click, data.reply])
plt.show()
In [29]:
def transformData(dat):  # replace 'na' entries with 0 and cast the rest to int
    results = []
    for i in dat:
        if i != 'na':
            results.append( int(i))
        else:
            results.append(0)
    return results
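For comparison only (not used below), recent pandas versions can do the same cleaning in one line; fans_clean is a throwaway name for illustration:

# coerce non-numeric entries such as 'na' to NaN, then fill with 0
fans_clean = pd.to_numeric(data.fans_num, errors='coerce').fillna(0).astype(int)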
In [30]:
data.fans_num = transformData(data.fans_num)
data.followed_num = transformData(data.followed_num )
data.post_num = transformData(data.post_num )
data.comment_num = transformData(data.comment_num )
data.describe()
Out[30]:
click reply followed_num fans_num post_num comment_num month year day
count 467.000000 467.000000 467.000000 467.000000 467.000000 467.000000 467.000000 467.000000 467.000000
mean 1534.957173 18.907923 15.713062 839.421842 146.336188 434.556745 7.432548 2012.620985 17.961456
std 11099.249834 144.869921 120.221465 7589.853870 577.441999 1989.458332 3.084860 1.795269 9.491730
min 11.000000 0.000000 0.000000 0.000000 0.000000 0.000000 1.000000 2006.000000 1.000000
25% 42.500000 0.000000 0.000000 0.000000 4.000000 0.000000 5.000000 2013.000000 8.000000
50% 84.000000 0.000000 0.000000 1.000000 16.000000 9.000000 6.000000 2013.000000 23.000000
75% 322.000000 4.000000 1.000000 4.000000 84.000000 88.000000 11.000000 2013.000000 25.000000
max 194675.000000 2703.000000 1817.000000 108449.000000 10684.000000 24848.000000 12.000000 2015.000000 31.000000
In [31]:
# Plot the height and weight to see
plt.boxplot([np.log(data.click+1), np.log(data.reply+1), 
             np.log(data.fans_num+1), np.log(data.followed_num + 1)], 
            labels = ['$Click$', '$Reply$', '$Fans$', '$Followed$'])
plt.show()

Pandas itself already includes boxplot functionality

In [32]:
fig = plt.figure(figsize=(12,4))
data.boxplot(return_type='dict')
plt.yscale('log')
plt.show()
In [33]:
from pandas.tools import plotting  # note: newer pandas exposes this as pandas.plotting

#fig = plt.figure(figsize=(10, 10))
plotting.scatter_matrix(data[['click', 'reply', 'post_num','comment_num']]) 
plt.show()
In [34]:
import seaborn # conda install seaborn
seaborn.pairplot(data, vars=['click', 'reply', 'post_num', 'comment_num'],
                  kind='reg') 
Out[34]:
<seaborn.axisgrid.PairGrid at 0x11be35cd0>
In [35]:
seaborn.pairplot(data, vars=['click', 'reply', 'post_num'],
                 kind='reg', hue='year')
Out[35]:
<seaborn.axisgrid.PairGrid at 0x11cea6d10>
In [36]:
seaborn.lmplot(y='reply', x='click', data=data)  
Out[36]:
<seaborn.axisgrid.FacetGrid at 0x11e982f10>

value_counts

In [37]:
data.year.value_counts()  # number of threads per year
Out[37]:
2013    304
2014     63
2007     34
2012     33
2015     20
2011      6
2009      6
2006      1
Name: year, dtype: int64
In [38]:
d = data.year.value_counts()
dd = pd.DataFrame(d)
dd = dd.sort_index(axis=0, ascending=True)
dd
Out[38]:
year
2006 1
2007 34
2009 6
2011 6
2012 33
2013 304
2014 63
2015 20
In [39]:
dd.index
Out[39]:
Int64Index([2006, 2007, 2009, 2011, 2012, 2013, 2014, 2015], dtype='int64')
In [40]:
dd_date_str = map(lambda x: str(x) +'-01-01', dd.index)
dd_date_str
Out[40]:
['2006-01-01',
 '2007-01-01',
 '2009-01-01',
 '2011-01-01',
 '2012-01-01',
 '2013-01-01',
 '2014-01-01',
 '2015-01-01']
In [41]:
dd_date = pd.to_datetime(dd_date_str)  # pandas parses date strings directly
dd_date
Out[41]:
DatetimeIndex(['2006-01-01', '2007-01-01', '2009-01-01', '2011-01-01',
               '2012-01-01', '2013-01-01', '2014-01-01', '2015-01-01'],
              dtype='datetime64[ns]', freq=None)
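Equivalently, the integer years can be parsed without building the '-01-01' strings by hand; a small sketch:

# format='%Y' maps each year to January 1 of that year
dd_date = pd.to_datetime([str(y) for y in dd.index], format='%Y')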
In [42]:
plt.plot(dd_date, dd.year, 'r-o')
plt.show()
In [43]:
ds = dd.cumsum()
ds
Out[43]:
year
2006 1
2007 35
2009 41
2011 47
2012 80
2013 384
2014 447
2015 467
In [44]:
d = data.year.value_counts()
dd = pd.DataFrame(d)
dd = dd.sort_index(axis=0, ascending=True)
ds = dd.cumsum()

def getDate(dat):
    dat_date_str = map(lambda x: str(x) +'-01-01', dat.index)
    dat_date = pd.to_datetime(dat_date_str)
    return dat_date

ds_date = getDate(ds)
dd_date = getDate(dd)

plt.plot(ds_date, ds.year, 'g-s', label = '$Cumulative\: Number\:of\: Threads$')
plt.plot(dd_date, dd.year, 'r-o', label = '$Yearly\:Number\:of\:Threads$')
plt.legend(loc=2,numpoints=1,fontsize=13)
plt.show()

groupby

In [45]:
dg = data.groupby('year').sum()
dg
Out[45]:
click reply followed_num fans_num post_num comment_num month day
year
2006 1214 24 0 2 278 291 8 24
2007 28290 514 22 137 8041 10344 281 512
2009 18644 186 17 12 531 571 39 78
2011 2889 28 84 28 332 661 50 72
2012 463720 5933 2779 59511 12315 32498 322 819
2013 63140 937 571 43265 24359 40362 2458 6111
2014 57764 772 2216 16664 11266 98025 233 579
2015 81164 436 1649 272391 11217 20186 80 193
In [46]:
dgs = dg.cumsum()
dgs
Out[46]:
click reply followed_num fans_num post_num comment_num month day
year
2006 1214 24 0 2 278 291 8 24
2007 29504 538 22 139 8319 10635 289 536
2009 48148 724 39 151 8850 11206 328 614
2011 51037 752 123 179 9182 11867 378 686
2012 514757 6685 2902 59690 21497 44365 700 1505
2013 577897 7622 3473 102955 45856 84727 3158 7616
2014 635661 8394 5689 119619 57122 182752 3391 8195
2015 716825 8830 7338 392010 68339 202938 3471 8388
In [47]:
def getDate(dat):
    dat_date_str = map(lambda x: str(x) +'-01-01', dat.index)
    dat_date = pd.to_datetime(dat_date_str)
    return dat_date

dg['date'] = getDate(dg)  # store as a proper column rather than a bare attribute
In [48]:
fig = plt.figure(figsize=(12,5))
plt.plot(dg.date, dg.click, 'r-o', label = '$Yearly\:Number\:of\:Clicks$')
plt.plot(dg.date, dg.reply, 'g-s', label = '$Yearly\:Number\:of\:Replies$')
plt.plot(dg.date, dg.fans_num, 'b->', label = '$Yearly\:Number\:of\:Fans$')

plt.yscale('log')

plt.legend(loc=4,numpoints=1,fontsize=13)
plt.show()
In [49]:
data.groupby('year')['click'].sum()
Out[49]:
year
2006      1214
2007     28290
2009     18644
2011      2889
2012    463720
2013     63140
2014     57764
2015     81164
Name: click, dtype: int64
In [50]:
data.groupby('year')['click'].mean()
Out[50]:
year
2006     1214.000000
2007      832.058824
2009     3107.333333
2011      481.500000
2012    14052.121212
2013      207.697368
2014      916.888889
2015     4058.200000
Name: click, dtype: float64
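Several aggregates can be requested in a single pass with agg; a sketch:

# count, total and mean clicks per year in one call
data.groupby('year')['click'].agg(['count', 'sum', 'mean'])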

Commonly used statistical methods

  • t-test
  • chi-squared test
  • correlation
  • regression

RQ: Do reposted articles receive significantly fewer clicks than original ones?

In [51]:
repost = []
for i in df.title:
    if u'转载' in i.decode('utf8'):
        repost.append(1)
    else:
        repost.append(0)
In [52]:
df['repost'] = repost
In [53]:
df.groupby('repost').sum()
Out[53]:
click reply
repost
0 536495 6208
1 180330 2622
In [54]:
df['click'][df['repost']==0][:5]
Out[54]:
0    194675
2     82779
3     45304
5     27026
6     24026
Name: click, dtype: int64
In [55]:
df['click'][df['repost']==1][:5]
Out[55]:
1     88244
4     38132
13     4990
16     3720
18     3421
Name: click, dtype: int64
In [56]:
from scipy import stats
stats.ttest_ind(df.click, df.repost)  # caution: compares clicks with the 0/1 dummy itself, not the two groups (see below)
Out[56]:
Ttest_indResult(statistic=2.9873822484161594, pvalue=0.0028874851192659799)
In [57]:
sm.stats.ttest_ind(data.click, data.reply)
Out[57]:
(2.9514887561591618, 0.0032417014839700789, 932.0)
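A caution on both calls above: they compare two different variables against each other (clicks vs. the repost dummy, clicks vs. replies) rather than the clicks of the two groups named in the RQ. A sketch of the direct group comparison, using Welch's t-test since the group sizes and variances differ:

# compare clicks of original (repost==0) vs. reposted (repost==1) threads
original = df['click'][df['repost'] == 0]
reposted = df['click'][df['repost'] == 1]
print stats.ttest_ind(original, reposted, equal_var=False)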

A chi-squared test checks whether two categorical variables are associated.

In [58]:
from scipy.stats import chisquare
chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])  # p-value well above 0.05, so we fail to reject the null hypothesis
Out[58]:
Power_divergenceResult(statistic=3.5, pvalue=0.62338762774958223)
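The call above is a goodness-of-fit test against given expected frequencies. To test for association between two categorical variables, scipy also offers chi2_contingency on a cross-tabulation; the median split below is purely an illustrative choice:

from scipy.stats import chi2_contingency
# 2x2 table: repost status vs. whether clicks exceed the median
high_click = df['click'] > df['click'].median()
table = pd.crosstab(df['repost'], high_click)
chi2_stat, p, dof, expected = chi2_contingency(table)
print chi2_stat, p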
In [59]:
from scipy.stats import chisqprob, chi2
# p_value = chi2.sf(chi_statistic, df)
print chisqprob(3.94,1), 1 - chi2.cdf(3.94,1)
0.0471507774946 0.0471507774946
/Users/xiha/anaconda/anaconda/lib/python2.7/site-packages/ipykernel/__main__.py:3: DeprecationWarning: `chisqprob` is deprecated!
stats.chisqprob is deprecated in scipy 0.17.0; use stats.distributions.chi2.sf instead.
  app.launch_new_instance()

Correlation

In [60]:
import numpy as np
print np.corrcoef(data.click, data.reply)  # correlation between any pair of variables
print np.corrcoef(np.log(data.click+1), np.log(data.reply+1))
[[ 1.          0.96396571]
 [ 0.96396571  1.        ]]
[[ 1.          0.77721397]
 [ 0.77721397  1.        ]]
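Given how heavy-tailed clicks and replies are, a rank-based correlation is a useful robustness check; Spearman's rho is invariant to monotone transforms such as the log above:

from scipy import stats
print stats.spearmanr(data.click, data.reply)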
In [61]:
data.corr()
Out[61]:
click reply followed_num fans_num post_num comment_num month year day
click 1.000000 0.963966 0.143595 0.158116 0.097502 0.085615 0.038788 -0.024827 0.048361
reply 0.963966 1.000000 0.199270 0.159387 0.090342 0.123341 0.040165 -0.041208 0.058738
followed_num 0.143595 0.199270 1.000000 0.407656 0.211677 0.499612 -0.036037 0.051187 -0.020604
fans_num 0.158116 0.159387 0.407656 1.000000 0.341724 0.145387 -0.084243 0.102301 -0.045883
post_num 0.097502 0.090342 0.211677 0.341724 1.000000 0.514695 -0.070024 -0.011786 -0.033254
comment_num 0.085615 0.123341 0.499612 0.145387 0.514695 1.000000 -0.118703 0.069160 -0.119840
month 0.038788 0.040165 -0.036037 -0.084243 -0.070024 -0.118703 1.000000 -0.236920 0.535354
year -0.024827 -0.041208 0.051187 0.102301 -0.011786 0.069160 -0.236920 1.000000 -0.046699
day 0.048361 0.058738 -0.020604 -0.045883 -0.033254 -0.119840 0.535354 -0.046699 1.000000
In [62]:
plt.plot(df.click, df.reply, 'r-o')  # raw scale, no log transform
plt.show()
In [63]:
plt.plot(df.click, df.reply, 'gs')  # same data on log-log axes
plt.xlabel('$Clicks$', fontsize = 20)
plt.ylabel('$Replies$', fontsize = 20)
plt.xscale('log')
plt.yscale('log')
plt.title('$Allometric\,Law$', fontsize = 20)
plt.show()
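The straight line on log-log axes suggests a power law reply ≈ c·click^b; the exponent b can be estimated by regressing log replies on log clicks, sketched here with the formula interface introduced below:

import statsmodels.formula.api as smf
# the slope of the log-log fit estimates the power-law exponent b
power_fit = smf.ols('np.log(reply + 1) ~ np.log(click + 1)', data=df).fit()
print power_fit.params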

Regression

  • Linear regression requires the variables to be continuous
  • for multi-category variables, use groupby
In [64]:
import numpy as np
import statsmodels.api as sm
import statsmodels.formula.api as smf
In [65]:
# Load data
dat = sm.datasets.get_rdataset("Guerry", "HistData").data
# Fit regression model (using the natural log of one of the regressors)
results = smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=dat).fit()
# model the relationship between Lottery and Literacy + np.log(Pop1831)

Some Windows users may find the code above fails to run: open a terminal in Spyder and run pip install -U patsy.

In [66]:
# Inspect the results
print results.summary()
                            OLS Regression Results                            
==============================================================================
Dep. Variable:                Lottery   R-squared:                       0.348
Model:                            OLS   Adj. R-squared:                  0.333
Method:                 Least Squares   F-statistic:                     22.20
Date:                Sun, 14 May 2017   Prob (F-statistic):           1.90e-08
Time:                        20:41:53   Log-Likelihood:                -379.82
No. Observations:                  86   AIC:                             765.6
Df Residuals:                      83   BIC:                             773.0
Df Model:                           2                                         
Covariance Type:            nonrobust                                         
===================================================================================
                      coef    std err          t      P>|t|      [95.0% Conf. Int.]
-----------------------------------------------------------------------------------
Intercept         246.4341     35.233      6.995      0.000       176.358   316.510
Literacy           -0.4889      0.128     -3.832      0.000        -0.743    -0.235
np.log(Pop1831)   -31.3114      5.977     -5.239      0.000       -43.199   -19.424
==============================================================================
Omnibus:                        3.713   Durbin-Watson:                   2.019
Prob(Omnibus):                  0.156   Jarque-Bera (JB):                3.394
Skew:                          -0.487   Prob(JB):                        0.183
Kurtosis:                       3.003   Cond. No.                         702.
==============================================================================

Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
In [67]:
reg = smf.ols('reply ~ click + followed_num', data=data).fit()  # relate replies to clicks and followed_num
In [68]:
print reg.summary()
                            OLS Regression Results                            
==============================================================================
Dep. Variable:                  reply   R-squared:                       0.933
Model:                            OLS   Adj. R-squared:                  0.933
Method:                 Least Squares   F-statistic:                     3231.
Date:                Sun, 14 May 2017   Prob (F-statistic):          4.30e-273
Time:                        20:42:10   Log-Likelihood:                -2354.7
No. Observations:                 467   AIC:                             4715.
Df Residuals:                     464   BIC:                             4728.
Df Model:                           2                                         
Covariance Type:            nonrobust                                         
================================================================================
                   coef    std err          t      P>|t|      [95.0% Conf. Int.]
--------------------------------------------------------------------------------
Intercept       -1.4024      1.766     -0.794      0.428        -4.873     2.068
click            0.0125      0.000     78.660      0.000         0.012     0.013
followed_num     0.0749      0.015      5.117      0.000         0.046     0.104
==============================================================================
Omnibus:                      374.515   Durbin-Watson:                   1.938
Prob(Omnibus):                  0.000   Jarque-Bera (JB):            97373.297
Skew:                          -2.416   Prob(JB):                         0.00
Kurtosis:                      73.575   Cond. No.                     1.14e+04
==============================================================================

Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
[2] The condition number is large, 1.14e+04. This might indicate that there are
strong multicollinearity or other numerical problems.
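The condition-number warning stems largely from the very different scales of click and followed_num. One common remedy, sketched here (note it also changes the interpretation of the coefficients), is to log-transform the skewed variables:

# re-fit on log scales to tame the condition number
reg_log = smf.ols('np.log(reply + 1) ~ np.log(click + 1) + np.log(followed_num + 1)',
                  data=data).fit()
print reg_log.summary()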
In [70]:
import statsmodels.api as sm
from statsmodels.formula.api import ols

moore = sm.datasets.get_rdataset("Moore", "car",
                                 cache=True) # load data
data = moore.data
data = data.rename(columns={"partner.status" :
                             "partner_status"}) # make name pythonic
In [71]:
data[:5]
Out[71]:
partner_status conformity fcategory fscore
0 low 8 low 37
1 low 4 high 57
2 low 8 high 65
3 low 7 low 20
4 low 10 low 36
In [72]:
moore_lm = ols('conformity ~ C(fcategory, Sum)*C(partner_status, Sum)',
                 data=data).fit()
In [73]:
fig = plt.figure(figsize=(12,8))
fig = sm.graphics.plot_partregress_grid(moore_lm, fig = fig)
plt.show()
In [74]:
table = sm.stats.anova_lm(moore_lm, typ=2) # Type 2 ANOVA DataFrame
print table
                                              sum_sq    df          F  \
C(fcategory, Sum)                          11.614700   2.0   0.276958   
C(partner_status, Sum)                    212.213778   1.0  10.120692   
C(fcategory, Sum):C(partner_status, Sum)  175.488928   2.0   4.184623   
Residual                                  817.763961  39.0        NaN   

                                            PR(>F)  
C(fcategory, Sum)                         0.759564  
C(partner_status, Sum)                    0.002874  
C(fcategory, Sum):C(partner_status, Sum)  0.022572  
Residual                                       NaN  
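Since the partner_status main effect is significant, a natural follow-up is a pairwise comparison of group means; statsmodels ships Tukey's HSD for this, a sketch:

from statsmodels.stats.multicomp import pairwise_tukeyhsd
# pairwise comparison of conformity across partner_status groups
print pairwise_tukeyhsd(data['conformity'], data['partner_status'])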