
Commit 3c2a5db

Revert "Updated Ch6 to Python3"
This reverts commit 4d8da51, undoing the last commit.
1 parent fae5ebd · commit 3c2a5db

2 files changed: 49 additions & 49 deletions

Chapter6_Priorities/Chapter6.ipynb

Lines changed: 19 additions & 19 deletions
@@ -11,7 +11,7 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"# Chapter 6\n",
+"#Chapter 6\n",
 "\n",
 "____\n",
 "\n",
@@ -27,7 +27,7 @@
 "\n",
 "Up until now, we have mostly ignored our choice of priors. This is unfortunate as we can be very expressive with our priors, but we also must be careful about choosing them. This is especially true if we want to be objective, that is, not to express any personal beliefs in the priors. \n",
 "\n",
-"### Subjective vs Objective priors\n",
+"###Subjective vs Objective priors\n",
 "\n",
 "Bayesian priors can be classified into two classes: *objective* priors, which aim to allow the data to influence the posterior the most, and *subjective* priors, which allow the practitioner to express his or her views into the prior. \n",
 "\n",
@@ -926,7 +926,7 @@
 "figsize(12.0, 8)\n",
 "beta = stats.beta\n",
 "hidden_prob = beta.rvs(1, 13, size=35)\n",
-"print(hidden_prob)\n",
+"print hidden_prob\n",
 "bandits = Bandits(hidden_prob)\n",
 "bayesian_strat = BayesianStrategy(bandits)\n",
 "\n",
@@ -1042,8 +1042,8 @@
 "    \"AMZN\": (0.03, 0.02),\n",
 "    }\n",
 "\n",
-"for i, (name, params) in enumerate(expert_prior_params.items()):\n",
-"    plt.subplot(2, 2, i + 1)\n",
+"for i, (name, params) in enumerate(expert_prior_params.iteritems()):\n",
+"    plt.subplot(2, 2, i)\n",
 "    y = normal.pdf(x, params[0], scale=params[1])\n",
 "    #plt.plot( x, y, c = colors[i] )\n",
 "    plt.fill_between(x, 0, y, color=colors[i], linewidth=2,\n",
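
Note on this hunk: two separate Python 3 fixes are reverted together. dict.items() exists in both Python 2 and 3 (iteritems() is Python 2 only), and Matplotlib subplot positions are 1-based, so the 0-based enumerate index needs the "+ 1"; current Matplotlib releases raise an error for position 0. A minimal sketch, assuming Matplotlib is installed (the AAPL entry is made up; only the AMZN one appears in the hunk above):

    import matplotlib.pyplot as plt

    expert_prior_params = {"AAPL": (0.05, 0.03), "AMZN": (0.03, 0.02)}  # (mean, std) pairs

    for i, (name, params) in enumerate(expert_prior_params.items()):
        plt.subplot(1, 2, i + 1)  # subplot positions start at 1, not 0
        plt.title(name)
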
@@ -1103,7 +1103,7 @@
 "\n",
 "stocks = [\"AAPL\", \"GOOG\", \"TSLA\", \"AMZN\"]\n",
 "\n",
-"enddate = \"2015-04-27\"\n",
+"enddate = datetime.datetime.now().strftime(\"%Y-%m-%d\")  # today's date.\n",
 "startdate = \"2012-09-01\"\n",
 "\n",
 "stock_closes = {}\n",
@@ -1120,7 +1120,7 @@
 "    _previous_day = np.roll(stock_closes[stock], -1)\n",
 "    stock_returns[stock] = ((stock_closes[stock] - _previous_day) / _previous_day)[:n_observations]\n",
 "\n",
-"dates = list(map(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\"), x[1:n_observations + 1, 0]))"
+"dates = map(lambda x: datetime.datetime.strptime(x, \"%Y-%m-%d\"), x[1:n_observations + 1, 0])"
 ]
 },
 {
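
Note on this hunk: in Python 2 map() returns a list, while in Python 3 it returns a lazy iterator, so the list(...) wrapper removed here is what lets dates be sliced (dates[::8]) in the plotting cell further down. A small standard-library sketch with made-up date strings:

    import datetime

    raw = ["2015-04-25", "2015-04-26", "2015-04-27"]  # hypothetical date strings
    dates = list(map(lambda s: datetime.datetime.strptime(s, "%Y-%m-%d"), raw))
    print(dates[::2])  # slicing works on the list; a bare map object would raise TypeError in Python 3
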
@@ -1144,12 +1144,12 @@
 "source": [
 "figsize(12.5, 4)\n",
 "\n",
-"for _stock, _returns in stock_returns.items():\n",
+"for _stock, _returns in stock_returns.iteritems():\n",
 "    p = plt.plot((1 + _returns)[::-1].cumprod() - 1, '-o', label=\"%s\" % _stock,\n",
 "                 markersize=4, markeredgecolor=\"none\")\n",
 "\n",
 "plt.xticks(np.arange(100)[::-8],\n",
-"           list(map(lambda x: datetime.datetime.strftime(x, \"%Y-%m-%d\"), dates[::8])),\n",
+"           map(lambda x: datetime.datetime.strftime(x, \"%Y-%m-%d\"), dates[::8]),\n",
 "           rotation=60);\n",
 "\n",
 "plt.legend(loc=\"upper left\")\n",
@@ -1179,9 +1179,9 @@
 "figsize(11., 5)\n",
 "returns = np.zeros((n_observations, 4))\n",
 "\n",
-"for i, (_stock, _returns) in enumerate(stock_returns.items()):\n",
+"for i, (_stock, _returns) in enumerate(stock_returns.iteritems()):\n",
 "    returns[:, i] = _returns\n",
-"    plt.subplot(2, 2, i+1)\n",
+"    plt.subplot(2, 2, i)\n",
 "    plt.hist(_returns, bins=20,\n",
 "             normed=True, histtype=\"stepfilled\",\n",
 "             color=colors[i], alpha=0.7)\n",
@@ -1258,7 +1258,7 @@
 "for i in range(4):\n",
 "    plt.hist(mu_samples[:, i], alpha=0.8 - 0.05 * i, bins=30,\n",
 "             histtype=\"stepfilled\", normed=True,\n",
-"             label=\"%s\" % list(stock_returns.keys())[i])\n",
+"             label=\"%s\" % stock_returns.keys()[i])\n",
 "\n",
 "plt.vlines(mu_samples.mean(axis=0), 0, 500, linestyle=\"--\", linewidth=.5)\n",
 "\n",
@@ -1302,8 +1302,8 @@
 "    plt.subplot(2, 2, i + 1)\n",
 "    plt.hist(mu_samples[:, i], alpha=0.8 - 0.05 * i, bins=30,\n",
 "             histtype=\"stepfilled\", normed=True, color=colors[i],\n",
-"             label=\"%s\" % list(stock_returns.keys())[i])\n",
-"    plt.title(\"%s\" % list(stock_returns.keys())[i])\n",
+"             label=\"%s\" % stock_returns.keys()[i])\n",
+"    plt.title(\"%s\" % stock_returns.keys()[i])\n",
 "    plt.xlim(-0.15, 0.15)\n",
 "\n",
 "plt.suptitle(\"Posterior distribution of daily stock returns\")\n",
@@ -2512,21 +2512,21 @@
 "metadata": {
 "anaconda-cloud": {},
 "kernelspec": {
-"display_name": "Python [conda env:bayes]",
+"display_name": "Python 2",
 "language": "python",
-"name": "conda-env-bayes-py"
+"name": "python2"
 },
 "language_info": {
 "codemirror_mode": {
 "name": "ipython",
-"version": 3
+"version": 2
 },
 "file_extension": ".py",
 "mimetype": "text/x-python",
 "name": "python",
 "nbconvert_exporter": "python",
-"pygments_lexer": "ipython3",
-"version": "3.5.2"
+"pygments_lexer": "ipython2",
+"version": "2.7.10"
 }
 },
 "nbformat": 4,

Chapter6_Priorities/other_strats.py

Lines changed: 30 additions & 30 deletions
@@ -3,13 +3,13 @@
 
 import scipy.stats as stats
 import numpy as np
-#from pymc import rbeta
+from pymc import rbeta
 
 rand = np.random.rand
 beta = stats.beta
 
 
-class GeneralBanditStrat(object):
+class GeneralBanditStrat( object ):
 
     """
     Implements a online, learning strategy to solve
@@ -32,72 +32,72 @@ class GeneralBanditStrat(object):
     def __init__(self, bandits, choice_function):
 
         self.bandits = bandits
-        n_bandits = len(self.bandits)
-        self.wins = np.zeros(n_bandits)
-        self.trials = np.zeros(n_bandits)
+        n_bandits = len( self.bandits )
+        self.wins = np.zeros( n_bandits )
+        self.trials = np.zeros(n_bandits )
         self.N = 0
         self.choices = []
         self.score = []
         self.choice_function = choice_function
 
-    def sample_bandits(self, n=1):
+    def sample_bandits( self, n=1 ):
 
-        score = np.zeros(n)
-        choices = np.zeros(n)
+        score = np.zeros( n )
+        choices = np.zeros( n )
 
         for k in range(n):
             #sample from the bandits's priors, and select the largest sample
             choice = self.choice_function(self)
 
             #sample the chosen bandit
-            result = self.bandits.pull(choice)
+            result = self.bandits.pull( choice )
 
             #update priors and score
-            self.wins[choice] += result
-            self.trials[choice] += 1
-            score[k] = result
+            self.wins[ choice ] += result
+            self.trials[ choice ] += 1
+            score[ k ] = result
             self.N += 1
-            choices[k] = choice
+            choices[ k ] = choice
 
-        self.score = np.r_[self.score, score]
-        self.choices = np.r_[self.choices, choices]
+        self.score = np.r_[ self.score, score ]
+        self.choices = np.r_[ self.choices, choices ]
         return
 
 
 def bayesian_bandit_choice(self):
-    return np.argmax(np.random.beta(1 + self.wins, 1 + self.trials - self.wins))
+    return np.argmax( rbeta( 1 + self.wins, 1 + self.trials - self.wins) )
 
-def max_mean(self):
+def max_mean( self ):
     """pick the bandit with the current best observed proportion of winning """
-    return np.argmax(self.wins / (self.trials +1))
+    return np.argmax( self.wins / ( self.trials +1 ) )
 
 def lower_credible_choice( self ):
     """pick the bandit with the best LOWER BOUND. See chapter 5"""
     def lb(a,b):
-        return a/(a+b) - 1.65*np.sqrt((a*b)/( (a+b)**2*(a+b+1)))
+        return a/(a+b) - 1.65*np.sqrt( (a*b)/( (a+b)**2*(a+b+1) ) )
     a = self.wins + 1
     b = self.trials - self.wins + 1
-    return np.argmax(lb(a,b))
+    return np.argmax( lb(a,b) )
 
-def upper_credible_choice(self):
+def upper_credible_choice( self ):
     """pick the bandit with the best LOWER BOUND. See chapter 5"""
     def lb(a,b):
-        return a/(a+b) + 1.65*np.sqrt((a*b)/((a+b)**2*(a+b+1)))
+        return a/(a+b) + 1.65*np.sqrt( (a*b)/( (a+b)**2*(a+b+1) ) )
     a = self.wins + 1
     b = self.trials - self.wins + 1
-    return np.argmax(lb(a,b))
+    return np.argmax( lb(a,b) )
 
-def random_choice(self):
-    return np.random.randint(0, len(self.wins))
+def random_choice( self):
+    return np.random.randint( 0, len( self.wins ) )
 
 
-def ucb_bayes(self):
+def ucb_bayes( self ):
     C = 0
     n = 10000
-    alpha =1 - 1./((self.N+1))
-    return np.argmax(beta.ppf(alpha,
+    alpha =1 - 1./( (self.N+1) )
+    return np.argmax( beta.ppf( alpha,
                               1 + self.wins,
-                              1 + self.trials - self.wins))
+                              1 + self.trials - self.wins ) )
 
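
Note on this hunk: aside from whitespace style, the substantive change is in bayesian_bandit_choice, which goes back to PyMC2's rbeta in place of np.random.beta. For this purpose the two samplers are interchangeable: each arm just needs one independent draw from its Beta(1 + wins, 1 + trials - wins) posterior, and the arm with the largest draw is pulled. A NumPy-only sketch with made-up counts:

    import numpy as np

    wins = np.array([2.0, 5.0, 1.0])     # hypothetical wins per arm
    trials = np.array([10.0, 9.0, 4.0])  # hypothetical pulls per arm

    samples = np.random.beta(1 + wins, 1 + trials - wins)  # one posterior draw per arm
    print(np.argmax(samples))            # index of the arm to pull next
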

@@ -117,7 +117,7 @@ def __init__(self, p_array):
         self.p = p_array
         self.optimal = np.argmax(p_array)
 
-    def pull(self, i):
+    def pull( self, i ):
         #i is which arm to pull
         return rand() < self.p[i]
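
For context, a hypothetical usage sketch of the classes this file defines, assuming the reverted other_strats.py (and the pymc package it now imports) is importable; the hidden win probabilities are made up:

    import numpy as np
    from other_strats import Bandits, GeneralBanditStrat, bayesian_bandit_choice

    np.random.seed(0)
    bandits = Bandits(np.array([0.15, 0.60, 0.75]))            # hidden win probabilities
    strat = GeneralBanditStrat(bandits, bayesian_bandit_choice)
    strat.sample_bandits(500)
    print(strat.wins / np.maximum(strat.trials, 1))            # observed win rate per arm
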
