# fonnesbeck/pymc_tutorial

1 parent 5cb96dc commit da662f84e51c3fcf602c3c6042a7c5c3a57659b6 committed Jul 10, 2011
Showing with 156 additions and 0 deletions.
1. +16 −0 bioassay.py
2. +37 −0 cov.py
3. +7 −0 mean.py
4. +37 −0 obs.py
5. +33 −0 triangular.py
6. +26 −0 truncated_metropolis.py
 @@ -0,0 +1,16 @@ +from pymc import * +from numpy import array + +n = [5]*4 +dose = [-.86,-.3,-.05,.73] +response = [0,1,3,5] + +alpha = Normal('alpha', mu=0, tau=0.01) +beta = Normal('beta', mu=0, tau=0.01) + +theta = Lambda('theta', lambda a=alpha, b=beta: invlogit(a + b*array(dose))) + +@observed +def deaths(value=response, n=n, p=theta): + """deaths ~ binomial_like(n, p)""" + return binomial_like_like(value, n, p)
37 cov.py
# cov.py -- define a Matern covariance function for a PyMC 2 Gaussian process
# and, when run as a script, plot it over [-1, 1) x [-1, 1).
from pymc.gp import *
from pymc.gp.cov_funs import matern
from numpy import *

# Matern covariance: differentiability degree 1.4, amplitude .4, length scale 1.
# The commented lines below are alternative covariance representations.
C = Covariance(eval_fun = matern.euclidean, diff_degree = 1.4, amp = .4, scale = 1.)
# C = Covariance(eval_fun = matern.euclidean, diff_degree = 1.4, amp = .4, scale = 1., rank_limit=100)
# C = FullRankCovariance(eval_fun = matern.euclidean, diff_degree = 1.4, amp = .4, scale = 1.)
# C = NearlyFullRankCovariance(eval_fun = matern.euclidean, diff_degree = 1.4, amp = .4, scale = 1.)

#### - Plot - ####
if __name__ == '__main__':
    from pylab import *

    # Evaluation mesh on [-1, 1) with step .01.
    x=arange(-1.,1.,.01)
    clf()

    # Plot the covariance function
    subplot(1,2,1)

    # C(x,x) returns a matrix subclass; view(ndarray) strips it for contourf.
    contourf(x,x,C(x,x).view(ndarray),origin='lower',extent=(-1.,1.,-1.,1.),cmap=cm.bone)

    xlabel('x')
    ylabel('y')
    title('C(x,y)')
    axis('tight')
    colorbar()

    # Plot a slice of the covariance function
    subplot(1,2,2)

    plot(x,C(x,0).view(ndarray).ravel(),'k-')

    xlabel('x')
    ylabel('C(x,0)')
    title('A slice of C')

    # show()
 @@ -0,0 +1,7 @@ +from pymc.gp import * + +# Generate mean +def quadfun(x, a, b, c): + return (a * x ** 2 + b * x + c) + +M = Mean(quadfun, a = 1., b = .5, c = 2.)
37 obs.py
# obs.py -- condition the GP built in mean.py/cov.py on noisy observations,
# then draw and (as a script) plot realizations from the conditioned process.
# Import the mean and covariance
from mean import M
from cov import C
from pymc.gp import *
from numpy import *

# Impose observations on the GP
# Observation locations, per-point observation variances, and observed values.
o = array([-.5,.5])
V = array([.002,.002])
data = array([3.1, 2.9])
observe(M, C, obs_mesh=o, obs_V = V, obs_vals = data)

# Generate realizations
# Three independent draws from the observation-conditioned process.
f_list=[Realization(M, C) for i in range(3)]

x=arange(-1.,1.,.01)

#### - Plot - ####
if __name__ == '__main__':
    from pylab import *

    x=arange(-1.,1.,.01)

    clf()

    # Shaded envelope of the posterior mean and spread over the mesh.
    plot_envelope(M, C, mesh=x)

    for f in f_list:
        plot(x, f(x))

    xlabel('x')
    ylabel('f(x)')
    title('Three realizations of the observed GP')
    axis('tight')


    # show()
 @@ -0,0 +1,33 @@ +from numpy import log, random, sqrt, zeros, atleast_1d + +def triangular_like(x, mode, minval, maxval): + """Log-likelihood of triangular distribution""" + + x = atleast_1d(x) + + # Check for support + if any(xmaxval): return -inf + + # Likelihood of left values + like = sum(log(2*(x[x<=mode] - minval)) - log(mode - minval) - log(maxval - minval)) + + # Likelihood of right values + like += sum(log(2*(maxval - x[x>mode])) - log(maxval - minval) - log(maxval - mode)) + + return like + +def rtriangular(mode, minval, maxval, size=1): + """Generate triangular random numbers""" + + # Uniform random numbers + z = atleast_1d(random.random(size)) + + # Threshold for transformation + threshold = (mode - minval)/(maxval - minval) + + # Transform uniforms + u = atleast_1d(zeros(size)) + u[z<=threshold] = minval + sqrt(z[z<=threshold]*(maxval - minval)*(mode - minval)) + u[z>threshold] = maxval - sqrt((1 - z[z>threshold])*(maxval - minval)*(maxval - mode)) + + return u
# truncated_metropolis.py -- custom PyMC 2 step method: Metropolis sampling
# with a truncated-normal proposal confined to [low_bound, up_bound].
# NOTE(review): `pymc` is referenced but not imported here -- presumably the
# enclosing script imports it; confirm before running standalone.
class TruncatedMetropolis(pymc.Metropolis):
    def __init__(self, stochastic, low_bound, up_bound, *args, **kwargs):
        # Bounds of the truncated proposal distribution.
        self.low_bound = low_bound
        self.up_bound = up_bound
        pymc.Metropolis.__init__(self, stochastic, *args, **kwargs)

    # Propose method generates proposal values
    def propose(self):
        # Proposal precision from the adapted scale factor and base proposal sd.
        tau = 1./(self.adaptive_scale_factor * self.proposal_sd)**2
        # Draw a proposal from a normal centred on the current value,
        # truncated to [low_bound, up_bound].
        self.stochastic.value = \
            pymc.rtruncnorm(self.stochastic.value, tau, self.low_bound, self.up_bound)

    # Hastings factor method accounts for asymmetric proposal distribution
    def hastings_factor(self):
        tau = 1./(self.adaptive_scale_factor * self.proposal_sd)**2
        cur_val = self.stochastic.value
        last_val = self.stochastic.last_value

        # Log-density of proposing cur_val from last_val (forward move)
        # and of the reverse move; their difference corrects acceptance.
        lp_for = pymc.truncnorm_like(cur_val, last_val, tau, \
            self.low_bound, self.up_bound)
        lp_bak = pymc.truncnorm_like(last_val, cur_val, tau, \
            self.low_bound, self.up_bound)

        if self.verbose > 1:
            print self._id + ': Hastings factor %f'%(lp_bak - lp_for)
        return lp_bak - lp_for