-
Notifications
You must be signed in to change notification settings - Fork 87
/
run.py
274 lines (234 loc) · 9.85 KB
/
run.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
from __future__ import absolute_import, unicode_literals, print_function
from ctypes import cdll
import sys
libname = 'libmultinest'
try: # detect if run through mpiexec/mpirun
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() > 1: # need parallel capabilities
libname = 'libmultinest_mpi'
except ImportError:
pass
libname += {
'darwin' : '.dylib',
'win32' : '.dll',
'cygwin' : '.dll',
}.get(sys.platform, '.so')
try:
lib = cdll.LoadLibrary(libname)
except OSError as e:
message = str(e)
if message == '%s: cannot open shared object file: No such file or directory' % libname:
print()
print('ERROR: Could not load MultiNest library "%s"' % libname)
print('ERROR: You have to build it first,')
print('ERROR: and point the LD_LIBRARY_PATH environment variable to it!')
print('ERROR: manual: http://johannesbuchner.github.com/PyMultiNest/install.html')
print()
if message.endswith('cannot open shared object file: No such file or directory'):
print()
print('ERROR: Could not load MultiNest library: %s' % message.split(':')[0])
print('ERROR: You have to build MultiNest,')
print('ERROR: and point the LD_LIBRARY_PATH environment variable to it!')
print('ERROR: manual: http://johannesbuchner.github.com/PyMultiNest/install.html')
print()
if 'undefined symbol: mpi_' in message:
print()
print('ERROR: You tried to compile MultiNest linked with MPI,')
print('ERROR: but now when running, MultiNest can not find the MPI linked libraries.')
print('ERROR: manual: http://johannesbuchner.github.com/PyMultiNest/install.html')
print()
# the next if is useless because we can not catch symbol lookup errors (the executable crashes)
# but it is still there as documentation.
if 'symbol lookup error' in message and 'mpi' in message:
print()
print('ERROR: You are trying to get MPI to run, but MPI failed to load.')
print('ERROR: Specifically, mpi symbols are missing in the executable.')
print('ERROR: Let me know if this is a problem of running python or a compilation problem.')
print('ERROR: manual: http://johannesbuchner.github.com/PyMultiNest/install.html')
print()
# what if built with MPI, but don't have MPI
print('problem:', e)
import sys
sys.exit(1)
import inspect
import os
import signal, sys

from ctypes import *
from numpy.ctypeslib import as_array
def interrupt_handler(signal, frame):
sys.stderr.write('ERROR: Interrupt received: Terminating\n')
sys.exit(1)
def run(LogLikelihood,
Prior,
n_dims,
n_params = None,
n_clustering_params = None, wrapped_params = None,
importance_nested_sampling = True,
multimodal = True, const_efficiency_mode = False, n_live_points = 400,
evidence_tolerance = 0.5, sampling_efficiency = 0.8,
n_iter_before_update = 100, null_log_evidence = -1e90,
max_modes = 100, mode_tolerance = -1e90,
outputfiles_basename = "chains/1-", seed = -1, verbose = False,
resume = True, context = 0, write_output = True, log_zero = -1e100,
max_iter = 0, init_MPI = False, dump_callback = None):
"""
Runs MultiNest
The most important parameters are the two log-probability functions Prior
and LogLikelihood. They are called by MultiNest.
Prior should transform the unit cube into the parameter cube. Here
is an example for a uniform prior::
def Prior(cube, ndim, nparams):
for i in range(ndim):
cube[i] = cube[i] * 10 * math.pi
The LogLikelihood function gets this parameter cube and should
return the logarithm of the likelihood.
Here is the example for the eggbox problem::
def Loglike(cube, ndim, nparams, lnew):
chi = 1.
for i in range(ndim):
chi *= math.cos(cube[i] / 2.)
return math.pow(2. + chi, 5)
Some of the parameters are explained below. Otherwise consult the
MultiNest documentation.
@param importance_nested_sampling:
If True, Multinest will use Importance Nested Sampling (INS). Read http://arxiv.org/abs/1306.2144
for more details on INS. Please read the MultiNest README file before using the INS in MultiNest v3.0.
@param n_params:
Total no. of parameters, should be equal to ndims in most cases
but if you need to store some additional
parameters with the actual parameters then you need to pass
them through the likelihood routine.
@param sampling_efficiency:
defines the sampling efficiency. 0.8 and 0.3 are recommended
for parameter estimation & evidence evalutation
respectively.
use 'parameter' or 'model' to select the respective default
values
@param mode_tolerance:
MultiNest can find multiple modes & also specify which samples belong to which mode. It might be
desirable to have separate samples & mode statistics for modes with local log-evidence value greater than a
particular value in which case Ztol should be set to that value. If there isn't any particularly interesting
Ztol value, then Ztol should be set to a very large negative number (e.g. -1e90).
@param evidence_tolerance:
A value of 0.5 should give good enough accuracy.
@param n_clustering_params:
If mmodal is T, MultiNest will attempt to separate out the
modes. Mode separation is done through a clustering
algorithm. Mode separation can be done on all the parameters
(in which case nCdims should be set to ndims) & it
can also be done on a subset of parameters (in which case
nCdims < ndims) which might be advantageous as
clustering is less accurate as the dimensionality increases.
If nCdims < ndims then mode separation is done on
the first nCdims parameters.
@param null_log_evidence:
If mmodal is T, MultiNest can find multiple modes & also specify
which samples belong to which mode. It might be
desirable to have separate samples & mode statistics for modes
with local log-evidence value greater than a
particular value in which case nullZ should be set to that
value. If there isn't any particulrly interesting
nullZ value, then nullZ should be set to a very large negative
number (e.g. -1.d90).
@param init_MPI:
initialize MPI routines?, relevant only if compiling with MPI
To run pymultinest with MPI, you need mpi4py installed. Then,
the libmultinest_mpi library is loaded when you run with mpiexec
or similar. init_MPI should be set to False, because importing
mpi4py initialises MPI already.
@param log_zero:
points with loglike < logZero will be ignored by MultiNest
@param max_iter:
maximum number of iterations. 0 is unlimited.
@param write_output:
write output files? This is required for analysis.
@param dump_callback:
a callback function for dumping the current status
"""
if n_params == None:
n_params = n_dims
if n_clustering_params == None:
n_clustering_params = n_dims
if wrapped_params == None:
wrapped_params = [0] * n_dims
WrappedType = c_int * len(wrapped_params)
wraps = WrappedType(*wrapped_params)
if sampling_efficiency == 'parameter':
sampling_efficiency = 0.8
if sampling_efficiency == 'model':
sampling_efficiency = 0.3
loglike_type = CFUNCTYPE(c_double, POINTER(c_double),
c_int, c_int, c_double, c_void_p)
dumper_type = CFUNCTYPE(c_void_p, c_int, c_int, c_int,
POINTER(c_double),POINTER(c_double),POINTER(c_double),
c_double,c_double,c_double,c_void_p)
# check if lnew is supported by user function
nargs = 3
try:
nargs = len(inspect.getargspec(LogLikelihood).args) - inspect.ismethod(LogLikelihood)
except:
pass
if nargs == 4:
def loglike(cube, ndim, nparams, lnew, nullcontext):
if Prior:
Prior(cube, ndim, nparams)
return LogLikelihood(cube, ndim, nparams, lnew)
else:
def loglike(cube, ndim, nparams, lnew, nullcontext):
if Prior:
Prior(cube, ndim, nparams)
return LogLikelihood(cube, ndim, nparams)
def dumper(nSamples,nlive,nPar,
physLive,posterior,paramConstr,
maxLogLike,logZ,logZerr,nullcontext):
if dump_callback:
# It's not clear to me what the desired PyMultiNest dumper callback
# syntax is... but this should pass back the right numpy arrays,
# without copies. Untested!
pc = as_array(paramConstr,shape=(nPar,4))
dump_callback(nSamples,nlive,nPar,
as_array(physLive,shape=(nPar+1,nlive)).T,
as_array(posterior,shape=(nPar+2,nSamples)).T,
(pc[0,:],pc[1,:],pc[2,:],pc[3,:]), # (mean,std,bestfit,map)
maxLogLike,logZ,logZerr)
prev_handler = signal.signal(signal.SIGINT, interrupt_handler)
# to avoid garbage collection of these ctypes, which leads to NULLs
# we need to make local copies here that are not thrown away
s = outputfiles_basename.encode()
sb = create_string_buffer(s, 100)
argtypes = [c_bool, c_bool, c_bool,
c_int, c_double, c_double,
c_int, c_int, c_int, c_int,
c_int, c_double,
lambda x: x, c_int, lambda x: x,
c_bool, c_bool, c_bool, c_bool,
c_double, c_int, loglike_type, dumper_type, c_int
]
args = [importance_nested_sampling, multimodal, const_efficiency_mode,
n_live_points, evidence_tolerance, sampling_efficiency,
n_dims, n_params, n_clustering_params, max_modes,
n_iter_before_update, mode_tolerance,
sb, seed, wraps,
verbose, resume, write_output, init_MPI,
log_zero, max_iter, loglike, dumper, context]
print(args)
args_converted = [converter(v) for v, converter in zip(args, argtypes)]
lib.run(*args_converted)
signal.signal(signal.SIGINT, prev_handler)
assert len(args) == len(argtypes) # to make sure stuff is still here
def _is_newer(filea, fileb):
return os.stat(filea).st_mtime > os.stat(fileb).st_mtime
def multinest_complete(outputfiles_basename = "chains/1-"):
"""
Checks the output files of multinest to see if they are complete.
This also requires the presence of params.json, which your code should
write after calling run()
returns True or False
"""
names = ['stats.dat', 'post_equal_weights.dat', '.txt', 'resume.dat', 'params.json']
for n in names:
if not os.path.exists(basename + n):
return False
# if stats.dat and post_equal_weights.dat are newer than .txt and resume.dat exists
if not _is_newer(basename + 'post_equal_weights.dat', basename + '.txt'):
return False
return True