This repository has been archived by the owner on May 21, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 30
/
functor.pyx
768 lines (628 loc) · 25.2 KB
/
functor.pyx
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
#cython: embedsignature=True
cimport cython
from cpython cimport PyFloat_AsDouble, PyTuple_GetItem, PyTuple_GetItem,\
PyObject, PyTuple_SetItem, PyTuple_SetItem,\
PyTuple_New, Py_INCREF, PyFloat_FromDouble
import numpy as np
cimport numpy as np
from warnings import warn
from .probfit_warnings import SmallIntegralWarning
from _libstat cimport integrate1d_with_edges, _vector_apply,\
has_ana_integral, integrate1d
from .funcutil import FakeFuncCode, merge_func_code, FakeFunc
from .util import describe
cpdef tuple construct_arg(tuple arg, np.ndarray[np.int_t] fpos):
    """Select the elements of *arg* at the indices listed in *fpos*.

    Returns a new tuple ``(arg[fpos[0]], arg[fpos[1]], ...)``. Used to
    route a merged argument tuple back to the positional arguments of an
    individual component function.
    """
    cdef int size = fpos.shape[0]
    cdef int i, itmp
    # Raw pointer into the index array avoids per-element numpy overhead.
    cdef np.int_t* fposdata = <np.int_t*>fpos.data
    cdef tuple ret = PyTuple_New(size)
    cdef object tmpo
    for i in range(size):
        itmp = fposdata[i]
        tmpo = <object>PyTuple_GetItem(arg, itmp)
        # PyTuple_GetItem returns a borrowed reference while
        # PyTuple_SetItem steals one, so one INCREF balances the books.
        Py_INCREF(tmpo)
        #Py_INCREF(tmpo) #first one for the case second one for the steal
        PyTuple_SetItem(ret, i, tmpo)
    return ret
#TODO: optimize this may be check id() instead of actual comparison?
cpdef bint fast_tuple_equal(tuple t1, tuple t2, int t2_offset) except *:
    """Element-wise float comparison of t1 against t2[t2_offset:].

    Returns 1 when every pair of elements differs by less than 1e-16
    (i.e. effectively bitwise-equal floats), 0 otherwise. Intended for
    cache-hit detection where identical argument tuples are passed
    repeatedly, not for tolerant numeric comparison.
    """
    cdef double lhs, rhs
    cdef int i
    cdef int n = len(t2) - t2_offset
    cdef double precision = 1e-16
    if n <= 0:
        # Both empty counts as equal; anything else cannot match.
        return len(t1) == 0 and n == 0
    for i in range(n):
        lhs = PyFloat_AsDouble(<object>PyTuple_GetItem(t1, i))
        rhs = PyFloat_AsDouble(<object>PyTuple_GetItem(t2, i + t2_offset))
        if abs(lhs - rhs) >= precision:
            return 0
    return 1
cdef class Convolve:#with gy cache
    """
    Make convolution from supplied **f** and **g**. If your functions are
    analytically convolvable you will be better off implementing it
    analytically. This functor is implemented using numerical integration
    with bound, so there are numerical issues that will, in most cases,
    lightly affect normalization.

    Arguments from **f** and **g** are automatically merged by name.
    For example::

        f = lambda x, a, b, c: a*x**2+b*x+c
        g = lambda x, a, sigma: gaussian(x,a,sigma)
        h = Convolve(f,g,gbound)
        describe(h)#['x','a','b','c','sigma']

        #h is equivalent to
        def h_equiv(x, a, b, c, sigma):
            return Integrate(f(x-t, a, b, c)*g(x, a, b, c), t_range=gbound)

    .. math::
        \\text{Convolve(f,g)}(t, arg \\ldots) =
            \\int_{\\tau \\in \\text{gbound}}
                f(t-\\tau, arg \\ldots)\\times g(t, arg \\ldots) \\, \\mathrm{d}\\tau

    **Arguments**

    - **f** Callable object, PDF.
    - **g** Resolution function.
    - **gbound** Bound of the resolution. Supply something such that
      g(x,*arg) is near 0 at the edges. The current implementation is a
      multiply-reverse-slide-add straight from the definition so this
      bound is important. Overbounding is recommended.
    - **nbins** Number of bins in multiply-reverse-slide-add. Default(1000)

    .. note::
        You may be worried about normalization. By the property of
        convolution:

        .. math::
            \\int_{\\mathbf{R}^d}(f*g)(x) \\, dx =
                \\left(\\int_{\\mathbf{R}^d}f(x) \\, dx\\right)
                \\left(\\int_{\\mathbf{R}^d}g(x) \\, dx\\right).

        This means if you convolute two normalized distributions you get
        back a normalized distribution. However, since we are doing
        numerical integration here we will be off by a little bit.
    """
    #TODO: make this use analytical integral
    cdef int numbins            # NOTE(review): declared but never used
    cdef tuple gbound           # (low, high) bound of the resolution g
    cdef double bw              # bin width of the numerical slide
    cdef int nbg                # number of bins of the numerical slide
    cdef f #original f
    cdef g #original g
    cdef np.ndarray fpos#position of argument in f
    cdef np.ndarray gpos#position of argument in g
    cdef public object func_code
    cdef public object func_defaults
    cdef tuple last_garg        # shape args used for the cached g values
    cdef np.ndarray gy_cache    # cached g values for last_garg

    #g is resolution function gbound need to be set so that the end of g is zero
    def __init__(self, f, g, gbound, nbins=1000):
        self.set_gbound(gbound, nbins)
        # Merge argument lists by name, skipping each function's first
        # (independent) variable.
        self.func_code, [self.fpos, self.gpos] = merge_func_code(f, g, skip_first=True)
        self.func_defaults = None
        self.f = f
        self.g = g

    def set_gbound(self, gbound, nbins):
        """Set the resolution bound and bin count; invalidates the g cache."""
        self.last_garg = None
        self.gbound, self.nbg = gbound, nbins
        self.bw = 1.0*(gbound[1]-gbound[0])/nbins
        self.gy_cache = None

    def __call__(self, *arg):
        """Evaluate the convolution at arg[0] with shape parameters arg[1:]."""
        cdef int iconv
        cdef double ret = 0
        cdef np.ndarray[np.double_t] gy, fy
        x = arg[0]
        # Route the merged argument tuple back to g and f, docking off the
        # independent variable from each.
        garg = tuple([arg[i] for i in self.gpos][1:])
        farg = tuple([arg[i] for i in self.fpos][1:])
        #calculate all the g needed
        if garg == self.last_garg:
            # Same shape arguments as last call: reuse cached g values.
            gy = self.gy_cache
        else:
            xg = np.linspace(self.gbound[0], self.gbound[1], self.nbg)
            gy = _vector_apply(self.g, xg, garg)
            self.gy_cache = gy
            # BUGFIX: record the arguments the cache was built for.
            # Previously last_garg was never updated, so the comparison
            # above was always False and gy was recomputed on every call.
            self.last_garg = garg
        # f is evaluated from x-gbound[1] to x-gbound[0]: we "reverse"
        # slide g onto f, hence the swapped bounds (this is not a typo).
        fbound = x - self.gbound[1], x - self.gbound[0]
        xf = np.linspace(fbound[0], fbound[1], self.nbg)#yep nbg
        fy = _vector_apply(self.f, xf, farg)
        #now do the inverse slide g and f
        for iconv in range(self.nbg):
            ret += fy[iconv]*gy[self.nbg-iconv-1]
        #now normalize the integral
        ret *= self.bw
        return ret
cdef class Extended:
    """
    Wrap PDF **f** into its extended form by appending a multiplicative
    yield parameter.

    ::

        def f(x,mu,sigma):
            return gaussian(x,mu,sigma)
        g = Extended(f) #g is equivalent to N*f(x,mu,sigma)
        describe(g) #('x','mu','sigma','N')

        #g is equivalent to
        def g_equiv(x, mu, sigma, N):
            return N*f(x, mu, sigma)

    **Arguments**

    - **f** call object. PDF.
    - **extname** optional string. Name of the extended parameter.
      Default `'N'`
    """
    cdef f
    cdef public func_code
    cdef public func_defaults

    def __init__(self, f, extname='N'):
        self.f = f
        # Refuse a yield name that collides with one of f's parameters.
        if extname in describe(f):
            raise ValueError('%s is already taken pick something else for extname'%extname)
        self.func_code = FakeFuncCode(f, append=extname)
        self.func_defaults = None

    def __call__(self, *arg):
        # The appended yield is the last positional argument; everything
        # before it is forwarded to the wrapped pdf.
        cdef double scale = arg[-1]
        cdef double density = self.f(*arg[:-1])
        return scale*density

    def integrate(self, tuple bound, int nint, *arg):
        """Integral of the extended pdf: yield times the integral of f."""
        cdef double scale = arg[-1]
        cdef double base = integrate1d(self.f, bound, nint, arg[:-1])
        return scale*base
cdef class AddPdf:
    """
    Directly add PDFs without normalization nor factor.
    Parameters are merged by names.

    ::

        def f(x, a, b, c):
            return do_something(x,a,b,c)
        def g(x, d, a, e):
            return do_something_else(x, d, a, e)
        h = AddPdf(f, g)# you can do AddPdf(f, g, h)
        #h is equivalent to
        def h_equiv(x, a, b, c, d, e):
            return f(x, a, b, c) + g(x, d, a, e)

    **Arguments**

    - **prefix** array of prefix string with length equal to number of
      callable object passed. This allows you to add two PDF without
      having parameters from the two merge. ::

          h2 = AddPdf(f, g, prefix=['f','g'])
          #h is equivalent to
          def h2_equiv(x, f_a, f_b, f_c, g_d, g_a, g_e):
              return f(x, f_a, f_b, f_c) + g(x, g_d, g_a, g_e)

    - **factors** list of callable factor function. If given, AddPdf
      will simulate the pdf of the form::

          factor[0]*f + factor[1]*g

      Note that all arguments for callable factors will be prefixed (if
      given) as opposed to skipping the first one for the pdf list. If
      None is given, all factors are assumed to be constant 1. Default
      None.

    - **skip_prefix** list of variables that should not be prefixed.
      Default None. This is useful when you want to mix prefixing
      and sharing some of the variables.
    """
    #FIXME: cache each part if called with same parameter
    cdef public object func_code
    cdef public object func_defaults
    cdef int arglen                 # total merged argument count
    cdef list allpos                # per-callable argument positions (pdfs then factors)
    cdef list factpos               # argument positions for each factor
    cdef tuple allf                 # the pdf callables
    cdef tuple factors              # the factor callables, or None
    cdef readonly int numf          # number of pdfs
    cdef np.ndarray cache           # last computed value of each pdf
    cdef np.ndarray factor_cache    # last computed value of each factor
    cdef list argcache              # last argument tuple of each pdf
    cdef list factor_argcache       # last argument tuple of each factor
    cdef public int hit             # cache-hit counter (diagnostics)
    cdef list allfactors            # NOTE(review): declared but never used

    def __init__(self, *arg, prefix=None, factors=None, skip_prefix=None):
        if factors is not None and len(factors)!=len(arg):
            raise ValueError('factor is specified but has different length'
                             ' from arg.')
        allf = list(arg)
        if factors is not None:
            allf += factors
        # skip_first=True: the independent variable of every pdf is shared.
        self.func_code, allpos = merge_func_code(*arg, prefix=prefix,
                                                 skip_first=True,
                                                 factor_list=factors,
                                                 skip_prefix=skip_prefix)
        funcpos = allpos[:len(arg)]
        factpos = allpos[len(arg):]
        self.func_defaults = None
        self.arglen = self.func_code.co_argcount
        self.allf = arg # f function
        self.factors = tuple(factors) if factors is not None else None# factor function
        self.allpos = allpos # position for f arg
        self.factpos = factpos # position for factor arg
        self.numf = len(self.allf)
        self.argcache = [None]*self.numf
        self.factor_argcache = [None]*self.numf
        self.cache = np.zeros(self.numf)
        self.factor_cache = np.zeros(self.numf)
        self.hit = 0

    def __call__(self, *arg):
        """Evaluate sum_i factor_i(arg)*f_i(arg), caching each component."""
        cdef tuple this_arg
        cdef double ret = 0.
        cdef double tmp = 0.
        cdef double tmp_factor = 0.
        cdef int i
        cdef np.ndarray thispos
        for i in range(self.numf):
            thispos = self.allpos[i]
            this_arg = construct_arg(arg, thispos)
            # Reuse the cached value when this component's arguments are
            # (bitwise) identical to the previous call.
            if self.argcache[i] is not None and fast_tuple_equal(this_arg, self.argcache[i], 0):
                tmp = self.cache[i]
                self.hit += 1
            else:
                tmp = self.allf[i](*this_arg)
                self.argcache[i] = this_arg
                self.cache[i] = tmp
            if self.factors is not None: # calculate factor
                factor_arg = construct_arg(arg, self.factpos[i])
                # Factors have their own independent cache.
                if self.factor_argcache[i] is not None and fast_tuple_equal(factor_arg, self.factor_argcache[i], 0):
                    tmp_factor = self.factor_cache[i]
                    self.hit += 1
                else:
                    tmp_factor = self.factors[i](*factor_arg)
                    self.factor_argcache[i] = factor_arg
                    self.factor_cache[i] = tmp_factor
                ret += tmp_factor*tmp
            else:
                ret += tmp
        return ret

    def parts(self):
        """Return a list of callables, one per component (factor applied)."""
        return [self._part(i) for i in range(self.numf)]

    def _part(self, int findex):
        # Build a closure that evaluates only component *findex*, taking
        # the full merged argument list.
        def tmp(*arg):
            thispos = self.allpos[findex]
            this_arg = construct_arg(arg, thispos)
            ret = self.allf[findex](*this_arg)
            if self.factors is not None:
                facpos = self.factpos[findex]
                facarg = construct_arg(arg, facpos)
                fac = self.factors[findex](*facarg)
                ret *= fac
            return ret
        tmp.__name__ = getattr(self.allf[findex], '__name__', 'unnamedpart')
        ret = FakeFunc(tmp)
        ret.func_code = self.func_code
        return ret

    def eval_parts(self, *arg):
        """Evaluate every component separately; returns a tuple of values."""
        cdef tuple this_arg
        cdef double tmp = 0.
        cdef int i
        cdef list ref   # NOTE(review): declared but never used
        cdef np.ndarray thispos
        ret = list()
        for i in range(self.numf):
            tmp = self._part(i)(*arg)
            ret.append(tmp)
        return tuple(ret)

    def integrate(self, tuple bound, int nint, *arg):
        """Integrate the sum over *bound* using *nint* pieces.

        *arg* contains only the shape parameters (no independent
        variable), hence the index shifts by -1 below.
        """
        cdef int findex
        cdef tuple this_arg
        cdef double ret = 0.
        cdef double thisint = 0.
        cdef double fac = 0.
        cdef np.ndarray[np.int_t] fpos
        cdef np.ndarray[np.int_t] facpos
        for findex in range(self.numf):
            fpos = self.allpos[findex]
            #docking off x and shift due to no x in arg
            this_arg = construct_arg(arg, fpos[1:]-1)
            thisf = self.allf[findex]
            fac = 1.
            if self.factors is not None:
                facpos = self.factpos[findex]
                # -1 accounting for no dependent variable in this arg
                facarg = construct_arg(arg, facpos-1)
                fac = self.factors[findex](*facarg)
            thisint = integrate1d(thisf, bound, nint, this_arg)
            ret += fac*thisint
        return ret
cdef class AddPdfNorm:
    """
    Add PDFs with normalization factors. Parameters are merged by name.

    ::

        def f(x, a, b, c):
            return do_something(x, a, b, c)
        def g(x, d, a, e):
            return do_something_else(x, d, a, e)
        def p(x, b, a, c):
            return do_something_other_thing(x,b,a,c)
        h = AddPdfNorm(f, g, p)
        #h is equivalent to
        def h_equiv(x, a, b, c, d, e, f_0, f_1):
            return f_0*f(x, a, b, c)+ \\
                   f_1*g(x, d, a, e)+ \\
                   (1-f_0-f_1)*p(x, b, a, c)

    **Arguments**

    - **facname** optional list of factor names of length len(arg)-1.
      If None is given, factor names are automatically chosen to be
      `f_0`, `f_1` etc. Default None.
    - **prefix** optional prefix list to prefix arguments of each
      function. Default None.
    - **skip_prefix** optional list of variables that prefix will not be
      applied to. Default None(empty).
    """
    #FIXME: cache each part if called with same parameter
    cdef public object func_code
    cdef public object func_defaults
    cdef int arglen         # total argument count including the factors
    cdef int normalarglen   # argument count before the factors were appended
    cdef tuple allf         # the pdf callables
    cdef readonly int numf  # number of pdfs
    #cdef f
    #cdef g
    cdef list allpos        # per-pdf argument positions in the merged args
    #cdef np.ndarray fpos
    #cdef np.ndarray gpos

    def __init__(self, *arg, facname=None, prefix=None, skip_prefix=None):
        self.func_code, self.allpos = merge_func_code(*arg,
            prefix=prefix, skip_first=True, skip_prefix=skip_prefix)
        # With N pdfs only N-1 free factors exist; the last is 1-sum.
        if facname is not None and len(facname)!=len(arg)-1:
            raise(RuntimeError('length of facname and arguments must satisfy len(facname)==len(arg)-1'))
        self.normalarglen = self.func_code.co_argcount
        #TODO check name collisions here
        if facname is None:
            #automatic naming
            facname = ['f_%d'%i for i in range(len(arg)-1)]
        # Append the factor names after the merged shape arguments.
        for fname in facname:
            self.func_code.append(fname)
        self.arglen = self.func_code.co_argcount
        self.func_defaults = None
        self.allf = arg
        self.numf = len(arg)

    def __call__(self, *arg):
        """Evaluate f_0*pdf_0 + ... + (1-sum(f_i))*pdf_last."""
        cdef ret = 0.
        cdef tuple farg
        cdef double allfac = 0.
        cdef double fac = 0.
        cdef int findex = 0
        for findex in range(self.numf):
            if findex!=self.numf-1: #not the last one
                # Factors sit after the normalarglen merged arguments.
                fac = arg[self.normalarglen+findex]
                allfac += fac
            else: #last one
                fac = 1-allfac
            farg = construct_arg(arg, self.allpos[findex])
            ret += fac*self.allf[findex](*farg)
        return ret

    def parts(self):
        """Return a list of callables, one per weighted component."""
        return [self._part(i) for i in range(self.numf)]

    def _part(self, int findex):
        #FIXME make this faster. How does cython closure work?
        def tmp(*arg):
            if findex!=self.numf-1: #not the last one
                fac = arg[self.normalarglen+findex]
            else: #last one
                fac = 1. - sum(arg[self.normalarglen:])
            thispos = self.allpos[findex]
            this_arg = construct_arg(arg, thispos)
            return fac*self.allf[findex](*this_arg)
        tmp.__name__ = getattr(self.allf[findex], '__name__', 'unnamedpart')
        ret = FakeFunc(tmp)
        ret.func_code = self.func_code
        return ret

    def eval_parts(self, *arg):
        """Evaluate each weighted component; returns a tuple of values."""
        cdef tuple farg
        cdef double allfac = 0.
        cdef double fac = 0.
        cdef int findex = 0
        cdef list ret = []
        for findex in range(self.numf):
            if findex!=self.numf-1: #not the last one
                fac = arg[self.normalarglen+findex]
                allfac += fac
            else: #last one
                fac = 1-allfac
            farg = construct_arg(arg, self.allpos[findex])
            ret.append(fac*self.allf[findex](*farg))
        return tuple(ret)

    def integrate(self, tuple bound, int nint, *arg):
        """Integrate the weighted sum over *bound* using *nint* pieces.

        *arg* contains only shape parameters (no independent variable),
        hence the -1 shifts compared with __call__.
        """
        cdef int findex
        cdef double allfac = 0.
        cdef double fac = 0.
        cdef double thisint = 0.
        cdef double ret = 0.
        cdef np.ndarray[np.int_t] fpos
        for findex in range(self.numf):
            if findex!=self.numf-1: #not the last one
                # -1 since this arg has no x
                fac = arg[self.normalarglen+findex-1]
                allfac += fac
            else: #last one
                fac = 1-allfac
            fpos = self.allpos[findex]
            # docking off x and shift due to no x in arg
            farg = construct_arg(arg, fpos[1:]-1)
            f = self.allf[findex]
            thisint = integrate1d(f, bound, nint, farg)
            ret += thisint*fac
        return ret
cdef class Normalized:
    """
    Transform a PDF into a normalized version. The normalization factor
    is cached according to the shape parameters (all arguments except
    the first one).

    ::

        def f(x, a, b, c):
            return do_something(x, a, b, c)
        g = Normalized(f, (0., 1.))
        #g is equivalent to (shown here without cache)
        def g_equiv(x, a, b, c):
            return f(x, a, b, c)/Integrate(f(x, a, b, c), range=(0., 1.))

    **Arguments**

    - **f** function to normalize.
    - **bound** bound of the normalization.
    - **nint** optional number of pieces to integrate. Default 300.
    - **warnfloat** optional number of times it should warn if the
      integral of the given function is really small. This usually
      indicates your bound doesn't make sense with the given parameters.

    .. note::
        Integration implemented here is just a simple trapezoid rule.
        You are welcome to implement something better and submit a pull
        request.

    .. warning::
        Never reuse a Normalized object with different parameters in the
        same pdf.

        ::

            #DO NOT DO THIS
            def f(x,y,z):
                do_something(x,y,z)
            pdf1 = Normalized(f,(0,1)) #h has it's own cache
            pdf2 = rename(pdf1,['x','a','b'])#don't do this
            totalpdf = Add2PdfNorm(pdf1,pdf2)

        The reason is that Normalized has exactly one cache value.
        Every time it's called with different parameters the cache is
        invalidated and it will recompute the integration which takes a
        long time. For the example given above, when calling totalpdf,
        calling `pdf2` will always invalidate the `pdf1` cache causing
        it to recompute the integration for every datapoint `x`. The fix
        is easy::

            #DO THIS INSTEAD
            def f(x,y,z):
                do_something(x,y,z)
            pdf1 = Normalized(f,(0,1)) #h has it's own cache
            pdf2_temp = Normalized(f,(0,1)) #own separate cache
            pdf2 = rename(pdf2_temp,['x','a','b'])
            totalpdf = Add2PdfNorm(pdf1,pdf2)
    """
    cdef f                      # the wrapped pdf
    cdef double norm_cache      # cached normalization integral
    cdef tuple last_arg         # shape args the cache was computed for
    cdef int nint               # number of integration pieces
    cdef np.ndarray edges       # integration grid edges
    #cdef np.ndarray binwidth
    cdef double binwidth        # uniform grid spacing
    cdef public object func_code
    cdef public object func_defaults
    cdef int ndep               # number of dependent variables (assumed 1)
    cdef int warnfloat          # max number of small-integral warnings
    cdef int floatwarned        # warnings emitted so far
    cdef public int hit         # cache-hit counter (diagnostics)

    def __init__(self, f, bound, nint=300, warnfloat=1):
        self.f = f
        self.norm_cache = 1.
        self.last_arg = None
        self.nint = nint
        # normx = normx if normx is not None else np.linspace(range[0],range[1],nint)
        # if normx.dtype!=normx.dtype:
        #     normx = normx.astype(np.float64)
        #print range
        #print normx
        self.edges = np.linspace(bound[0], bound[1], nint)
        #print self.midpoints
        self.binwidth = self.edges[1]-self.edges[0]
        self.func_code = FakeFuncCode(f)
        self.ndep = 1#TODO make the code doesn't depend on this assumption
        self.func_defaults = None #make vectorize happy
        self.warnfloat = warnfloat
        self.floatwarned = 0
        self.hit = 0

    def __call__(self, *arg):
        """Return f(*arg)/∫f over the bound (cached per shape args)."""
        #print arg
        cdef double n
        cdef double x
        # Normalization depends only on the shape parameters arg[ndep:].
        n = self._compute_normalization(arg[self.ndep:])
        x = self.f(*arg)
        if self.floatwarned < self.warnfloat and n < 1e-100:
            warn(SmallIntegralWarning(str(arg)))
            self.floatwarned += 1
        return x/n

    cpdef _compute_normalization(self, tuple arg):
        """Compute (or fetch from the one-slot cache) ∫f over the bound."""
        cdef tuple targ = arg
        #if targ == self.last_arg:#cache hit
        if self.last_arg is not None and fast_tuple_equal(targ, self.last_arg, 0):
            #targ == self.last_arg:#cache hit
            #yah exact match for float since this is expected to be used
            #in vectorize which same value are passed over and over
            self.hit += 1
            pass
        else:
            self.last_arg = targ
            self.norm_cache = integrate1d_with_edges(self.f, self.edges,
                                                     self.binwidth, targ)
        return self.norm_cache

    def integrate(self, tuple bound, int bint, *arg):
        """Integral of the normalized pdf over *bound* with *bint* pieces.

        NOTE(review): the parameter name `bint` shadows Cython's bint
        type; renaming would change the keyword interface so it is kept.
        """
        n = self._compute_normalization(arg)
        X = integrate1d(self.f, bound, bint, arg)
        if self.floatwarned < self.warnfloat and n < 1e-100:
            warn(SmallIntegralWarning(str(arg)))
            self.floatwarned += 1
        return X/n
cdef class BlindFunc:
    """
    Transform given parameter(s) in the given **f** by a deterministic
    pseudo-random shift so that the analyst won't see the true fitted
    value.

    .. math::
        BlindFunc(f, ['y','z'], '123')(x, y, z) = f(x, y\\pm\\delta, z\\pm\\delta)

    ::

        def f(x,mu,sigma):
            return gaussian(x,mu,sigma)
        g= BlindFunc(f, toblind=['mu','sigma'], seedstring= 'abcxyz', width=1, signflip=True)
        describe(g) # ['x', 'mu', 'sigma']

    **Arguments**

    - **f** call object. A function or PDF.
    - **toblind** a list of names of parameters to be blinded. Can be a
      scalar if only one.
    - **seedstring** a string random number seed to control the random shift
    - **width** a Gaussian width that controls the random shift
    - **signflip** if True, the sign of the parameter may be flipped
      before being shifted.
    """
    cdef f
    cdef public func_code
    cdef public func_defaults
    cdef int signflip       # +1 or -1 multiplier applied before shifting
    cdef int [:] argpos     # indices of the blinded arguments
    cdef double shift       # deterministic additive offset

    def __init__(self, f, toblind, seedstring, width=1, signflip=True):
        cdef int i
        self.f = f
        # Normalize toblind to a list of parameter names.
        blindlist = []
        if np.isscalar(toblind):
            blindlist = [toblind]
        else:
            blindlist = toblind
        for tob in blindlist:
            if tob not in describe(f):
                raise ValueError('%s is not in a recognized parameter'%tob)
        self.argpos = np.empty(len(blindlist), dtype=np.int32)
        self.func_code = FakeFuncCode(f)
        self.func_defaults = None
        mystery = u'ambpel4.b4G#4hwW%&eNrw56wJE56N%wwgwywJj%whw'
        # Form an array of integers from the strings.
        # BUGFIX: np.int64 instead of np.int — the np.int alias was
        # removed in NumPy 1.20+, where dtype=np.int raises AttributeError.
        seed = np.array([ord(c) for c in seedstring + mystery], dtype=np.int64)
        rnd1 = np.random.RandomState(seed)
        rnd1.shuffle(seed)
        myRandom = np.random.RandomState(seed)
        # Draw from the stream unconditionally so that the shift value is
        # independent of the signflip flag (same stream order as before).
        self.signflip = myRandom.choice([-1, 1])
        if not signflip:
            # BUGFIX: the signflip argument was previously accepted but
            # ignored; honour signflip=False by disabling the flip.
            self.signflip = 1
        self.shift = myRandom.normal(0, width)
        for i, bb in enumerate(blindlist):
            self.argpos[i] = describe(f).index(bb)

    cpdef tuple __shift_arg__(self, tuple arg):
        """Return *arg* with each blinded position transformed to
        value*signflip + shift; other positions pass through unchanged."""
        cdef int numarg = len(arg)
        cdef tuple ret = PyTuple_New(numarg)
        cdef int i
        cdef object tmp, tmp2
        cdef double ftmp
        for i in range(numarg):
            if i not in self.argpos:
                tmp = <object>PyTuple_GetItem(arg, i)
                Py_INCREF(tmp) # get is borrow and set is steal
                # but <object> comes with inc ref + dec ref
                PyTuple_SetItem(ret, i, tmp)
            else:
                tmp = <object>PyTuple_GetItem(arg, i)
                ftmp = tmp
                ftmp = ftmp*self.signflip + self.shift
                tmp2 = PyFloat_FromDouble(ftmp)
                Py_INCREF(tmp2)
                PyTuple_SetItem(ret, i, tmp2)
        return ret

    def __call__(self, *arg):
        """Evaluate f with the blinded arguments shifted."""
        cdef tuple newarg = self.__shift_arg__(arg)
        return self.f(*newarg)

    def integrate(self, tuple bound, int nint, *arg):
        """Integrate f over *bound* with the blinded arguments shifted."""
        cdef tuple newarg = self.__shift_arg__(arg)
        return integrate1d(self.f, bound, nint, newarg)