forked from polmorenoc/inversegraphics
-
Notifications
You must be signed in to change notification settings - Fork 0
/
diffrender_opt.py
222 lines (157 loc) · 8.68 KB
/
diffrender_opt.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
# Copyright (c) 2015, Javier Gonzalez
# Copyright (c) 2015, the GPy Authors (see GPy AUTHORS.txt)
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
import GPyOpt
import GPy
import ipdb
from numpy.random import seed
import chumpy as ch
"""
This is a simple demo to demonstrate the use of Bayesian optimization with GPyOpt with some simple options. Run the example by writing:
import GPyOpt
BO_demo_2d = GPyOpt.demos.advanced_optimization_2d()
As a result you should see:
- A plot with the model and the current acquisition function
- A plot with the diagnostic plots of the optimization.
- An object called BO_demo_2d that contains the results of the optimization process (see reference manual for details). Among the available results you have access to the GP model via
>> BO_demo_2d.model
and to the location of the best solution found by writing:
>> BO_demo_2d.x_opt
"""
def opendrObjectiveFunction(obj, free_variables):
    """Wrap a chumpy/OpenDR objective ``obj`` as a batch-evaluable function.

    Parameters
    ----------
    obj : chumpy expression
        Objective whose current value is read from ``obj.r`` after the free
        variables are updated.
    free_variables : sequence
        Writable chumpy variables; each exposes ``.r`` (current value),
        ``.shape``, and supports in-place slice assignment.

    Returns
    -------
    callable
        ``objFun(vs)`` taking a 2-D array with one candidate point per row
        (the concatenated, flattened free-variable values) and returning a
        ``(n_rows, 1)`` array of objective values.
    """
    def _scatter(flat_vals):
        # Distribute one flat parameter vector across the free variables in
        # order, writing only when values actually differ -- unnecessary
        # writes would trigger needless recomputation in chumpy's graph.
        # Returns True iff any variable was modified.
        flat_vals = flat_vals.ravel()
        offset = 0
        changed = False
        for freevar in free_variables:
            size = freevar.r.size
            newvals = flat_vals[offset:offset + size].copy().reshape(freevar.shape)
            if np.max(np.abs(newvals - freevar).ravel()) > 0:
                freevar[:] = newvals
                changed = True
            offset += size
        return changed

    def objFun(vs):
        # One evaluation per row; the original single-row special case was
        # redundant with this loop and has been folded into it.
        vs = np.array(vs)
        res = []
        for row in vs:
            _scatter(row)
            res.append(obj.r.reshape([1, 1]))
        return np.vstack(res)

    return objFun
def opendrObjectiveFunctionCRF(free_variables, rendererGT, renderer, color, chVColors, chSHLightCoeffs, lightCoeffs, free_variables_app_light, resultDir, test_i, stds, method, updateColor=False,minAppLight=False):
    # Build a batch objective that scores candidate poses against a ground-truth
    # render using a dense-CRF fg/occluder/bg segmentation as per-pixel weights.
    # NOTE(review): this closure mutates chVColors, chSHLightCoeffs, and the
    # free variables as side effects on every evaluation.
    def changevars(vs, free_variables):
        # Scatter the flat candidate vector `vs` into `free_variables` in order;
        # write only when a value actually changed so chumpy does not recompute
        # needlessly. Returns True iff any variable was modified.
        vs = vs.ravel()
        cur = 0
        changed = False
        for idx, freevar in enumerate(free_variables):
            sz = freevar.r.size
            newvals = vs[cur:cur+sz].copy().reshape(free_variables[idx].shape)
            if np.max(np.abs(newvals-free_variables[idx]).ravel()) > 0:
                free_variables[idx][:] = newvals
                changed = True
            cur += sz
        return changed
    def objFun(vs):
        # Evaluate each row of `vs` (one candidate point per row); returns the
        # stacked (n_rows, 1) array of error values.
        vs = np.array(vs)
        res = []
        for vs_it, vs_i in enumerate(vs):
            changevars(vs_i, free_variables)
            import densecrf_model  # local import; Python caches it after the first loop iteration
            # Foreground-visibility and boundary masks from the current render.
            # NOTE(review): np.bool is deprecated (removed in NumPy >= 1.24); use bool.
            vis_im = np.array(renderer.indices_image == 1).copy().astype(np.bool)
            bound_im = renderer.boundarybool_image.astype(np.bool)
            # CRF inference on the GT image; Q holds per-label posteriors
            # (presumably label 0 = foreground, 1 = occluder, 2 = background --
            # TODO confirm against densecrf_model.crfInference).
            segmentation, Q = densecrf_model.crfInference(rendererGT.r, vis_im, bound_im, [0.75,0.25,0.01], resultDir + 'imgs/crf/Q_' + str(test_i) + '_it' + str(vs_it))
            vColor = color
            if updateColor:
                # Re-estimate the object colour from the segmented foreground,
                # only if the segment is big enough to give a stable median.
                if np.sum(segmentation == 0) > 5:
                    segmentRegion = segmentation == 0
                    vColor = np.median(rendererGT.reshape([-1, 3])[segmentRegion.ravel()], axis=0) * 1.4
                    vColor = vColor / max(np.max(vColor), 1.)  # clamp so channels stay <= 1
            # Push colour and lighting into the renderer's chumpy variables.
            chVColors[:] = vColor
            chSHLightCoeffs[:] = lightCoeffs
            variances = stds**2
            # Per-pixel Gaussian likelihood of the render matching the GT image.
            fgProb = ch.exp( - (renderer - rendererGT)**2 / (2 * variances)) * (1./(stds * np.sqrt(2 * np.pi)))
            h = renderer.r.shape[0]
            w = renderer.r.shape[1]
            # Occluder/background pixels contribute a flat (uniform) likelihood.
            occProb = np.ones([h,w])
            bgProb = np.ones([h,w])
            # Negative mean log-likelihood, mixing the three label posteriors;
            # pixels outside the visibility mask contribute log(1) = 0.
            errorFun = -ch.sum(ch.log(vis_im[:, :, None]*((Q[0].reshape([h, w, 1]) * fgProb) + (Q[1].reshape([h, w]) * occProb + Q[2].reshape([h, w]) * bgProb)[:, :, None]) + (1- vis_im[:, :, None])))/(h*w)
            if minAppLight:
                # Optionally refine appearance/lighting variables by minimising
                # the same error for a few iterations before recording it.
                options = {'disp': False, 'maxiter': 10}
                def cb(_):
                    print("Error: " + str(errorFun.r))
                ch.minimize({'raw': errorFun}, bounds=None, method=method, x0=free_variables_app_light, callback=cb, options=options)
            res = res + [errorFun.r.reshape([1,1])]
        return np.vstack(res)
    return objFun
def bayesOpt(objFun, initX, initF, bounds):
    """Run GPyOpt Bayesian optimization of ``objFun`` over box ``bounds``.

    Parameters
    ----------
    objFun : callable
        Batch objective mapping an (n, d) array of points to an (n, 1)
        array of values (e.g. the closures built above).
    initX, initF : arrays
        Initial design points and their objective values.
    bounds : list of (low, high) tuples
        Box constraints, one pair per input dimension.

    Returns
    -------
    GPyOpt.methods.BayesianOptimization
        The fitted optimizer; inspect ``.x_opt`` / ``.model`` on the result.
    """
    seed(12345)  # fixed seed for reproducible runs
    input_dim = len(bounds)
    # RBF kernel plus a bias term to absorb a constant offset in the objective.
    kernel = GPy.kern.RBF(input_dim, variance=.1, lengthscale=1) + GPy.kern.Bias(input_dim)
    BO_model = GPyOpt.methods.BayesianOptimization(f=objFun,             # function to optimize
                                kernel = kernel,                         # pre-specified model
                                X = initX,
                                Y = initF,
                                bounds=bounds,                           # box-constraints of the problem
                                acquisition='EI',                        # Expected Improvement
                                numdata_initial_design=len(initX),
                                type_initial_design='random',
                                normalize = True)                        # normalized y
    max_iter = 10  # evaluation budget
    # BUGFIX: removed a stray ipdb.set_trace() that halted every call here.
    BO_model.run_optimization(max_iter, n_inbatch=10, n_procs=10,        # number of iterations
                              acqu_optimize_method = 'DIRECT',           # method to optimize the acq. function
                              acqu_optimize_restarts = 1,                # number of local optimizers
                              eps=10e-2,                                 # secondary stop criterion
                              true_gradients = True)                     # use exact acquisition gradients
    return BO_model
def advanced_optimization_2d(plots=True):
    """Demo: Bayesian optimization of the (noisy) six-hump camel function.

    Fits a GP with an RBF+Bias kernel, runs 20 EI-driven iterations, and
    optionally shows the acquisition and convergence plots.  Returns the
    GPyOpt BayesianOptimization object (see ``.model`` and ``.x_opt``).
    """
    import GPyOpt
    import GPy
    from numpy.random import seed
    seed(12345)  # deterministic demo
    # True objective and a noisy version to optimize against.
    camel_true = GPyOpt.fmodels.experiments2d.sixhumpcamel()
    camel_noisy = GPyOpt.fmodels.experiments2d.sixhumpcamel(sd = 0.1)
    box = camel_noisy.bounds
    dim = len(box)
    # RBF kernel with a bias term to absorb a constant offset.
    demo_kernel = GPy.kern.RBF(dim, variance=.1, lengthscale=.1) + GPy.kern.Bias(dim)
    # Problem definition: EI acquisition, 15 random initial points, model
    # re-optimized every 2 collected points, normalized outputs.
    bo = GPyOpt.methods.BayesianOptimization(f=camel_noisy.f,
                                             kernel = demo_kernel,
                                             bounds=box,
                                             acquisition='EI',
                                             acquisition_par = 2,
                                             numdata_initial_design = 15,
                                             type_initial_design='random',
                                             model_optimize_interval= 2,
                                             normalize = True)
    # 20-iteration budget; DIRECT optimizes the acquisition from 30 restarts,
    # with a small eps as a secondary stopping criterion.
    bo.run_optimization(20,
                        acqu_optimize_method = 'DIRECT',
                        acqu_optimize_restarts = 30,
                        eps=10e-6,
                        true_gradients = True)
    if plots:
        camel_true.plot()
        bo.plot_acquisition()
        bo.plot_convergence()
    return bo