Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Invalid Argument Error in style_transfer3.py #37

Closed
kushalj001 opened this issue Sep 24, 2018 · 1 comment
Closed

Invalid Argument Error in style_transfer3.py #37

kushalj001 opened this issue Sep 24, 2018 · 1 comment

Comments

@kushalj001
Copy link

kushalj001 commented Sep 24, 2018

No description provided.

@kushalj001
Copy link
Author

content_image = load_image('./Images/kushal.jpg')
height, width = content_image.shape[1:3]
style_image = load_image('./Images/style.jpg',(height,width))
batch_shape = content_image.shape
shape = content_image.shape[1:]

vgg_model = VGG16_AveragePool(shape)

content_model = VGG16_AveragePool_Cutoff(shape, 11)
content_target = K.variable(content_model.predict(content_image),name='content_target')

conv_outputs = [
layer.get_output_at(1) for layer in vgg_model.layers
if layer.name.endswith('conv1')
]
style_model = Model(vgg_model.input, conv_outputs)
style_outputs = [K.variable(y) for y in style_model.predict(style_image)]
style_weights = [0.2,0.4,0.3,0.5,0.2]

loss = K.mean(K.square(content_model.output - content_target))

for w, symbolic, actual in zip(style_weights,conv_outputs,style_outputs):
loss += w*compute_style_loss(symbolic[0],actual[0])

gradients = K.gradients(loss, vgg_model.input)
get_loss_and_gradients = K.function(inputs=[vgg_model.input],outputs=[loss] + gradients)

def get_loss_and_gradients_wrapper(x):
l,g = get_loss_and_gradients([x.reshape(*batch_shape)])
return l.astype(np.float64),g.flatten().astype(np.float64)

final_image = minimize(get_loss_and_gradients_wrapper,10,batch_shape)
plt.imshow(scale(final_image))
plt.show()

InvalidArgumentError: You must feed a value for placeholder tensor 'sequential_18_input' with dtype float and shape [?,640,640,3]
[[Node: sequential_18_input = Placeholderdtype=DT_FLOAT, shape=[?,640,640,3], _device="/job:localhost/replica:0/task:0/device:CPU:0"]]

StackTrace

InvalidArgumentError Traceback (most recent call last)
in ()
114 return l.astype(np.float64),g.flatten().astype(np.float64)
115
--> 116 final_image = minimize(get_loss_and_gradients_wrapper,10,batch_shape)
117 plt.imshow(scale(final_image))
118 plt.show()

in minimize(fn, epochs, batch_shape)
59 x = np.random.randn(np.prod(batch_shape))
60 for i in range(epochs):
---> 61 x, l,_ = fmin_l_bfgs_b(func=fn,x0=x,maxfun=20)
62 x = np.clip(x,-127,127)
63 print("iteration=%s, loss=%s" %(i,l))

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\scipy\optimize\lbfgsb.py in fmin_l_bfgs_b(func, x0, fprime, args, approx_grad, bounds, m, factr, pgtol, epsilon, iprint, maxfun, maxiter, disp, callback, maxls)
197
198 res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
--> 199 **opts)
200 d = {'grad': res['jac'],
201 'task': res['message'],

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\scipy\optimize\lbfgsb.py in _minimize_lbfgsb(fun, x0, args, jac, bounds, disp, maxcor, ftol, gtol, eps, maxfun, maxiter, iprint, callback, maxls, **unknown_options)
333 # until the completion of the current minimization iteration.
334 # Overwrite f and g:
--> 335 f, g = func_and_grad(x)
336 elif task_str.startswith(b'NEW_X'):
337 # new iteration

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\scipy\optimize\lbfgsb.py in func_and_grad(x)
283 else:
284 def func_and_grad(x):
--> 285 f = fun(x, *args)
286 g = jac(x, *args)
287 return f, g

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\scipy\optimize\optimize.py in function_wrapper(*wrapper_args)
291 def function_wrapper(wrapper_args):
292 ncalls[0] += 1
--> 293 return function(
(wrapper_args + args))
294
295 return ncalls, function_wrapper

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\scipy\optimize\optimize.py in call(self, x, *args)
61 def call(self, x, *args):
62 self.x = numpy.asarray(x).copy()
---> 63 fg = self.fun(x, *args)
64 self.jac = fg[1]
65 return fg[0]

in get_loss_and_gradients_wrapper(x)
111
112 def get_loss_and_gradients_wrapper(x):
--> 113 l,g = get_loss_and_gradients([x.reshape(*batch_shape)])
114 return l.astype(np.float64),g.flatten().astype(np.float64)
115

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in call(self, inputs)
2664 return self._legacy_call(inputs)
2665
-> 2666 return self._call(inputs)
2667 else:
2668 if py_any(is_tensor(x) for x in inputs):

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\keras\backend\tensorflow_backend.py in _call(self, inputs)
2634 symbol_vals,
2635 session)
-> 2636 fetched = self._callable_fn(*array_vals)
2637 return fetched[:len(self.outputs)]
2638

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\tensorflow\python\client\session.py in call(self, *args, **kwargs)
1380 ret = tf_session.TF_SessionRunCallable(
1381 self._session._session, self._handle, args, status,
-> 1382 run_metadata_ptr)
1383 if run_metadata:
1384 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

c:\users\kushal\appdata\local\programs\python\python36\lib\site-packages\tensorflow\python\framework\errors_impl.py in exit(self, type_arg, value_arg, traceback_arg)
517 None, None,
518 compat.as_text(c_api.TF_Message(self.status.status)),
--> 519 c_api.TF_GetCode(self.status.status))
520 # Delete the underlying status object from memory otherwise it stays alive
521 # as there is a reference to status from this from the traceback due to

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant