Change for running on CPU env & tfjs
sugyan committed Feb 5, 2020
1 parent 4874628 · commit c64d6ac
Showing 3 changed files with 17 additions and 6 deletions.
dnnlib/tflib/ops/fused_bias_act.py: 2 changes (1 addition & 1 deletion)
@@ -63,7 +63,7 @@ def fused_bias_act(x, b=None, axis=1, act='linear', alpha=None, gain=None, impl=

     impl_dict = {
         'ref':  _fused_bias_act_ref,
-        'cuda': _fused_bias_act_cuda,
+        'cuda': _fused_bias_act_ref,
     }
     return impl_dict[impl](x=x, b=b, axis=axis, act=act, alpha=alpha, gain=gain)

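Routing the 'cuda' key to _fused_bias_act_ref means callers that request the custom CUDA kernel silently get the portable pure-TensorFlow path instead. As a rough sketch (simplified, not the repo's exact _fused_bias_act_ref), the reference path is just a broadcast bias-add, an activation, and a gain, all standard ops that run on CPU and should also be expressible in tfjs:

    import tensorflow as tf

    def fused_bias_act_sketch(x, b=None, axis=1, act='linear', alpha=0.2, gain=1.0):
        # Broadcast the bias along `axis`, as the reference impl does.
        if b is not None:
            x += tf.reshape(b, [-1 if i == axis else 1 for i in range(x.shape.ndims)])
        # Apply the activation with plain TF ops (no custom CUDA kernel needed).
        if act == 'lrelu':
            x = tf.nn.leaky_relu(x, alpha=alpha)
        # Scale by the optional output gain.
        if gain != 1.0:
            x *= gain
        return x
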
dnnlib/tflib/ops/upfirdn_2d.py: 17 changes (13 additions & 4 deletions)
@@ -57,7 +57,7 @@ def upfirdn_2d(x, k, upx=1, upy=1, downx=1, downy=1, padx0=0, padx1=0, pady0=0,

     impl_dict = {
         'ref':  _upfirdn_2d_ref,
-        'cuda': _upfirdn_2d_cuda,
+        'cuda': _upfirdn_2d_ref,
     }
     return impl_dict[impl](x=x, k=k, upx=upx, upy=upy, downx=downx, downy=downy, padx0=padx0, padx1=padx1, pady0=pady0, pady1=pady1)

@@ -82,7 +82,10 @@ def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):

     # Upsample (insert zeros).
     x = tf.reshape(x, [-1, inH, 1, inW, 1, minorDim])
-    x = tf.pad(x, [[0, 0], [0, 0], [0, upy - 1], [0, 0], [0, upx - 1], [0, 0]])
+    if upy == 2:
+        x = tf.concat([x, tf.zeros(tf.shape(x))], axis=2)
+    if upx == 2:
+        x = tf.concat([x, tf.zeros(tf.shape(x))], axis=4)
     x = tf.reshape(x, [-1, inH * upy, inW * upx, minorDim])

     # Pad (crop if negative).
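The original tf.pad call inserted upy - 1 and upx - 1 zeros after each input sample; the concat version reproduces that for the factor-2 case only (hence the upy == 2 / upx == 2 guards), presumably because this pad pattern was unsupported in the target CPU/tfjs environment. A quick equivalence check for the 2x case (TF 1.x session API, as StyleGAN2 uses; variable names here are illustrative):

    import numpy as np
    import tensorflow as tf

    # One 2x2 image, reshaped with unit axes as in _upfirdn_2d_ref.
    x = tf.constant(np.arange(4, dtype=np.float32).reshape(1, 2, 1, 2, 1, 1))
    via_pad = tf.pad(x, [[0, 0], [0, 0], [0, 1], [0, 0], [0, 1], [0, 0]])
    y = tf.concat([x, tf.zeros(tf.shape(x))], axis=2)  # zeros after each row
    y = tf.concat([y, tf.zeros(tf.shape(y))], axis=4)  # zeros after each column
    with tf.Session() as sess:
        a, b = sess.run([via_pad, y])
    assert np.array_equal(a, b)  # both give the same zero-interleaved tensor
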
@@ -93,7 +96,9 @@ def _upfirdn_2d_ref(x, k, upx, upy, downx, downy, padx0, padx1, pady0, pady1):
     x = tf.transpose(x, [0, 3, 1, 2])
     x = tf.reshape(x, [-1, 1, inH * upy + pady0 + pady1, inW * upx + padx0 + padx1])
     w = tf.constant(k[::-1, ::-1, np.newaxis, np.newaxis], dtype=x.dtype)
-    x = tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NCHW')
+    x = tf.transpose(x, [0, 2, 3, 1])
+    x = tf.nn.conv2d(x, w, strides=[1,1,1,1], padding='VALID', data_format='NHWC')
+    x = tf.transpose(x, [0, 3, 1, 2])
     x = tf.reshape(x, [-1, minorDim, inH * upy + pady0 + pady1 - kernelH + 1, inW * upx + padx0 + padx1 - kernelW + 1])
     x = tf.transpose(x, [0, 2, 3, 1])

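CPU TensorFlow kernels (and tfjs) generally implement conv2d only for NHWC, which is why the NCHW convolution is bracketed with transposes rather than rewritten wholesale. The same three-line pattern recurs throughout this commit; factored out here for clarity (helper name is mine, not from the repo):

    import tensorflow as tf

    def conv2d_nchw_via_nhwc(x, w, strides=(1, 1), padding='VALID'):
        x = tf.transpose(x, [0, 2, 3, 1])  # NCHW -> NHWC
        x = tf.nn.conv2d(x, w, strides=[1, strides[0], strides[1], 1],
                         padding=padding, data_format='NHWC')
        return tf.transpose(x, [0, 3, 1, 2])  # back to NCHW
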
@@ -288,7 +293,11 @@ def upsample_conv_2d(x, w, k=None, factor=2, gain=1, data_format='NCHW', impl='c
     w = tf.reshape(w, [convH, convW, -1, num_groups * inC])

     # Execute.
-    x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format=data_format)
+    x = tf.transpose(x, [0, 2, 3, 1])
+    stride = [1, factor, factor, 1]
+    output_shape = [_shape(x, 0), (_shape(x, 1) - 1) * factor + convH, (_shape(x, 2) - 1) * factor + convW, outC]
+    x = tf.nn.conv2d_transpose(x, w, output_shape=output_shape, strides=stride, padding='VALID', data_format='NHWC')
+    x = tf.transpose(x, [0, 3, 1, 2])
     return _simple_upfirdn_2d(x, k, pad0=(p+1)//2+factor-1, pad1=p//2+1, data_format=data_format, impl=impl)

 #----------------------------------------------------------------------------
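Because the transposed convolution now runs in NHWC, stride and output_shape are rebuilt in NHWC order, and the spatial sizes follow the standard 'VALID' deconvolution formula out = (in - 1) * stride + kernel. A worked check with illustrative numbers (shapes are mine, not from the repo):

    # factor=2 upsampling with a 4x4 kernel on an 8x8 feature map:
    factor, convH, convW = 2, 4, 4
    N, H, W, outC = 1, 8, 8, 512
    output_shape = [N, (H - 1) * factor + convH, (W - 1) * factor + convW, outC]
    assert output_shape == [1, 18, 18, 512]  # NHWC: [batch, height, width, channels]
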
training/networks_stylegan2.py: 4 changes (3 additions & 1 deletion)
@@ -117,7 +117,9 @@ def modulated_conv2d_layer(x, y, fmaps, kernel, up=False, down=False, demodulate
     elif down:
         x = conv_downsample_2d(x, tf.cast(w, x.dtype), data_format='NCHW', k=resample_kernel)
     else:
-        x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NCHW', strides=[1,1,1,1], padding='SAME')
+        x = tf.transpose(x, [0, 2, 3, 1])
+        x = tf.nn.conv2d(x, tf.cast(w, x.dtype), data_format='NHWC', strides=[1,1,1,1], padding='SAME')
+        x = tf.transpose(x, [0, 3, 1, 2])

     # Reshape/scale output.
     if fused_modconv: