
placeholder->TensorSpec refactor

ppwwyyxx committed Mar 18, 2019
1 parent 0aa92de commit ba679ab1fba2f4e2e73f74e1e792b85a4ac86486
Showing with 96 additions and 98 deletions.
  1. +1 −1 docs/tutorial/training-interface.md
  2. +4 −4 examples/A3C-Gym/train-atari.py
  3. +5 −5 examples/CTC-TIMIT/train-timit.py
  4. +2 −2 examples/Char-RNN/char-rnn.py
  5. +4 −6 examples/DeepQNetwork/DQNModel.py
  6. +2 −2 examples/DoReFa-Net/resnet-dorefa.py
  7. +2 −2 examples/DoReFa-Net/svhn-digit-dorefa.py
  8. +4 −4 examples/DynamicFilterNetwork/steering-filter.py
  9. +14 −14 examples/FasterRCNN/train.py
  10. +1 −1 examples/GAN/BEGAN.py
  11. +2 −2 examples/GAN/ConditionalGAN-mnist.py
  12. +2 −2 examples/GAN/CycleGAN.py
  13. +1 −1 examples/GAN/DCGAN.py
  14. +2 −2 examples/GAN/DiscoGAN-CelebA.py
  15. +2 −2 examples/GAN/Image2Image.py
  16. +1 −1 examples/GAN/InfoGAN-mnist.py
  17. +2 −2 examples/HED/hed.py
  18. +2 −2 examples/ImageNetModels/imagenet_utils.py
  19. +2 −2 examples/ImageNetModels/inception-bn.py
  20. +3 −3 examples/OpticalFlow/flownet_models.py
  21. +2 −2 examples/PennTreebank/PTB-LSTM.py
  22. +2 −2 examples/ResNet/cifar10-preact18-mixup.py
  23. +2 −2 examples/ResNet/cifar10-resnet.py
  24. +2 −2 examples/ResNet/load-resnet.py
  25. +1 −1 examples/Saliency/saliency-maps.py
  26. +8 −8 examples/SimilarityLearning/mnist-embeddings.py
  27. +2 −2 examples/SpatialTransformer/mnist-addition.py
  28. +2 −2 examples/SuperResolution/enet-pat.py
  29. +2 −2 examples/basics/cifar-convnet.py
  30. +3 −3 examples/basics/export-model.py
  31. +2 −2 examples/basics/mnist-tflayers.py
  32. +2 −2 examples/basics/mnist-tfslim.py
  33. +2 −2 examples/basics/mnist-visualizations.py
  34. +2 −2 examples/basics/svhn-digit-convnet.py
  35. +2 −2 examples/boilerplate.py
  36. +2 −2 examples/keras/mnist-keras.py
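
Every hunk below follows the same mechanical pattern, but note that the two APIs take their arguments in a different order: `tf.placeholder(dtype, shape, name)` becomes `tf.TensorSpec(shape, dtype, name)`. A minimal illustration of the conversion (the shape and name here are made up for the example, not taken from any file in this commit):

```python
import tensorflow as tf

# Old style removed in this commit: dtype comes first.
#   x = tf.placeholder(tf.float32, [None, 28, 28], 'input')

# New style: shape comes first, then dtype, then name.
x_spec = tf.TensorSpec([None, 28, 28], tf.float32, 'input')
```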
@@ -17,7 +17,7 @@ expects 4 arguments to setup the graph: `InputDesc`, `InputSource`, get_cost fun
```python
class MyModel(ModelDesc):
def inputs(self):
- return [tf.placeholder(dtype, shape, name), tf.placeholder(dtype, shape, name), ... ]
+ return [tf.TensorSpec(shape, dtype, name), tf.TensorSpec(shape, dtype, name), ... ]
def build_graph(self, tensorA, tensorB, ...): # inputs
# build the graph
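
For reference, a complete `ModelDesc` written against the new interface might look like the sketch below. This is only an illustration of the convention shown in the tutorial hunk above; the layer, loss, and `optimizer()` choices are assumptions, not part of this commit:

```python
import tensorflow as tf
from tensorpack import ModelDesc


class ExampleModel(ModelDesc):
    def inputs(self):
        # Describe the inputs only; tensorpack builds the actual placeholders/queues.
        return [tf.TensorSpec([None, 28, 28], tf.float32, 'input'),
                tf.TensorSpec([None], tf.int32, 'label')]

    def build_graph(self, image, label):
        # Arguments arrive in the same order as the specs returned by inputs().
        logits = tf.layers.dense(tf.layers.flatten(image), 10)
        cost = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=logits)
        return cost  # the training cost

    def optimizer(self):
        return tf.train.AdamOptimizer(1e-3)
```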
@@ -69,10 +69,10 @@ def _build_player(self):
class Model(ModelDesc):
def inputs(self):
assert NUM_ACTIONS is not None
- return [tf.placeholder(tf.uint8, (None,) + STATE_SHAPE + (FRAME_HISTORY, ), 'state'),
- tf.placeholder(tf.int64, (None,), 'action'),
- tf.placeholder(tf.float32, (None,), 'futurereward'),
- tf.placeholder(tf.float32, (None,), 'action_prob'),
+ return [tf.TensorSpec((None,) + STATE_SHAPE + (FRAME_HISTORY, ), tf.uint8, 'state'),
+ tf.TensorSpec((None,), tf.int64, 'action'),
+ tf.TensorSpec((None,), tf.float32, 'futurereward'),
+ tf.TensorSpec((None,), tf.float32, 'action_prob'),
]

def _get_NN_prediction(self, state):
@@ -26,11 +26,11 @@

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, [None, None, FEATUREDIM], 'feat'), # bxmaxseqx39
- tf.placeholder(tf.int64, [None, None], 'labelidx'), # label is b x maxlen, sparse
- tf.placeholder(tf.int32, [None], 'labelvalue'),
- tf.placeholder(tf.int64, [None], 'labelshape'),
- tf.placeholder(tf.int32, [None], 'seqlen'), # b
+ return [tf.TensorSpec([None, None, FEATUREDIM], tf.float32, 'feat'), # bxmaxseqx39
+ tf.TensorSpec([None, None], tf.int64, 'labelidx'), # label is b x maxlen, sparse
+ tf.TensorSpec([None], tf.int32, 'labelvalue'),
+ tf.TensorSpec([None], tf.int64, 'labelshape'),
+ tf.TensorSpec([None], tf.int32, 'seqlen'), # b
]

def build_graph(self, feat, labelidx, labelvalue, labelshape, seqlen):
@@ -70,8 +70,8 @@ def __iter__(self):

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.int32, (None, param.seq_len), 'input'),
- tf.placeholder(tf.int32, (None, param.seq_len), 'nextinput')]
+ return [tf.TensorSpec((None, param.seq_len), tf.int32, 'input'),
+ tf.TensorSpec((None, param.seq_len), tf.int32, 'nextinput')]

def build_graph(self, input, nextinput):
cell = rnn.MultiRNNCell([rnn.LSTMBlockCell(num_units=param.rnn_size)
@@ -34,12 +34,10 @@ def inputs(self):
# When we use h history frames, the current state and the next state will have (h-1) overlapping frames.
# Therefore we use a combined state for efficiency:
# The first h are the current state, and the last h are the next state.
- return [tf.placeholder(self.state_dtype,
- (None,) + self.state_shape + (self.history + 1, ),
- 'comb_state'),
- tf.placeholder(tf.int64, (None,), 'action'),
- tf.placeholder(tf.float32, (None,), 'reward'),
- tf.placeholder(tf.bool, (None,), 'isOver')]
+ return [tf.TensorSpec((None,) + self.state_shape + (self.history + 1, ), self.state_dtype, 'comb_state'),
+ tf.TensorSpec((None,), tf.int64, 'action'),
+ tf.TensorSpec((None,), tf.float32, 'reward'),
+ tf.TensorSpec((None,), tf.bool, 'isOver')]

@abc.abstractmethod
def _get_DQN_prediction(self, state):
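
The comment in the hunk above explains the combined-state trick: with `h` history frames the current and next states share `h-1` frames, so a single `comb_state` carrying `h+1` frames is stored instead of two states. A hedged sketch of how such a tensor could be split back into the two overlapping windows (assuming a simple `[batch, H, W, history + 1]` layout; the actual model may slice differently):

```python
import tensorflow as tf

def split_comb_state(comb_state, history):
    """Split a [batch, H, W, history + 1] tensor into current and next states."""
    state = comb_state[..., :history]   # frames 0 .. history-1  -> current state
    next_state = comb_state[..., 1:]    # frames 1 .. history    -> next state (overlaps by history-1)
    return state, next_state
```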
@@ -33,8 +33,8 @@

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
- tf.placeholder(tf.int32, [None], 'label')]
+ return [tf.TensorSpec([None, 224, 224, 3], tf.float32, 'input'),
+ tf.TensorSpec([None], tf.int32, 'label')]

def build_graph(self, image, label):
image = image / 256.0
@@ -41,8 +41,8 @@

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, [None, 40, 40, 3], 'input'),
- tf.placeholder(tf.int32, [None], 'label')]
+ return [tf.TensorSpec([None, 40, 40, 3], tf.float32, 'input'),
+ tf.TensorSpec([None], tf.int32, 'label')]

def build_graph(self, image, label):
is_training = get_current_tower_context().is_training
@@ -95,10 +95,10 @@ def n(x):

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, (BATCH, ), 'theta'),
- tf.placeholder(tf.float32, (BATCH, SHAPE, SHAPE), 'image'),
- tf.placeholder(tf.float32, (BATCH, SHAPE, SHAPE), 'gt_image'),
- tf.placeholder(tf.float32, (BATCH, 9, 9), 'gt_filter')]
+ return [tf.TensorSpec((BATCH, ), tf.float32, 'theta'),
+ tf.TensorSpec((BATCH, SHAPE, SHAPE), tf.float32, 'image'),
+ tf.TensorSpec((BATCH, SHAPE, SHAPE), tf.float32, 'gt_image'),
+ tf.TensorSpec((BATCH, 9, 9), tf.float32, 'gt_filter')]

def _parameter_net(self, theta, kernel_shape=9):
"""Estimate filters for convolution layers
@@ -98,14 +98,14 @@ def build_graph(self, *inputs):
class ResNetC4Model(DetectionModel):
def inputs(self):
ret = [
- tf.placeholder(tf.float32, (None, None, 3), 'image'),
- tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),
- tf.placeholder(tf.float32, (None, None, cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),
- tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
- tf.placeholder(tf.int64, (None,), 'gt_labels')] # all > 0
+ tf.TensorSpec((None, None, 3), tf.float32, 'image'),
+ tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),
+ tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),
+ tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
+ tf.TensorSpec((None,), tf.int64, 'gt_labels')] # all > 0
if cfg.MODE_MASK:
ret.append(
- tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
+ tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret

@@ -199,20 +199,20 @@ class ResNetFPNModel(DetectionModel):

def inputs(self):
ret = [
- tf.placeholder(tf.float32, (None, None, 3), 'image')]
+ tf.TensorSpec((None, None, 3), tf.float32, 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
- tf.placeholder(tf.int32, (None, None, num_anchors),
- 'anchor_labels_lvl{}'.format(k + 2)),
- tf.placeholder(tf.float32, (None, None, num_anchors, 4),
- 'anchor_boxes_lvl{}'.format(k + 2))])
+ tf.TensorSpec((None, None, num_anchors), tf.int32,
+ 'anchor_labels_lvl{}'.format(k + 2)),
+ tf.TensorSpec((None, None, num_anchors, 4), tf.float32,
+ 'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
- tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
- tf.placeholder(tf.int64, (None,), 'gt_labels')]) # all > 0
+ tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
+ tf.TensorSpec((None,), tf.int64, 'gt_labels')]) # all > 0
if cfg.MODE_MASK:
ret.append(
- tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
+ tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret

@@ -28,7 +28,7 @@

class Model(GANModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, (None, args.final_size, args.final_size, 3), 'input')]
+ return [tf.TensorSpec((None, args.final_size, args.final_size, 3), tf.float32, 'input')]

@auto_reuse_variable_scope
def decoder(self, z):
@@ -41,8 +41,8 @@ def batch_flatten(x):

class Model(GANModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, (None, 28, 28), 'input'),
- tf.placeholder(tf.int32, (None,), 'label')]
+ return [tf.TensorSpec((None, 28, 28), tf.float32, 'input'),
+ tf.TensorSpec((None,), tf.int32, 'label')]

def generator(self, z, y):
l = FullyConnected('fc0', tf.concat([z, y], 1), 1024, activation=BNReLU)
@@ -42,8 +42,8 @@ def INLReLU(x, name=None):

class Model(GANModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, (None, SHAPE, SHAPE, 3), 'inputA'),
- tf.placeholder(tf.float32, (None, SHAPE, SHAPE, 3), 'inputB')]
+ return [tf.TensorSpec((None, SHAPE, SHAPE, 3), tf.float32, 'inputA'),
+ tf.TensorSpec((None, SHAPE, SHAPE, 3), tf.float32, 'inputB')]

@staticmethod
def build_res_block(x, name, chan, first=False):
@@ -41,7 +41,7 @@ def __init__(self, shape, batch, z_dim):
self.zdim = z_dim

def inputs(self):
- return [tf.placeholder(tf.float32, (None, self.shape, self.shape, 3), 'input')]
+ return [tf.TensorSpec((None, self.shape, self.shape, 3), tf.float32, 'input')]

def generator(self, z):
""" return an image generated from z"""
@@ -35,8 +35,8 @@ def BNLReLU(x, name=None):

class Model(GANModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, (None, SHAPE, SHAPE, 3), 'inputA'),
- tf.placeholder(tf.float32, (None, SHAPE, SHAPE, 3), 'inputB')]
+ return [tf.TensorSpec((None, SHAPE, SHAPE, 3), tf.float32, 'inputA'),
+ tf.TensorSpec((None, SHAPE, SHAPE, 3), tf.float32, 'inputB')]

@auto_reuse_variable_scope
def generator(self, img):
@@ -63,8 +63,8 @@ def visualize_tensors(name, imgs, scale_func=lambda x: (x + 1.) * 128., max_outp
class Model(GANModelDesc):
def inputs(self):
SHAPE = 256
- return [tf.placeholder(tf.float32, (None, SHAPE, SHAPE, IN_CH), 'input'),
- tf.placeholder(tf.float32, (None, SHAPE, SHAPE, OUT_CH), 'output')]
+ return [tf.TensorSpec((None, SHAPE, SHAPE, IN_CH), tf.float32, 'input'),
+ tf.TensorSpec((None, SHAPE, SHAPE, OUT_CH), tf.float32, 'output')]

def generator(self, imgs):
# imgs: input: 256x256xch
@@ -106,7 +106,7 @@ def sample_prior(batch_size):

class Model(GANModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, (None, 28, 28), 'input')]
+ return [tf.TensorSpec((None, 28, 28), tf.float32, 'input')]

def generator(self, z):
l = FullyConnected('fc0', z, 1024, activation=BNReLU)
@@ -103,8 +103,8 @@ def bilinear_conv_filler(s):

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, [None, None, None, 3], 'image'),
- tf.placeholder(tf.int32, [None, None, None], 'edgemap')]
+ return [tf.TensorSpec([None, None, None, 3], tf.float32, 'image'),
+ tf.TensorSpec([None, None, None], tf.int32, 'edgemap')]

def build_graph(self, image, edgemap):
image = image - tf.constant([104, 116, 122], dtype='float32')
@@ -324,8 +324,8 @@ class ImageNetModel(ModelDesc):
label_smoothing = 0.

def inputs(self):
- return [tf.placeholder(self.image_dtype, [None, self.image_shape, self.image_shape, 3], 'input'),
- tf.placeholder(tf.int32, [None], 'label')]
+ return [tf.TensorSpec([None, self.image_shape, self.image_shape, 3], self.image_dtype, 'input'),
+ tf.TensorSpec([None], tf.int32, 'label')]

def build_graph(self, image, label):
image = self.image_preprocess(image)
@@ -23,8 +23,8 @@

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
- tf.placeholder(tf.int32, [None], 'label')]
+ return [tf.TensorSpec([None, INPUT_SHAPE, INPUT_SHAPE, 3], tf.float32, 'input'),
+ tf.TensorSpec([None], tf.int32, 'label')]

def build_graph(self, image, label):
image = image / 128.0
@@ -145,9 +145,9 @@ def __init__(self, height=None, width=None):
self.width = width

def inputs(self):
- return [tf.placeholder(tf.float32, (1, 3, self.height, self.width), 'left'),
- tf.placeholder(tf.float32, (1, 3, self.height, self.width), 'right'),
- tf.placeholder(tf.float32, (1, 2, self.height, self.width), 'gt_flow')]
+ return [tf.TensorSpec((1, 3, self.height, self.width), tf.float32, 'left'),
+ tf.TensorSpec((1, 3, self.height, self.width), tf.float32, 'right'),
+ tf.TensorSpec((1, 2, self.height, self.width), tf.float32, 'gt_flow')]

def graph_structure(self, inputs):
"""
@@ -46,8 +46,8 @@ def get_PennTreeBank(data_dir=None):

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.int32, (None, SEQ_LEN), 'input'),
- tf.placeholder(tf.int32, (None, SEQ_LEN), 'nextinput')]
+ return [tf.TensorSpec((None, SEQ_LEN), tf.int32, 'input'),
+ tf.TensorSpec((None, SEQ_LEN), tf.int32, 'nextinput')]

def build_graph(self, input, nextinput):
is_training = get_current_tower_context().is_training
@@ -40,8 +40,8 @@ def preactivation_block(input, num_filters, stride=1):

class ResNet_Cifar(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, [None, 32, 32, 3], 'input'),
- tf.placeholder(tf.float32, [None, CLASS_NUM], 'label')]
+ return [tf.TensorSpec([None, 32, 32, 3], tf.float32, 'input'),
+ tf.TensorSpec([None, CLASS_NUM], tf.float32, 'label')]

def build_graph(self, image, label):
assert tf.test.is_gpu_available()
@@ -39,8 +39,8 @@ def __init__(self, n):
self.n = n

def inputs(self):
- return [tf.placeholder(tf.float32, [None, 32, 32, 3], 'input'),
- tf.placeholder(tf.int32, [None], 'label')]
+ return [tf.TensorSpec([None, 32, 32, 3], tf.float32, 'input'),
+ tf.TensorSpec([None], tf.int32, 'label')]

def build_graph(self, image, label):
image = image / 128.0
@@ -29,8 +29,8 @@

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, [None, 224, 224, 3], 'input'),
- tf.placeholder(tf.int32, [None], 'label')]
+ return [tf.TensorSpec([None, 224, 224, 3], tf.float32, 'input'),
+ tf.TensorSpec([None], tf.int32, 'label')]

def build_graph(self, image, label):
blocks = CFG[DEPTH]
@@ -54,7 +54,7 @@ def saliency_map(output, input, name="saliency_map"):

class Model(tp.ModelDescBase):
def inputs(self):
- return [tf.placeholder(tf.float32, (IMAGE_SIZE, IMAGE_SIZE, 3), 'image')]
+ return [tf.TensorSpec((IMAGE_SIZE, IMAGE_SIZE, 3), tf.float32, 'image')]

def build_graph(self, orig_image):
mean = tf.get_variable('resnet_v1_50/mean_rgb', shape=[3])
@@ -236,9 +236,9 @@ def get_data():
return ds

def inputs(self):
- return [tf.placeholder(tf.float32, (None, 28, 28), 'input'),
- tf.placeholder(tf.float32, (None, 28, 28), 'input_y'),
- tf.placeholder(tf.int32, (None,), 'label')]
+ return [tf.TensorSpec((None, 28, 28), tf.float32, 'input'),
+ tf.TensorSpec((None, 28, 28), tf.float32, 'input_y'),
+ tf.TensorSpec((None,), tf.int32, 'label')]

def build_graph(self, x, y, label):
# embed them
@@ -280,9 +280,9 @@ def get_data():
return ds

def inputs(self):
- return [tf.placeholder(tf.float32, (None, 28, 28), 'input'),
- tf.placeholder(tf.float32, (None, 28, 28), 'input_p'),
- tf.placeholder(tf.float32, (None, 28, 28), 'input_n')]
+ return [tf.TensorSpec((None, 28, 28), tf.float32, 'input'),
+ tf.TensorSpec((None, 28, 28), tf.float32, 'input_p'),
+ tf.TensorSpec((None, 28, 28), tf.float32, 'input_n')]

def loss(self, a, p, n):
return triplet_loss(a, p, n, 5., extra=True, scope="loss")
@@ -314,8 +314,8 @@ def get_data():
return ds

def inputs(self):
- return [tf.placeholder(tf.float32, (None, 28, 28), 'input'),
- tf.placeholder(tf.int32, (None,), 'label')]
+ return [tf.TensorSpec((None, 28, 28), tf.float32, 'input'),
+ tf.TensorSpec((None,), tf.int32, 'label')]

def build_graph(self, x, label):
# embed them
@@ -107,8 +107,8 @@ def GridSample(inputs, borderMode='repeat'):

class Model(ModelDesc):
def inputs(self):
- return [tf.placeholder(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE, 2), 'input'),
- tf.placeholder(tf.int32, (None,), 'label')]
+ return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE, 2), tf.float32, 'input'),
+ tf.TensorSpec((None,), tf.int32, 'label')]

def build_graph(self, image, label):
xys = np.array([(y, x, 1) for y in range(WARP_TARGET_SIZE)
@@ -50,8 +50,8 @@ def __init__(self, height=SHAPE_LR, width=SHAPE_LR):
self.width = width

def inputs(self):
- return [tf.placeholder(tf.float32, (None, self.height * 1, self.width * 1, CHANNELS), 'Ilr'),
- tf.placeholder(tf.float32, (None, self.height * 4, self.width * 4, CHANNELS), 'Ihr')]
+ return [tf.TensorSpec((None, self.height * 1, self.width * 1, CHANNELS), tf.float32, 'Ilr'),
+ tf.TensorSpec((None, self.height * 4, self.width * 4, CHANNELS), tf.float32, 'Ihr')]

def build_graph(self, Ilr, Ihr):
Ilr, Ihr = Ilr / 255.0, Ihr / 255.0
@@ -29,8 +29,8 @@ def __init__(self, cifar_classnum):
self.cifar_classnum = cifar_classnum

def inputs(self):
- return [tf.placeholder(tf.float32, (None, 30, 30, 3), 'input'),
- tf.placeholder(tf.int32, (None,), 'label')]
+ return [tf.TensorSpec((None, 30, 30, 3), tf.float32, 'input'),
+ tf.TensorSpec((None,), tf.int32, 'label')]

def build_graph(self, image, label):
is_training = get_current_tower_context().is_training
