
Switch tf.concat_v2 references in third_party/tensorflow to tf.concat.

Change: 144153795
1 parent 1255a17 · commit 0e226af7eed5e2764aa8acb825af4cd3e06d2452 · @tensorflower-gardener committed on Jan 11, 2017
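
For context: tf.concat_v2 was the transitional name introduced while tf.concat migrated from the old (concat_dim, values) argument order to the new (values, axis) order; by TF 1.0, tf.concat itself takes (values, axis), so every change in this commit is a pure rename with no argument reordering. A minimal before/after sketch (tensor values are illustrative, not from the diff):

    import tensorflow as tf

    a = tf.constant([[1., 2.], [3., 4.]])
    b = tf.constant([[5., 6.], [7., 8.]])

    # Before: tf.concat_v2([a, b], 0)
    # After:  same (values, axis) order, new canonical name.
    c = tf.concat([a, b], 0)  # shape (4, 2)
    d = tf.concat([a, b], 1)  # shape (2, 4)
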
Showing with 347 additions and 356 deletions.
  1. +1 −1 tensorflow/compiler/tests/lstm.py
  2. +2 −2 tensorflow/compiler/tests/nary_ops_test.py
  3. +1 −1 tensorflow/contrib/distributions/python/kernel_tests/bijector_test.py
  4. +1 −1 tensorflow/contrib/distributions/python/ops/bernoulli.py
  5. +9 −11 tensorflow/contrib/distributions/python/ops/bijector.py
  6. +1 −1 tensorflow/contrib/distributions/python/ops/categorical.py
  7. +1 −1 tensorflow/contrib/distributions/python/ops/dirichlet.py
  8. +1 −1 tensorflow/contrib/distributions/python/ops/dirichlet_multinomial.py
  9. +1 −1 tensorflow/contrib/distributions/python/ops/distribution.py
  10. +5 −5 tensorflow/contrib/distributions/python/ops/distribution_util.py
  11. +1 −1 tensorflow/contrib/distributions/python/ops/exponential.py
  12. +1 −1 tensorflow/contrib/distributions/python/ops/gumbel.py
  13. +1 −1 tensorflow/contrib/distributions/python/ops/laplace.py
  14. +1 −1 tensorflow/contrib/distributions/python/ops/logistic.py
  15. +3 −3 tensorflow/contrib/distributions/python/ops/mixture.py
  16. +1 −1 tensorflow/contrib/distributions/python/ops/multinomial.py
  17. +2 −2 tensorflow/contrib/distributions/python/ops/mvn.py
  18. +1 −1 tensorflow/contrib/distributions/python/ops/normal.py
  19. +1 −1 tensorflow/contrib/distributions/python/ops/onehot_categorical.py
  20. +8 −9 tensorflow/contrib/distributions/python/ops/operator_pd.py
  21. +1 −1 tensorflow/contrib/distributions/python/ops/operator_pd_diag.py
  22. +1 −1 tensorflow/contrib/distributions/python/ops/operator_pd_vdvt_update.py
  23. +1 −1 tensorflow/contrib/distributions/python/ops/relaxed_onehot_categorical.py
  24. +2 −3 tensorflow/contrib/distributions/python/ops/shape.py
  25. +2 −2 tensorflow/contrib/distributions/python/ops/student_t.py
  26. +7 −5 tensorflow/contrib/distributions/python/ops/transformed_distribution.py
  27. +1 −1 tensorflow/contrib/distributions/python/ops/uniform.py
  28. +11 −11 tensorflow/contrib/distributions/python/ops/wishart.py
  29. +1 −1 tensorflow/contrib/factorization/python/ops/clustering_ops.py
  30. +3 −3 tensorflow/contrib/factorization/python/ops/factorization_ops.py
  31. +1 −2 tensorflow/contrib/factorization/python/ops/gmm.py
  32. +7 −7 tensorflow/contrib/factorization/python/ops/gmm_ops.py
  33. +7 −6 tensorflow/contrib/grid_rnn/python/ops/grid_rnn_cell.py
  34. +1 −1 tensorflow/contrib/image/python/ops/image_ops.py
  35. +1 −1 tensorflow/contrib/labeled_tensor/python/ops/ops.py
  36. +1 −1 tensorflow/contrib/labeled_tensor/python/ops/ops_test.py
  37. +5 −5 tensorflow/contrib/layers/python/layers/embedding_ops.py
  38. +1 −1 tensorflow/contrib/layers/python/layers/feature_column_ops.py
  39. +4 −4 tensorflow/contrib/layers/python/layers/layers.py
  40. +2 −2 tensorflow/contrib/layers/python/layers/target_column.py
  41. +3 −3 tensorflow/contrib/learn/python/learn/estimators/dynamic_rnn_estimator.py
  42. +1 −1 tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
  43. +1 −1 tensorflow/contrib/learn/python/learn/estimators/head.py
  44. +1 −1 tensorflow/contrib/learn/python/learn/estimators/kmeans.py
  45. +7 −7 tensorflow/contrib/legacy_seq2seq/python/kernel_tests/seq2seq_test.py
  46. +2 −2 tensorflow/contrib/legacy_seq2seq/python/ops/seq2seq.py
  47. +1 −1 tensorflow/contrib/linalg/python/kernel_tests/linear_operator_diag_test.py
  48. +1 −1 tensorflow/contrib/linalg/python/ops/linear_operator_composition.py
  49. +1 −1 tensorflow/contrib/linalg/python/ops/linear_operator_diag.py
  50. +2 −3 tensorflow/contrib/linalg/python/ops/linear_operator_identity.py
  51. +2 −2 tensorflow/contrib/linalg/python/ops/linear_operator_test_util.py
  52. +5 −5 tensorflow/contrib/metrics/python/ops/metric_ops.py
  53. +5 −5 tensorflow/contrib/metrics/python/ops/metric_ops_test.py
  54. +2 −2 tensorflow/contrib/ndlstm/python/lstm2d.py
  55. +1 −1 tensorflow/contrib/ndlstm/python/misc.py
  56. +1 −1 tensorflow/contrib/opt/python/training/external_optimizer.py
  57. +1 −1 tensorflow/contrib/rnn/python/kernel_tests/core_rnn_test.py
  58. +1 −1 tensorflow/contrib/rnn/python/kernel_tests/fused_rnn_cell_test.py
  59. +3 −3 tensorflow/contrib/rnn/python/kernel_tests/rnn_cell_test.py
  60. +1 −1 tensorflow/contrib/rnn/python/ops/core_rnn.py
  61. +4 −4 tensorflow/contrib/rnn/python/ops/core_rnn_cell_impl.py
  62. +2 −2 tensorflow/contrib/rnn/python/ops/gru_ops.py
  63. +3 −3 tensorflow/contrib/rnn/python/ops/lstm_ops.py
  64. +1 −1 tensorflow/contrib/rnn/python/ops/rnn.py
  65. +17 −17 tensorflow/contrib/rnn/python/ops/rnn_cell.py
  66. +3 −3 tensorflow/contrib/seq2seq/python/ops/attention_decoder_fn.py
  67. +2 −2 tensorflow/contrib/slim/python/slim/data/tfexample_decoder.py
  68. +9 −9 tensorflow/contrib/slim/python/slim/nets/inception_v1.py
  69. +10 −10 tensorflow/contrib/slim/python/slim/nets/inception_v2.py
  70. +16 −16 tensorflow/contrib/slim/python/slim/nets/inception_v3.py
  71. +1 −1 tensorflow/contrib/solvers/python/ops/lanczos.py
  72. +1 −1 tensorflow/contrib/specs/python/specs_ops.py
  73. +1 −1 tensorflow/contrib/tensor_forest/hybrid/python/layers/fully_connected.py
  74. +2 −2 tensorflow/contrib/tensor_forest/python/ops/data_ops.py
  75. +3 −3 tensorflow/contrib/tensor_forest/python/tensor_forest.py
  76. +4 −4 tensorflow/contrib/tensor_forest/python/topn.py
  77. +1 −1 tensorflow/contrib/training/python/training/resample.py
  78. +6 −6 tensorflow/contrib/training/python/training/sequence_queueing_state_saver.py
  79. +2 −2 tensorflow/examples/udacity/6_lstm.ipynb
  80. +1 −1 tensorflow/python/framework/function_test.py
  81. +5 −5 tensorflow/python/framework/tensor_util_test.py
  82. +2 −2 tensorflow/python/kernel_tests/confusion_matrix_test.py
  83. +3 −3 tensorflow/python/kernel_tests/control_flow_ops_py_test.py
  84. +1 −1 tensorflow/python/kernel_tests/embedding_ops_test.py
  85. +1 −1 tensorflow/python/kernel_tests/large_concat_op_test.py
  86. +5 −5 tensorflow/python/kernel_tests/metrics_test.py
  87. +7 −7 tensorflow/python/kernel_tests/partitioned_variables_test.py
  88. +2 −2 tensorflow/python/kernel_tests/reshape_op_test.py
  89. +2 −2 tensorflow/python/kernel_tests/svd_op_test.py
  90. +11 −16 tensorflow/python/ops/array_grad.py
  91. +1 −1 tensorflow/python/ops/concat_benchmark.py
  92. +3 −3 tensorflow/python/ops/control_flow_ops.py
  93. +4 −4 tensorflow/python/ops/embedding_ops.py
  94. +2 −2 tensorflow/python/ops/gradients_impl.py
  95. +5 −5 tensorflow/python/ops/gradients_test.py
  96. +3 −3 tensorflow/python/ops/image_ops_impl.py
  97. +1 −1 tensorflow/python/ops/linalg_grad.py
  98. +2 −2 tensorflow/python/ops/linalg_ops.py
  99. +2 −2 tensorflow/python/ops/math_grad.py
  100. +4 −4 tensorflow/python/ops/math_grad_test.py
  101. +4 −5 tensorflow/python/ops/math_ops.py
  102. +3 −3 tensorflow/python/ops/metrics_impl.py
  103. +6 −6 tensorflow/python/ops/nn_grad.py
  104. +8 −8 tensorflow/python/ops/nn_impl.py
  105. +5 −6 tensorflow/python/ops/nn_ops.py
  106. +1 −1 tensorflow/python/ops/random_ops.py
  107. +1 −1 tensorflow/python/ops/rnn.py
  108. +4 −3 tensorflow/python/ops/sparse_grad.py
  109. +7 −7 tensorflow/python/ops/sparse_ops.py
  110. +3 −3 tensorflow/python/ops/standard_ops.py
  111. +1 −1 tensorflow/python/ops/variables.py
  112. +1 −1 tensorflow/python/training/monitored_session.py
  113. +3 −3 tensorflow/python/training/saver_test.py
@@ -61,7 +61,7 @@ def LSTMCell(weights, m_prev, c_prev, x, pad):
"""
# Apply weights to the input and previous hidden state.
# The matmul here is the "big" operation.
- xm = array_ops.concat_v2([x, m_prev], 1)
+ xm = array_ops.concat([x, m_prev], 1)
xmw = math_ops.matmul(xm, weights)
# Element-wise ops for the standard LSTM cell, with clipped activations.
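The hunk above is the fused-gate LSTM pattern: concatenating the input with the previous hidden state lets a single matmul produce all four gate pre-activations at once. A hedged sketch of that pattern (shapes and variable names are illustrative, not taken from lstm.py):

    import tensorflow as tf

    batch, num_inputs, num_nodes = 32, 4, 8
    x = tf.random_normal([batch, num_inputs])
    m_prev = tf.random_normal([batch, num_nodes])
    # One weight matrix covers all four gates (i, c, f, o).
    weights = tf.get_variable("w", [num_inputs + num_nodes, 4 * num_nodes])

    xm = tf.concat([x, m_prev], 1)          # (batch, num_inputs + num_nodes)
    xmw = tf.matmul(xm, weights)            # the one "big" matmul
    i, c, f, o = tf.split(xmw, 4, axis=1)   # per-gate pre-activations
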
@@ -58,7 +58,7 @@ def testFloat(self):
def testConcat(self):
self._testNAry(
- lambda x: array_ops.concat_v2(x, 0), [
+ lambda x: array_ops.concat(x, 0), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
@@ -67,7 +67,7 @@ def testConcat(self):
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32))
self._testNAry(
- lambda x: array_ops.concat_v2(x, 1), [
+ lambda x: array_ops.concat(x, 1), [
np.array(
[[1, 2, 3], [4, 5, 6]], dtype=np.float32), np.array(
[[7, 8, 9], [10, 11, 12]], dtype=np.float32)
@@ -519,7 +519,7 @@ def testBijector(self):
def testShapeGetters(self):
with self.test_session():
bijector = bijectors.Inline(
- forward_event_shape_fn=lambda x: array_ops.concat_v2((x, [1]), 0),
+ forward_event_shape_fn=lambda x: array_ops.concat((x, [1]), 0),
get_forward_event_shape_fn=lambda x: x.as_list() + [1],
inverse_event_shape_fn=lambda x: x[:-1],
get_inverse_event_shape_fn=lambda x: x[:-1],
@@ -118,7 +118,7 @@ def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- new_shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
+ new_shape = array_ops.concat(([n], self.batch_shape()), 0)
uniform = random_ops.random_uniform(
new_shape, seed=seed, dtype=self.p.dtype)
sample = math_ops.less(uniform, self.p)
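Most of the distribution changes below repeat one motif: concat builds a dynamic shape vector by prepending the sample count n to a batch shape known only at runtime. A standalone sketch of that motif (not lifted from bernoulli.py):

    import tensorflow as tf

    n = 5
    p = tf.placeholder(tf.float32, [None, 3])  # batch shape known only at runtime

    # new_shape = [n] ++ shape(p), assembled at graph-run time.
    new_shape = tf.concat(([n], tf.shape(p)), 0)
    uniform = tf.random_uniform(new_shape, dtype=p.dtype)
    sample = tf.cast(tf.less(uniform, p), tf.int32)  # Bernoulli(p) draws
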
@@ -1378,8 +1378,7 @@ def __init__(self, tril, v, diag=None, validate_args=False):
id_shape = v_shape[:-2] + [v_shape[-1], v_shape[-1]]
else:
v_shape = array_ops.shape(v)
- id_shape = array_ops.concat_v2(
- [v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
+ id_shape = array_ops.concat([v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
self._d = operator_pd_identity.OperatorPDIdentity(
id_shape, v.dtype, verify_pd=self.validate_args)
self._d_inv = self._d
@@ -1740,7 +1739,7 @@ def _create_scale_operator(self, identity_multiplier, diag, tril,
return identity_multiplier
# Infer the shape from the V and D.
v_shape = array_ops.shape(perturb_factor)
- identity_shape = array_ops.concat_v2((v_shape[:-1], (v_shape[-2],)), 0)
+ identity_shape = array_ops.concat((v_shape[:-1], (v_shape[-2],)), 0)
scaled_identity = operator_pd_identity.OperatorPDIdentity(
identity_shape,
perturb_factor.dtype.base_dtype,
@@ -1796,9 +1795,10 @@ def _process_matrix(self, matrix, min_rank, event_ndims):
math_ops.equal(array_ops.rank(matrix), min_rank),
math_ops.equal(event_ndims, 1))
left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
- pad = array_ops.concat_v2([
- array_ops.ones([left], dtype=dtypes.int32),
- array_ops.shape(matrix)], 0)
+ pad = array_ops.concat(
+ [array_ops.ones(
+ [left], dtype=dtypes.int32), array_ops.shape(matrix)],
+ 0)
return array_ops.reshape(matrix, pad)
def _infer_batch_ndims(self):
@@ -2221,7 +2221,7 @@ def _forward(self, x):
ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
else array_ops.rank(y))
y = array_ops.pad(y,
- paddings=array_ops.concat_v2(
+ paddings=array_ops.concat(
(array_ops.zeros(
(ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
0))
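The padding variant of the same motif: the paddings argument to tf.pad is itself assembled with concat, all-zero rows for the leading axes plus [0, 1] for the last axis. An illustrative sketch (shapes hypothetical):

    import tensorflow as tf

    y = tf.random_normal([2, 3])
    ndims = 2
    # [[0, 0], [0, 1]]: pad nothing on axis 0, one trailing zero on axis 1.
    paddings = tf.concat(
        (tf.zeros((ndims - 1, 2), dtype=tf.int32), [[0, 1]]), 0)
    padded = tf.pad(y, paddings=paddings)  # shape (2, 4)
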
@@ -2265,14 +2265,12 @@ def _inverse(self, y):
depth=ndims,
on_value=shape[-1]-np.array(1, dtype=shape.dtype),
dtype=shape.dtype)
- size = array_ops.concat_v2(
- (shape[:-1], np.asarray(
- [1], dtype=shape.dtype)), 0)
+ size = array_ops.concat((shape[:-1], np.asarray([1], dtype=shape.dtype)), 0)
log_normalization = -array_ops.strided_slice(x, begin, begin + size)
# Here we slice out all but the last coordinate; see above for idea.
begin = array_ops.zeros_like(shape)
- size = array_ops.concat_v2((shape[:-1], [shape[-1] - 1]), 0)
+ size = array_ops.concat((shape[:-1], [shape[-1] - 1]), 0)
x = array_ops.strided_slice(x, begin, begin + size)
x += log_normalization
@@ -189,7 +189,7 @@ def _sample_n(self, n, seed=None):
samples = math_ops.cast(samples, self.dtype)
ret = array_ops.reshape(
array_ops.transpose(samples),
- array_ops.concat_v2(([n], self.batch_shape()), 0))
+ array_ops.concat(([n], self.batch_shape()), 0))
return ret
def _log_prob(self, k):
@@ -238,7 +238,7 @@ def _mode(self):
math_ops.cast(self.event_shape()[0], self.dtype)))
if self.allow_nan_stats:
nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
- shape = array_ops.concat_v2((self.batch_shape(), self.event_shape()), 0)
+ shape = array_ops.concat((self.batch_shape(), self.event_shape()), 0)
return array_ops.where(
math_ops.greater(self.alpha, 1.),
mode,
@@ -235,7 +235,7 @@ def _sample_n(self, n, seed=None):
seed=distribution_util.gen_new_seed(seed, salt="dirichlet_multinomial"))
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
reduction_indices=-2)
- final_shape = array_ops.concat_v2([[n], self.batch_shape(), [k]], 0)
+ final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
return array_ops.reshape(x, final_shape)
@distribution_util.AppendDocstring(_dirichlet_multinomial_prob_note)
@@ -580,7 +580,7 @@ def sample(self, sample_shape=(), seed=None, name="sample",
sample_shape, "sample_shape")
samples = self._sample_n(n, seed, **condition_kwargs)
batch_event_shape = array_ops.shape(samples)[1:]
- final_shape = array_ops.concat_v2([sample_shape, batch_event_shape], 0)
+ final_shape = array_ops.concat([sample_shape, batch_event_shape], 0)
samples = array_ops.reshape(samples, final_shape)
samples = self._set_sample_static_shape(samples, sample_shape)
return samples
@@ -126,10 +126,10 @@ def same_dynamic_shape(a, b):
# static shape inference may break the equality comparison between
# shape(a) and shape(b) in math_ops.equal.
lambda: math_ops.reduce_all(math_ops.equal(
- array_ops.concat_v2((
+ array_ops.concat((
array_ops.shape(a),
array_ops.shape(b)), 0),
- array_ops.concat_v2((
+ array_ops.concat((
array_ops.shape(b),
array_ops.shape(a)), 0))),
lambda: constant_op.constant(False))
@@ -371,7 +371,7 @@ def rotate_transpose(x, shift, name="rotate_transpose"):
ndims - math_ops.mod(shift, ndims))
first = math_ops.range(0, shift)
last = math_ops.range(shift, ndims)
- perm = array_ops.concat_v2((last, first), 0)
+ perm = array_ops.concat((last, first), 0)
return array_ops.transpose(x, perm=perm)
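And the permutation variant used by rotate_transpose above: concat stitches two index ranges into a perm vector for transpose, rotating the axes. A sketch with illustrative values:

    import tensorflow as tf

    x = tf.random_normal([2, 3, 4])
    ndims, shift = 3, 1
    first = tf.range(0, shift)            # [0]
    last = tf.range(shift, ndims)         # [1, 2]
    perm = tf.concat((last, first), 0)    # [1, 2, 0]
    rotated = tf.transpose(x, perm=perm)  # shape (3, 4, 2)
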
@@ -427,7 +427,7 @@ def pick_vector(cond,
false_vector.name, false_vector.dtype))
n = array_ops.shape(true_vector)[0]
return array_ops.slice(
- array_ops.concat_v2((true_vector, false_vector), 0),
+ array_ops.concat((true_vector, false_vector), 0),
[array_ops.where(cond, 0, n)], [array_ops.where(cond, n, -1)])
@@ -558,7 +558,7 @@ def tril_ids(n):
# Gather up, reshape, and return.
y = array_ops.reshape(x, [-1, d])
y = array_ops.gather_nd(y, idx)
- y = array_ops.reshape(y, array_ops.concat_v2([batch_shape, [n, n]], 0))
+ y = array_ops.reshape(y, array_ops.concat([batch_shape, [n, n]], 0))
y = array_ops.matrix_band_part(y, -1, 0)
y.set_shape(y.get_shape().merge_with(final_shape))
return y
@@ -89,7 +89,7 @@ def lam(self):
return self._lam
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self._lam)), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self._lam)), 0)
# Sample uniformly-at-random from the open-interval (0, 1).
sampled = random_ops.random_uniform(
shape,
@@ -158,7 +158,7 @@ def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
np_dtype = self.dtype.as_numpy_dtype()
minval = np.nextafter(np_dtype(0), np_dtype(1))
uniform = random_ops.random_uniform(shape=shape,
@@ -125,7 +125,7 @@ def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], self.batch_shape()), 0)
+ shape = array_ops.concat(([n], self.batch_shape()), 0)
# Sample uniformly-at-random from the open-interval (-1, 1).
uniform_samples = random_ops.random_uniform(
shape=shape,
@@ -157,7 +157,7 @@ def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
np_dtype = self.dtype.as_numpy_dtype()
minval = np.nextafter(np_dtype(0), np_dtype(1))
uniform = random_ops.random_uniform(shape=shape,
@@ -330,7 +330,7 @@ def _sample_n(self, n, seed=None):
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
- array_ops.concat_v2(([n_class * batch_size], event_shape), 0))
+ array_ops.concat(([n_class * batch_size], event_shape), 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
@@ -341,8 +341,8 @@ def _sample_n(self, n, seed=None):
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
- array_ops.concat_v2((samples_shape,
- self.event_shape()), 0))
+ array_ops.concat((samples_shape,
+ self.event_shape()), 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.get_event_shape()))
@@ -229,7 +229,7 @@ def _sample_n(self, n, seed=None):
x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
reduction_indices=-2) # shape: [B, n, k]
x = array_ops.transpose(x, perm=[1, 0, 2])
- final_shape = array_ops.concat_v2([[n], self.batch_shape(), [k]], 0)
+ final_shape = array_ops.concat([[n], self.batch_shape(), [k]], 0)
return array_ops.reshape(x, final_shape)
@distribution_util.AppendDocstring(_multinomial_prob_note)
@@ -229,7 +229,7 @@ def _get_event_shape(self):
def _sample_n(self, n, seed=None):
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
- shape = array_ops.concat_v2([self._cov.vector_shape(), [n]], 0)
+ shape = array_ops.concat([self._cov.vector_shape(), [n]], 0)
white_samples = random_ops.random_normal(shape=shape,
mean=0.,
stddev=1.,
@@ -239,7 +239,7 @@ def _sample_n(self, n, seed=None):
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
- perm = array_ops.concat_v2(
+ perm = array_ops.concat(
(array_ops.stack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)), 0)
@@ -157,7 +157,7 @@ def _get_event_shape(self):
return tensor_shape.scalar()
def _sample_n(self, n, seed=None):
- shape = array_ops.concat_v2(([n], array_ops.shape(self.mean())), 0)
+ shape = array_ops.concat(([n], array_ops.shape(self.mean())), 0)
sampled = random_ops.random_normal(
shape=shape, mean=0, stddev=1, dtype=self.mu.dtype, seed=seed)
return sampled * self.sigma + self.mu
@@ -186,7 +186,7 @@ def _get_event_shape(self):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
- sample_shape = array_ops.concat_v2(([n], array_ops.shape(self.logits)), 0)
+ sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
logits = self.logits
if logits.get_shape().ndims == 2:
logits_2d = logits
@@ -428,7 +428,7 @@ def vector_shape(self, name="vector_shape"):
# Derived classes get this "for free" once .shape() is implemented.
with ops.name_scope(self.name):
with ops.name_scope(name, values=self.inputs):
- return array_ops.concat_v2(
+ return array_ops.concat(
(self.batch_shape(), [self.vector_space_dimension()]), 0)
def vector_space_dimension(self, name="vector_space_dimension"):
@@ -703,12 +703,11 @@ def _flip_matrix_to_vector_dynamic(mat, batch_shape):
"""Flip matrix to vector with dynamic shapes."""
mat_rank = array_ops.rank(mat)
k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
- final_shape = array_ops.concat_v2((batch_shape, [k]), 0)
+ final_shape = array_ops.concat((batch_shape, [k]), 0)
# mat.shape = matrix_batch_shape + [k, M]
# Permutation corresponding to [M] + matrix_batch_shape + [k]
- perm = array_ops.concat_v2(
- ([mat_rank - 1], math_ops.range(0, mat_rank - 1)), 0)
+ perm = array_ops.concat(([mat_rank - 1], math_ops.range(0, mat_rank - 1)), 0)
mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
return vector
@@ -779,12 +778,12 @@ def _flip_vector_to_matrix_dynamic(vec, batch_shape):
# If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
k = array_ops.gather(vec_shape, vec_rank - 1)
- new_shape = array_ops.concat_v2((batch_shape, [k], condensed_shape), 0)
+ new_shape = array_ops.concat((batch_shape, [k], condensed_shape), 0)
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
- perm = array_ops.concat_v2(
- (math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
+ perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
+ 0)
return array_ops.transpose(vec, perm=perm)
x_flipped = control_flow_ops.cond(
@@ -817,8 +816,8 @@ def _flip_vector_to_matrix_static(vec, batch_shape):
def _flip_front_dims_to_back():
# Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
- perm = array_ops.concat_v2(
- (math_ops.range(m, vec_rank), math_ops.range(0, m)), 0)
+ perm = array_ops.concat((math_ops.range(m, vec_rank), math_ops.range(0, m)),
+ 0)
return array_ops.transpose(vec, perm=perm)
if 0 < m:
@@ -82,7 +82,7 @@ def get_shape(self):
def _shape(self):
d_shape = array_ops.shape(self._diag)
k = array_ops.gather(d_shape, array_ops.size(d_shape) - 1)
- return array_ops.concat_v2((d_shape, [k]), 0)
+ return array_ops.concat((d_shape, [k]), 0)
@abc.abstractmethod
def _batch_log_det(self):
@@ -147,7 +147,7 @@ def _get_identity_operator(self, v):
v_rank = array_ops.rank(v)
v_batch_shape = array_ops.strided_slice(v_shape, [0], [v_rank - 2])
r = array_ops.gather(v_shape, v_rank - 1) # Last dim of v
- id_shape = array_ops.concat_v2((v_batch_shape, [r, r]), 0)
+ id_shape = array_ops.concat((v_batch_shape, [r, r]), 0)
return operator_pd_identity.OperatorPDIdentity(
id_shape, v.dtype, verify_pd=self._verify_pd)
@@ -242,7 +242,7 @@ def _get_event_shape(self):
return self.logits.get_shape().with_rank_at_least(1)[-1:]
def _sample_n(self, n, seed=None):
- sample_shape = array_ops.concat_v2(([n], array_ops.shape(self.logits)), 0)
+ sample_shape = array_ops.concat(([n], array_ops.shape(self.logits)), 0)
logits = self.logits * array_ops.ones(sample_shape)
if logits.get_shape().ndims == 2:
logits_2d = logits
@@ -387,7 +387,7 @@ def make_batch_of_event_sample_matrices(
if expand_batch_dim:
batch_shape = distribution_util.pick_vector(
self._batch_ndims_is_0, [1], batch_shape)
- new_shape = array_ops.concat_v2([[-1], batch_shape, event_shape], 0)
+ new_shape = array_ops.concat([[-1], batch_shape, event_shape], 0)
x = array_ops.reshape(x, shape=new_shape)
x = distribution_util.rotate_transpose(x, shift=-1)
return x, sample_shape
@@ -437,8 +437,7 @@ def undo_make_batch_of_event_sample_matrices(
math_ops.logical_and(expand_batch_dim, self._batch_ndims_is_0),
2, 1 + self.batch_ndims)
event_shape = s[event_start:event_start+self.event_ndims]
- new_shape = array_ops.concat_v2(
- (sample_shape, batch_shape, event_shape), 0)
+ new_shape = array_ops.concat((sample_shape, batch_shape, event_shape), 0)
x = array_ops.reshape(x, shape=new_shape)
return x
@@ -196,7 +196,7 @@ def _sample_n(self, n, seed=None):
# Y = X / sqrt(Z / df)
# then:
# Y ~ StudentT(df).
- shape = array_ops.concat_v2([[n], self.batch_shape()], 0)
+ shape = array_ops.concat([[n], self.batch_shape()], 0)
normal_sample = random_ops.random_normal(shape, dtype=self.dtype, seed=seed)
df = self.df * array_ops.ones(self.batch_shape(), dtype=self.dtype)
gamma_sample = random_ops.random_gamma(
@@ -235,7 +235,7 @@ def _cdf(self, x):
def _entropy(self):
v = array_ops.ones(self.batch_shape(), dtype=self.dtype)[..., None]
u = v * self.df[..., None]
- beta_arg = array_ops.concat_v2([u, v], -1) / 2.
+ beta_arg = array_ops.concat([u, v], -1) / 2.
return (math_ops.log(math_ops.abs(self.sigma)) +
0.5 * math_ops.log(self.df) +
special_math_ops.lbeta(beta_arg) +