Fix warning about keep_dims. keep_dims -> keepdims for tf.reduce_sum(). #16876

Merged (2 commits) on Feb 12, 2018
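For reference, a minimal sketch of the rename at a call site, assuming TensorFlow 1.x (where both argument spellings are still accepted and the old one only triggers a deprecation warning):

```python
import tensorflow as tf  # assumes TensorFlow 1.x, where both spellings exist

x = tf.constant([[1.0, 2.0],
                 [3.0, 4.0]])

# Old argument name: still works in 1.x, but logs a deprecation warning
# along the lines of "keep_dims is deprecated, use keepdims instead".
old = tf.reduce_sum(x, axis=1, keep_dims=True)

# New argument name used throughout this PR: same result, no warning.
new = tf.reduce_sum(x, axis=1, keepdims=True)

# Either way the reduced axis is kept with size 1, so the result has
# shape (2, 1) rather than (2,).
with tf.Session() as sess:
    print(sess.run(new))  # [[3.], [7.]]
```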
4 changes: 2 additions & 2 deletions tensorflow/contrib/boosted_trees/python/utils/losses.py
@@ -78,7 +78,7 @@ def per_example_maxent_loss(labels, weights, logits, num_classes, eps=1e-15):

# Calculate softmax probabilities for each class.
unnormalized_probs = math_ops.exp(logits)
- normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keep_dims=True)
+ normalizers = math_ops.reduce_sum(unnormalized_probs, 1, keepdims=True)
softmax_predictions = math_ops.divide(unnormalized_probs,
math_ops.add(normalizers, eps))

@@ -120,7 +120,7 @@ def per_example_squared_loss(labels, weights, predictions):
update_op: An update operation to update the loss's internal state.
"""
unweighted_loss = math_ops.reduce_sum(
- math_ops.square(predictions - labels), 1, keep_dims=True)
+ math_ops.square(predictions - labels), 1, keepdims=True)

return unweighted_loss * weights, control_flow_ops.no_op()

@@ -278,7 +278,7 @@ def _log_prob(self, x):
* math_ops.log(self.temperature))
# compute the unnormalized density
log_softmax = nn_ops.log_softmax(logits_2d - x_2d * self._temperature_2d)
- log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keep_dims=False)
+ log_unnorm_prob = math_ops.reduce_sum(log_softmax, [-1], keepdims=False)
# combine unnormalized density with normalization constant
log_prob = log_norm_const + log_unnorm_prob
# Reshapes log_prob to be consistent with shape of user-supplied logits
4 changes: 2 additions & 2 deletions tensorflow/contrib/factorization/python/ops/clustering_ops.py
@@ -192,11 +192,11 @@ def _compute_euclidean_distance(cls, inputs, clusters):
# Computes Euclidean distance. Note the first and third terms are
# broadcast additions.
squared_distance = (
- math_ops.reduce_sum(math_ops.square(inp), 1, keep_dims=True) -
+ math_ops.reduce_sum(math_ops.square(inp), 1, keepdims=True) -
2 * math_ops.matmul(inp, clusters, transpose_b=True) +
array_ops.transpose(
math_ops.reduce_sum(
- math_ops.square(clusters), 1, keep_dims=True)))
+ math_ops.square(clusters), 1, keepdims=True)))
output.append(squared_distance)

return output
8 changes: 4 additions & 4 deletions tensorflow/contrib/layers/python/layers/layers.py
@@ -779,7 +779,7 @@ def batch_norm(inputs,
else:
if data_format == DATA_FORMAT_NCHW:
mean, variance = nn.weighted_moments(
- inputs, moments_axes, batch_weights, keep_dims=True)
+ inputs, moments_axes, batch_weights, keepdims=True)
mean = array_ops.reshape(mean, [-1])
variance = array_ops.reshape(variance, [-1])
else:
@@ -2834,9 +2834,9 @@ def spatial_softmax(features,

softmax_attention = nn.softmax(features / temperature)
expected_x = math_ops.reduce_sum(
- pos_x * softmax_attention, [1], keep_dims=True)
+ pos_x * softmax_attention, [1], keepdims=True)
expected_y = math_ops.reduce_sum(
- pos_y * softmax_attention, [1], keep_dims=True)
+ pos_y * softmax_attention, [1], keepdims=True)
expected_xy = array_ops.concat([expected_x, expected_y], 1)
feature_keypoints = array_ops.reshape(expected_xy,
[-1, num_channels.value * 2])
@@ -2969,7 +2969,7 @@ def poincare_normalize(x, axis=1, epsilon=1e-5, name=None):
"""
with ops.name_scope(name, 'poincare_normalize', [x]) as name:
x = ops.convert_to_tensor(x, name='x')
- square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keep_dims=True)
+ square_sum = math_ops.reduce_sum(math_ops.square(x), axis, keepdims=True)
x_inv_norm = math_ops.rsqrt(square_sum)
x_inv_norm = math_ops.minimum((1. - epsilon) * x_inv_norm, 1.)
return math_ops.multiply(x, x_inv_norm, name=name)
26 changes: 13 additions & 13 deletions tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py
@@ -53,12 +53,12 @@ def pairwise_distance(feature, squared=False):
math_ops.reduce_sum(
math_ops.square(feature),
axis=[1],
- keep_dims=True),
+ keepdims=True),
math_ops.reduce_sum(
math_ops.square(
array_ops.transpose(feature)),
axis=[0],
- keep_dims=True)) - 2.0 * math_ops.matmul(
+ keepdims=True)) - 2.0 * math_ops.matmul(
feature, array_ops.transpose(feature))

# Deal with numerical inaccuracies. Set small negatives to zero.
@@ -132,10 +132,10 @@ def masked_maximum(data, mask, dim=1):
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
- axis_minimums = math_ops.reduce_min(data, dim, keep_dims=True)
+ axis_minimums = math_ops.reduce_min(data, dim, keepdims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(
- data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums
+ data - axis_minimums, mask), dim, keepdims=True) + axis_minimums
return masked_maximums


@@ -151,10 +151,10 @@ def masked_minimum(data, mask, dim=1):
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
- axis_maximums = math_ops.reduce_max(data, dim, keep_dims=True)
+ axis_maximums = math_ops.reduce_max(data, dim, keepdims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(
- data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums
+ data - axis_maximums, mask), dim, keepdims=True) + axis_maximums
return masked_minimums


@@ -203,7 +203,7 @@ def triplet_semihard_loss(labels, embeddings, margin=1.0):
math_ops.greater(
math_ops.reduce_sum(
math_ops.cast(
- mask, dtype=dtypes.float32), 1, keep_dims=True),
+ mask, dtype=dtypes.float32), 1, keepdims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)

@@ -290,7 +290,7 @@ def npairs_loss(labels, embeddings_anchor, embeddings_positive,

labels_remapped = math_ops.to_float(
math_ops.equal(labels, array_ops.transpose(labels)))
- labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
+ labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)

# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
@@ -395,7 +395,7 @@ def npairs_loss_multilabel(sparse_labels, embeddings_anchor,

multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
- labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
+ labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keepdims=True)

# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
@@ -448,10 +448,10 @@ def lifted_struct_loss(labels, embeddings, margin=1.0):
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
# this is to take the max only among negatives.
- row_minimums = math_ops.reduce_min(diff, 1, keep_dims=True)
+ row_minimums = math_ops.reduce_min(diff, 1, keepdims=True)
row_negative_maximums = math_ops.reduce_max(
math_ops.multiply(
- diff - row_minimums, mask), 1, keep_dims=True) + row_minimums
+ diff - row_minimums, mask), 1, keepdims=True) + row_minimums

# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
@@ -470,7 +470,7 @@ def lifted_struct_loss(labels, embeddings, margin=1.0):
math_ops.reduce_sum(math_ops.multiply(
math_ops.exp(
diff_tiled - max_elements_vect),
- mask_tiled), 1, keep_dims=True), [batch_size, batch_size])
+ mask_tiled), 1, keepdims=True), [batch_size, batch_size])

loss_mat = max_elements + math_ops.log(
loss_exp_left + array_ops.transpose(loss_exp_left))
@@ -686,7 +686,7 @@ def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
array_ops.reshape(pairwise_distances_candidate, [1, -1])
], 0),
axis=0,
- keep_dims=True), [num_candidates, -1]),
+ keepdims=True), [num_candidates, -1]),
axis=1)

nmi_scores = array_ops.zeros([num_candidates])
2 changes: 1 addition & 1 deletion tensorflow/contrib/signal/python/ops/spectral_ops.py
@@ -144,7 +144,7 @@ def inverse_stft_window_fn_inner(frame_length, dtype):
overlaps = -(-frame_length // frame_step) # Ceiling division.
denom = array_ops.pad(denom, [(0, overlaps * frame_step - frame_length)])
denom = array_ops.reshape(denom, [overlaps, frame_step])
- denom = math_ops.reduce_sum(denom, 0, keep_dims=True)
+ denom = math_ops.reduce_sum(denom, 0, keepdims=True)
denom = array_ops.tile(denom, [overlaps, 1])
denom = array_ops.reshape(denom, [overlaps * frame_step])

2 changes: 1 addition & 1 deletion tensorflow/examples/udacity/5_word2vec.ipynb
@@ -455,7 +455,7 @@
" \n",
" # Compute the similarity between minibatch examples and all embeddings.\n",
" # We use the cosine distance:\n",
" norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))\n",
" norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))\n",
" normalized_embeddings = embeddings / norm\n",
" valid_embeddings = tf.nn.embedding_lookup(\n",
" normalized_embeddings, valid_dataset)\n",
2 changes: 1 addition & 1 deletion tensorflow/python/framework/function_test.py
@@ -1458,7 +1458,7 @@ def testFoo(self):
def Cell(v):
# If v is a vector [n, 1], x is a big square matrix.
x = math_ops.tanh(v + array_ops.transpose(v, [1, 0]))
- return math_ops.reduce_sum(x, 1, keep_dims=True)
+ return math_ops.reduce_sum(x, 1, keepdims=True)

@function.Defun(dtype)
def Forward(x):