
Merge pull request #24688 from BoboTiG:fix-invalid-seq-warnings
PiperOrigin-RevId: 228216549
tensorflower-gardener committed Jan 7, 2019
2 parents c7c4a42 + 75f12a5 commit ed60e82
Showing 14 changed files with 54 additions and 59 deletions.
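Every file in this commit gets the same class of fix: string literals containing regex or shell escapes such as \(, \., \[ or \P become raw strings, silencing Python's "invalid escape sequence" warning (a DeprecationWarning since Python 3.6, upgraded to SyntaxWarning in 3.12). Python leaves unrecognized escapes in the string unchanged, which is why these patterns always worked at runtime. A minimal standalone sketch of the warning and the fix, not taken from the patch:

    import warnings

    warnings.simplefilter("error")  # promote warnings to exceptions for the demo

    # Compiling a literal with an undefined escape such as '\(' now complains
    # (DeprecationWarning up to 3.11; SyntaxWarning/SyntaxError on 3.12+).
    try:
        compile(r"p = 'Batch size \(first dimension\)'", "<demo>", "exec")
    except (DeprecationWarning, SyntaxWarning, SyntaxError) as exc:
        print("plain literal warns:", exc)

    # The raw-string spelling compiles cleanly and denotes the identical value.
    compile(r"p = r'Batch size \(first dimension\)'", "<demo>", "exec")
    assert r"\(" == "\\("  # same two characters; the r form just says so plainly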
2 changes: 1 addition & 1 deletion tensorflow/contrib/kernel_methods/python/losses.py
@@ -34,7 +34,7 @@ def sparse_multiclass_hinge_loss(
scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=losses.Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds Ops for computing the multiclass hinge loss.
r"""Adds Ops for computing the multiclass hinge loss.
The implementation is based on the following paper:
On the Algorithmic Implementation of Multiclass Kernel-based Vector Machines
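The r""" prefix matters whenever a docstring body embeds backslashes, as the omitted remainder of this docstring presumably does (e.g. in its loss formula). A toy illustration, not from the patch:

    def bad():
        """Computes \max_i x_i over the batch."""  # '\m' warns: invalid escape

    def good():
        r"""Computes \max_i x_i over the batch."""  # raw docstring, no warning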
@@ -174,7 +174,7 @@ def generator():
return np.arange(32, 36)

with self.cached_session():
- with self.assertRaisesRegexp(TypeError, 'x\(\) must be generator'):
+ with self.assertRaisesRegexp(TypeError, r'x\(\) must be generator'):
failing_input_fn = generator_io.generator_input_fn(
generator, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
@@ -185,7 +185,7 @@ def generator():
yield np.arange(32, 36)

with self.cached_session():
- with self.assertRaisesRegexp(TypeError, 'x\(\) must yield dict'):
+ with self.assertRaisesRegexp(TypeError, r'x\(\) must yield dict'):
failing_input_fn = generator_io.generator_input_fn(
generator, batch_size=2, shuffle=False, num_epochs=1)
failing_input_fn()
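The second argument of assertRaisesRegexp (spelled assertRaisesRegex on modern Python) is a regular expression, so the parentheses in "x() must be generator" need \( and \) to match literally; the raw prefix keeps those escapes while dropping the warning. A self-contained check under that assumption:

    import unittest

    class RegexEscapeDemo(unittest.TestCase):
        def test_literal_parens(self):
            # The pattern is a regex: \( and \) match the literal parentheses.
            with self.assertRaisesRegex(TypeError, r'x\(\) must be generator'):
                raise TypeError('x() must be generator')

    if __name__ == '__main__':
        unittest.main()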
23 changes: 10 additions & 13 deletions tensorflow/contrib/opt/python/training/adam_gs_optimizer.py
@@ -41,7 +41,7 @@ class AdamGSOptimizer(optimizer.Optimizer):
def __init__(self, global_step=0, learning_rate=0.001,
beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="Adam"):
"""Construct a new Adam optimizer.
r"""Construct a new Adam optimizer.
Branched from tf.train.AdamOptimizer. The only difference is to pass
global step for computing beta1 and beta2 accumulators, instead of having
@@ -83,23 +83,20 @@ def __init__(self, global_step=0, learning_rate=0.001,
Args:
global_step: tensorflow variable indicating the step.
learning_rate: A Tensor or a floating point value. The learning rate.
- beta1: A float value or a constant float tensor.
-     The exponential decay rate for the 1st moment estimates.
- beta2: A float value or a constant float tensor.
-     The exponential decay rate for the 2nd moment estimates.
+ beta1: A float value or a constant float tensor. The exponential decay
+     rate for the 1st moment estimates.
+ beta2: A float value or a constant float tensor. The exponential decay
+     rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
-     Defaults to "Adam".
- @compatibility(eager)
- When eager execution is enabled, `learning_rate`, `beta1`, `beta2`, and
- `epsilon` can each be a callable that takes no arguments and returns the
- actual value to use. This can be useful for changing these values across
- different invocations of optimizer functions.
- @end_compatibility
+     Defaults to "Adam". @compatibility(eager) When eager execution is
+     enabled, `learning_rate`, `beta1`, `beta2`, and `epsilon` can each be a
+     callable that takes no arguments and returns the actual value to use.
+     This can be useful for changing these values across different
+     invocations of optimizer functions. @end_compatibility
"""
super(AdamGSOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
2 changes: 1 addition & 1 deletion tensorflow/contrib/optimizer_v2/adam.py
@@ -36,7 +36,7 @@ class AdamOptimizer(optimizer_v2.OptimizerV2):

def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="Adam"):
"""Construct a new Adam optimizer.
r"""Construct a new Adam optimizer.
Initialization:
2 changes: 1 addition & 1 deletion tensorflow/python/client/session_test.py
@@ -2036,7 +2036,7 @@ def testAutoConvertAndCheckData(self):
with self.cached_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegexp(
-     TypeError, 'Type of feed value 1 with type <(\w+) \'int\'> is not'):
+     TypeError, r'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})


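Aside from the raw prefix, note the (\w+) group: repr(int) is "<type 'int'>" on Python 2 and "<class 'int'>" on Python 3, so one pattern covers both runtimes. A quick standalone check (the message text is illustrative, not copied from TensorFlow):

    import re

    pattern = r"Type of feed value 1 with type <(\w+) 'int'> is not"
    message = "Type of feed value 1 with type %r is not compatible" % int
    match = re.search(pattern, message)
    print(match.group(1))  # 'class' on Python 3, 'type' on Python 2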
12 changes: 6 additions & 6 deletions tensorflow/python/feature_column/feature_column_test.py
@@ -1832,7 +1832,7 @@ def test_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'):
fc.linear_model(features, [price1, price2])

def test_subset_of_static_batch_size_mismatch(self):
@@ -1847,7 +1847,7 @@ def test_subset_of_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.linear_model(features, [price1, price2, price3])

def test_runtime_batch_size_mismatch(self):
Expand Down Expand Up @@ -2467,7 +2467,7 @@ def test_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
get_keras_linear_model_predictions(features, [price1, price2])

def test_subset_of_static_batch_size_mismatch(self):
@@ -2482,7 +2482,7 @@ def test_subset_of_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
get_keras_linear_model_predictions(features, [price1, price2, price3])

def test_runtime_batch_size_mismatch(self):
@@ -2974,7 +2974,7 @@ def test_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.input_layer(features, [price1, price2])

def test_subset_of_static_batch_size_mismatch(self):
@@ -2989,7 +2989,7 @@ def test_subset_of_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.input_layer(features, [price1, price2, price3])

def test_runtime_batch_size_mismatch(self):
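With the raw prefix, pylint's anomalous-backslash-in-string check no longer fires, so the trailing # pylint: disable= comments become unnecessary (the first hunk above drops it; the remaining ones presumably could as well). Before and after, in miniature:

    # Before: the interpreter warns and pylint flags the stray backslashes.
    msg = 'must be same \(see docs\)'  # pylint: disable=anomalous-backslash-in-string

    # After: the raw prefix marks the backslashes as intentional -- no
    # warning, and the suppression comment can go.
    msg = r'must be same \(see docs\)'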
16 changes: 8 additions & 8 deletions tensorflow/python/feature_column/feature_column_v2_test.py
@@ -2052,7 +2052,7 @@ def test_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
model = fc.LinearModel([price1, price2])
model(features)

@@ -2068,7 +2068,7 @@ def test_subset_of_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
model = fc.LinearModel([price1, price2, price3])
model(features)

@@ -2818,7 +2818,7 @@ def test_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.linear_model(features, [price1, price2])

def test_subset_of_static_batch_size_mismatch(self):
@@ -2833,7 +2833,7 @@ def test_subset_of_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.linear_model(features, [price1, price2, price3])

def test_runtime_batch_size_mismatch(self):
@@ -3435,7 +3435,7 @@ def test_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.DenseFeatures([price1, price2])(features)

def test_subset_of_static_batch_size_mismatch(self):
@@ -3450,7 +3450,7 @@ def test_subset_of_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc.DenseFeatures([price1, price2, price3])(features)

def test_runtime_batch_size_mismatch(self):
@@ -4141,7 +4141,7 @@ def test_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.input_layer(features, [price1, price2])

def test_subset_of_static_batch_size_mismatch(self):
@@ -4156,7 +4156,7 @@ def test_subset_of_static_batch_size_mismatch(self):
}
with self.assertRaisesRegexp(
ValueError,
-     'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
+     r'Batch size \(first dimension\) of each feature must be same.'): # pylint: disable=anomalous-backslash-in-string
fc_old.input_layer(features, [price1, price2, price3])

def test_runtime_batch_size_mismatch(self):
10 changes: 4 additions & 6 deletions tensorflow/python/kernel_tests/confusion_matrix_test.py
@@ -470,9 +470,8 @@ def testUnsqueezableLabels(self):
labels_placeholder: label_values,
predictions_placeholder: prediction_values
}
- with self.assertRaisesRegexp(
-     errors_impl.InvalidArgumentError,
-     "Can not squeeze dim\[2\]"):
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
+                              r"Can not squeeze dim\[2\]"):
dynamic_labels.eval(feed_dict=feed_dict)
self.assertAllEqual(
prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
@@ -498,9 +497,8 @@ def testUnsqueezablePredictions(self):
}
self.assertAllEqual(
label_values, dynamic_labels.eval(feed_dict=feed_dict))
- with self.assertRaisesRegexp(
-     errors_impl.InvalidArgumentError,
-     "Can not squeeze dim\[2\]"):
+ with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
+                              r"Can not squeeze dim\[2\]"):
dynamic_predictions.eval(feed_dict=feed_dict)


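In a regex, [ opens a character class, so matching the literal text dim[2] requires \[ and \]; in a plain string those backslashes are also invalid escapes, so the raw prefix resolves both complaints at once. A standalone check (the error text is illustrative):

    import re

    message = "Can not squeeze dim[2], expected a dimension of 1"
    # Unescaped, [2] would be a character class matching just the digit 2;
    # \[2\] matches the literal brackets.
    assert re.search(r"Can not squeeze dim\[2\]", message)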
23 changes: 10 additions & 13 deletions tensorflow/python/training/adam.py
@@ -39,7 +39,7 @@ class AdamOptimizer(optimizer.Optimizer):

def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
use_locking=False, name="Adam"):
"""Construct a new Adam optimizer.
r"""Construct a new Adam optimizer.
Initialization:
@@ -75,23 +75,20 @@ def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
Args:
learning_rate: A Tensor or a floating point value. The learning rate.
- beta1: A float value or a constant float tensor.
-     The exponential decay rate for the 1st moment estimates.
- beta2: A float value or a constant float tensor.
-     The exponential decay rate for the 2nd moment estimates.
+ beta1: A float value or a constant float tensor. The exponential decay
+     rate for the 1st moment estimates.
+ beta2: A float value or a constant float tensor. The exponential decay
+     rate for the 2nd moment estimates.
epsilon: A small constant for numerical stability. This epsilon is
"epsilon hat" in the Kingma and Ba paper (in the formula just before
Section 2.1), not the epsilon in Algorithm 1 of the paper.
use_locking: If True use locks for update operations.
name: Optional name for the operations created when applying gradients.
-     Defaults to "Adam".
- @compatibility(eager)
- When eager execution is enabled, `learning_rate`, `beta1`, `beta2`, and
- `epsilon` can each be a callable that takes no arguments and returns the
- actual value to use. This can be useful for changing these values across
- different invocations of optimizer functions.
- @end_compatibility
+     Defaults to "Adam". @compatibility(eager) When eager execution is
+     enabled, `learning_rate`, `beta1`, `beta2`, and `epsilon` can each be a
+     callable that takes no arguments and returns the actual value to use.
+     This can be useful for changing these values across different
+     invocations of optimizer functions. @end_compatibility
"""
super(AdamOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
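The @compatibility(eager) passage is the substantive part of this docstring: under eager execution the hyperparameters may be zero-argument callables, re-evaluated each time gradients are applied. A hedged sketch against the TF 1.x API this file belongs to (assumes an eager-capable TF 1.x install; not taken from the patch):

    import tensorflow as tf

    tf.enable_eager_execution()

    lr = tf.Variable(0.001)
    # A callable defers the read: each apply_gradients call sees the
    # variable's current value, so the schedule can change between steps.
    opt = tf.train.AdamOptimizer(learning_rate=lambda: lr.value())

    w = tf.Variable(1.0)
    with tf.GradientTape() as tape:
        loss = w * w
    grads = tape.gradient(loss, [w])
    opt.apply_gradients(zip(grads, [w]))

    lr.assign(0.0001)  # subsequent steps pick up the new rate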
2 changes: 1 addition & 1 deletion tensorflow/tools/ci_build/copy_binary.py
@@ -33,7 +33,7 @@
import zipfile

TF_NIGHTLY_REGEX = (r"(.+)tf_nightly(|_gpu)-(\d\.[\d]{1,2}"
"\.\d.dev[\d]{0,8})-(.+)\.whl")
r"\.\d.dev[\d]{0,8})-(.+)\.whl")
BINARY_STRING_TEMPLATE = "%s-%s-%s.whl"


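This one is easy to miss: the r prefix applies per string literal, not per expression, so the second half of an implicitly concatenated pattern needs its own prefix even though the first half (unchanged above) already has one. An illustrative pattern, not the real one:

    import re

    # Adjacent literals are joined at compile time, but each one carries its
    # own prefix -- without the r on the second line its backslashes would be
    # ordinary (and warning-producing) escapes.
    VERSION_REGEX = (r"(\d+)\."
                     r"(\d+)\.(\d+)")

    assert re.match(VERSION_REGEX, "1.12.0")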
@@ -195,7 +195,7 @@ def generate_RSA(bits=2048, exponent=65537):

def get_change_ssh_port(use_hostnet, port):
if use_hostnet == 1:
return "sed -i '/Port 22/c\Port {}' /etc/ssh/sshd_config".format(port)
return r"sed -i '/Port 22/c\Port {}' /etc/ssh/sshd_config".format(port)

return ''

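Here the backslash is for sed rather than for Python: sed's c\ (change) command needs a literal backslash before Port. \P is not a recognized Python escape, so the old literal happened to pass the backslash through anyway, but only with a warning; the raw string states the intent. For instance:

    def get_change_ssh_port(use_hostnet, port):
        # The backslash must reach the shell literally; the raw string
        # guarantees that without tripping the escape-sequence warning.
        if use_hostnet == 1:
            return r"sed -i '/Port 22/c\Port {}' /etc/ssh/sshd_config".format(port)
        return ''

    print(get_change_ssh_port(1, 2222))
    # sed -i '/Port 22/c\Port 2222' /etc/ssh/sshd_config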
@@ -103,8 +103,9 @@ def InvokeNvcc(argv, log=False):
The return value of calling os.system('nvcc ' + args)
"""

- src_files = [f for f in argv if
-              re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+ src_files = [
+     f for f in argv if re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)
+ ]
if len(src_files) == 0:
raise Error('No source files found for cuda compilation.')

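The alternation anchors each C/C++ source extension at the end of the file name; without the raw prefix, every \. in it warns. The same filter as a self-contained snippet (the argument list is invented for the demo):

    import re

    argv = ['-O2', 'kernel.cu.cc', 'util.cpp', '-o', 'out.o', 'legacy.C']
    src_files = [
        f for f in argv if re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)
    ]
    print(src_files)  # ['kernel.cu.cc', 'util.cpp', 'legacy.C']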
@@ -103,8 +103,9 @@ def InvokeNvcc(argv, log=False):
The return value of calling os.system('nvcc ' + args)
"""

- src_files = [f for f in argv if
-              re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+ src_files = [
+     f for f in argv if re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)
+ ]
if len(src_files) == 0:
raise Error('No source files found for cuda compilation.')

@@ -103,8 +103,9 @@ def InvokeNvcc(argv, log=False):
The return value of calling os.system('nvcc ' + args)
"""

- src_files = [f for f in argv if
-              re.search('\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)]
+ src_files = [
+     f for f in argv if re.search(r'\.cpp$|\.cc$|\.c$|\.cxx$|\.C$', f)
+ ]
if len(src_files) == 0:
raise Error('No source files found for cuda compilation.')

