Change scalar_summary to summary.scalar #8883

Merged (2 commits) on Apr 8, 2017
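This PR swaps the legacy `logging_ops.scalar_summary` call in `head.py` for the TF 1.x `summary.scalar` op. A minimal before/after sketch of the migration (the bare `tf.*` names here are illustrative; the patch itself uses the module aliases already imported in `head.py`):

```python
import tensorflow as tf

loss = tf.constant(0.5)  # stand-in for the head's weighted average loss

# Old, deprecated op: the tag string is emitted verbatim.
# tf.scalar_summary("loss", loss)

# New op used by this PR: tf.summary.scalar(name, tensor).
tf.summary.scalar("loss", loss)
```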
2 changes: 1 addition & 1 deletion tensorflow/contrib/learn/python/learn/estimators/head.py
@@ -611,7 +611,7 @@ def _create_model_fn_ops(features,
if (mode != model_fn.ModeKeys.INFER) and (labels is not None):
weight_tensor = _weight_tensor(features, weight_column_name)
loss, weighted_average_loss = loss_fn(labels, logits, weight_tensor)
- logging_ops.scalar_summary(
+ summary.scalar(
_summary_key(head_name, mkey.LOSS), weighted_average_loss)

if mode == model_fn.ModeKeys.TRAIN:
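The test expectations below change accordingly: with `tf.summary.scalar`, the summary tag is derived from the full, name-scoped op name rather than taken verbatim from the tag string, so the expected tags gain the head-name prefix (e.g. `regression_head/loss`). A small sketch of that behavior, assuming TF 1.x name-scope semantics (`regression_head` here mirrors the head's default scope name):

```python
import tensorflow as tf

loss = tf.constant(0.5)
with tf.name_scope("regression_head"):
  # The resulting summary tag is "regression_head/loss", not just "loss".
  tf.summary.scalar("loss", loss)
```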
80 changes: 41 additions & 39 deletions tensorflow/contrib/learn/python/learn/estimators/head_test.py
@@ -124,7 +124,7 @@ def testPoissonWithLogits(self):
train_op_fn=head_lib.no_op_train_fn,
logits=logits)
self._assert_output_alternatives(model_fn_ops)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["regression_head/loss"])
_assert_no_variables(self)
loss = self._log_poisson_loss(logits, labels)
_assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
@@ -150,7 +150,7 @@ def testRegressionWithLogits(self):
train_op_fn=head_lib.no_op_train_fn,
logits=((1.,), (1.,), (3.,)))
self._assert_output_alternatives(model_fn_ops)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["regression_head/loss"])
_assert_no_variables(self)
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

@@ -180,7 +180,7 @@ def testRegressionWithLogitsInput(self):
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["regression_head/loss"])
_assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)

def testRegressionWithLogitsAndLogitsInput(self):
@@ -208,7 +208,7 @@ def testRegressionEvalMode(self):
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["regression_head/loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

def testRegressionWithLabelName(self):
@@ -223,7 +223,7 @@ def testRegressionWithLabelName(self):
logits=((1.,), (1.,), (3.,)))
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["regression_head/loss"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

def testRegressionWithWeights(self):
Expand All @@ -238,7 +238,7 @@ def testRegressionWithWeights(self):
logits=((1.,), (1.,), (3.,)))
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["regression_head/loss"])
_assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
model_fn_ops)

@@ -261,7 +261,7 @@ def testRegressionWithCenteredBias(self):
expected_trainable=("regression_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(
- self, ["loss", "regression_head/centered_bias/bias_0"])
+ self, ["regression_head/loss", "regression_head/centered_bias/bias_0"])
_assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

def testRegressionErrorInSparseTensorLabels(self):
@@ -326,7 +326,7 @@ def testMultiLabelWithLogits(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -343,7 +343,7 @@ def testMultiLabelTwoClasses(self):
train_op_fn=head_lib.no_op_train_fn, logits=logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
expected_loss = 1.00320443
_assert_metrics(self, expected_loss, {
"accuracy": 0.,
@@ -383,7 +383,7 @@ def testMultiLabelWithLogitsInput(self):
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
expected_loss = .69314718
_assert_metrics(self, expected_loss, {
"accuracy": 2. / 3,
@@ -428,7 +428,7 @@ def testMultiLabelEvalMode(self):
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -447,7 +447,7 @@ def testMultiClassEvalModeWithLargeLogits(self):
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
expected_loss = 1.377779
expected_eval_metrics = {
"accuracy": 1. / 3,
@@ -485,7 +485,7 @@ def testMultiLabelWithLabelName(self):
head_lib.no_op_train_fn, logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -505,7 +505,7 @@ def testMultiLabelWithWeight(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
_assert_metrics(self, .089985214,
self._expected_eval_metrics(2.69956), model_fn_ops)

@@ -525,7 +525,7 @@ def testMultiLabelWithCustomLoss(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
_assert_metrics(self, 0.089985214,
self._expected_eval_metrics(0.089985214), model_fn_ops)

@@ -549,7 +549,7 @@ def testMultiLabelWithCenteredBias(self):
expected_trainable=("multi_label_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self, (
- "loss",
+ "multi_label_head/loss",
"multi_label_head/centered_bias/bias_0",
"multi_label_head/centered_bias/bias_1",
"multi_label_head/centered_bias/bias_2"
@@ -574,7 +574,7 @@ def testMultiLabelSparseTensorLabels(self):
train_op_fn=head_lib.no_op_train_fn,
logits=self._logits)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_label_head/loss"])
expected_loss = .89985204
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -639,7 +639,7 @@ def testBinaryClassificationWithLogits(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_logistic_head/loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -667,7 +667,7 @@ def testBinaryClassificationWithLogitsInput(self):
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_logistic_head/loss"])
expected_loss = .69314718
label_mean = np.mean(self._labels)
_assert_metrics(self, expected_loss, {
@@ -703,7 +703,7 @@ def testBinaryClassificationEvalMode(self):
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_logistic_head/loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -768,7 +768,7 @@ def testBinaryClassificationWithLabelName(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_logistic_head/loss"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -789,7 +789,7 @@ def testBinaryClassificationWithWeights(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_logistic_head/loss"])
expected_total_loss = .31326166
_assert_metrics(
self,
@@ -822,7 +822,7 @@ def testBinaryClassificationWithCustomLoss(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_logistic_head/loss"])
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
# expected_loss is (total_weighted_loss)/1 since there is 1 nonzero
@@ -862,7 +862,8 @@ def testBinaryClassificationWithCenteredBias(self):
expected_trainable=("binary_logistic_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(
- self, ["loss", "binary_logistic_head/centered_bias/bias_0"])
+ self, ["binary_logistic_head/loss",
+ "binary_logistic_head/centered_bias/bias_0"])
expected_loss = .81326175
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -911,7 +912,7 @@ def testMultiClassWithLogits(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 1.5514447
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -950,7 +951,7 @@ def testMultiClassWithLogitsInput(self):
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 1.0986123
_assert_metrics(self, expected_loss, {
"accuracy": 0.,
@@ -1001,7 +1002,7 @@ def testMultiClassEnableCenteredBias(self):
expected_trainable=("multi_class_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(self,
- ["loss",
+ ["multi_class_head/loss",
"multi_class_head/centered_bias/bias_0",
"multi_class_head/centered_bias/bias_1",
"multi_class_head/centered_bias/bias_2"])
@@ -1019,7 +1020,7 @@ def testMultiClassEvalMode(self):
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 1.5514447
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -1038,7 +1039,7 @@ def testMultiClassEvalModeWithLargeLogits(self):
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 3.1698461
expected_eval_metrics = {
"accuracy": 0.,
@@ -1077,7 +1078,7 @@ def testMultiClassWithWeight(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 1.5514447
_assert_metrics(self, expected_loss * weight,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -1101,7 +1102,7 @@ def testMultiClassWithCustomLoss(self):
logits=self._logits)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 1.5514447 * weight
_assert_metrics(self, expected_loss,
self._expected_eval_metrics(expected_loss), model_fn_ops)
@@ -1208,7 +1209,7 @@ def testMultiClassWithLabelKeysEvalAccuracy0(self):
data_flow_ops.tables_initializer().run()
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 1.5514447
expected_eval_metrics = {
"accuracy": 0.,
@@ -1234,7 +1235,7 @@ def testMultiClassWithLabelKeysEvalAccuracy1(self):
data_flow_ops.tables_initializer().run()
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["multi_class_head/loss"])
expected_loss = 0.5514447
expected_eval_metrics = {
"accuracy": 1.,
@@ -1273,7 +1274,7 @@ def testBinarySVMWithLogits(self):
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_svm_head/loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
@@ -1303,7 +1304,7 @@ def testBinarySVMWithLogitsInput(self):
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_svm_head/loss"])
expected_loss = 1.
_assert_metrics(self, expected_loss, {
"accuracy": .5,
@@ -1335,7 +1336,7 @@ def testBinarySVMEvalMode(self):
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_svm_head/loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
@@ -1354,7 +1355,7 @@ def testBinarySVMWithLabelName(self):
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_svm_head/loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
@@ -1373,7 +1374,7 @@ def testBinarySVMWithWeights(self):
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
- _assert_summary_tags(self, ["loss"])
+ _assert_summary_tags(self, ["binary_svm_head/loss"])
expected_weighted_sum = np.sum(
np.multiply(weights, self._expected_losses))
_assert_metrics(self, expected_weighted_sum / len(weights), {
@@ -1401,7 +1402,7 @@ def testBinarySVMWithCenteredBias(self):
expected_trainable=("binary_svm_head/centered_bias_weight:0",))
variables.global_variables_initializer().run()
_assert_summary_tags(
- self, ["loss", "binary_svm_head/centered_bias/bias_0"])
+ self, ["binary_svm_head/loss",
+ "binary_svm_head/centered_bias/bias_0"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,