Fix trivial typos in source codes / comments #1871

Merged
merged 1 commit on Apr 13, 2016
@@ -241,7 +241,7 @@ def test_empty_tensor_doesnt_raise(self):
out.eval()


- class AssertPostiveTest(tf.test.TestCase):
+ class AssertPositiveTest(tf.test.TestCase):

def test_raises_when_negative(self):
with self.test_session():
4 changes: 2 additions & 2 deletions tensorflow/contrib/learn/python/learn/estimators/base.py
@@ -404,7 +404,7 @@ def save(self, path):
# Save graph definition.
_write_with_backup(os.path.join(path, 'graph.pbtxt'), str(self._graph.as_graph_def()))

- # Save saver defintion.
+ # Save saver definition.
_write_with_backup(os.path.join(path, 'saver.pbtxt'), str(self._saver.as_saver_def()))

# Save checkpoints.
@@ -441,7 +441,7 @@ def _restore(self, path):
graph_def, name='', return_elements=endpoints)
saver_filename = os.path.join(path, 'saver.pbtxt')
if not os.path.exists(saver_filename):
- raise ValueError("Restore folder doesn't contain saver defintion.")
+ raise ValueError("Restore folder doesn't contain saver definition.")
with gfile.Open(saver_filename) as fsaver:
saver_def = train.SaverDef()
text_format.Merge(fsaver.read(), saver_def)
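
For context on the change above, a minimal sketch of the text-proto round trip that _restore performs, assuming only the standard SaverDef proto and protobuf text_format APIs; the helper name load_saver_def is hypothetical, not part of the PR:

import os

from google.protobuf import text_format
import tensorflow as tf

def load_saver_def(path):
    # Mirrors the check in _restore above: the restore folder must
    # contain the saver definition that save() wrote out as saver.pbtxt.
    saver_filename = os.path.join(path, 'saver.pbtxt')
    if not os.path.exists(saver_filename):
        raise ValueError("Restore folder doesn't contain saver definition.")
    saver_def = tf.train.SaverDef()
    with open(saver_filename) as fsaver:
        # Parse the text-format proto back into a SaverDef message.
        text_format.Merge(fsaver.read(), saver_def)
    return saver_def
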
2 changes: 1 addition & 1 deletion tensorflow/contrib/learn/python/learn/io/data_feeder.py
@@ -39,7 +39,7 @@ def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size):
if y_shape is None:
return input_shape, None
y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
- # Skip first dimention if it is 1.
+ # Skip first dimension if it is 1.
if y_shape and y_shape[0] == 1:
y_shape = y_shape[1:]
if n_classes > 1:
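
To make the shape handling above concrete, a self-contained sketch of the output-shape branch; the name _out_shape is made up for this example (the real _get_in_out_shape also computes the input shape):

def _out_shape(y_shape, n_classes, batch_size):
    # Drop the batch dimension, then skip a leading singleton dimension,
    # exactly as in the snippet above.
    y_shape = list(y_shape[1:]) if len(y_shape) > 1 else []
    if y_shape and y_shape[0] == 1:
        y_shape = y_shape[1:]
    # Classification targets get an extra n_classes axis.
    if n_classes > 1:
        return [batch_size] + y_shape + [n_classes]
    return [batch_size] + y_shape

# e.g. labels of shape (100, 1) with 3 classes and batch size 32 -> [32, 3]
assert _out_shape((100, 1), n_classes=3, batch_size=32) == [32, 3]
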
2 changes: 1 addition & 1 deletion tensorflow/examples/skflow/text_classification.py
@@ -80,7 +80,7 @@ def rnn_model(X, y):
classifier = skflow.TensorFlowEstimator(model_fn=rnn_model, n_classes=15,
steps=1000, optimizer='Adam', learning_rate=0.01, continue_training=True)

- # Continously train for 1000 steps & predict on test set.
+ # Continuously train for 1000 steps & predict on test set.
while True:
classifier.fit(X_train, y_train, logdir='/tmp/tf_examples/word_rnn')
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
4 changes: 2 additions & 2 deletions tensorflow/python/summary/event_accumulator.py
@@ -519,11 +519,11 @@ def _Purge(self, event, by_tags):

If by_tags is True, purge all events that occurred after the given
event.step, but only for the tags that the event has. Non-sequential
- event.steps suggest that a Tensorflow restart occured, and we discard
+ event.steps suggest that a Tensorflow restart occurred, and we discard
the out-of-order events to display a consistent view in TensorBoard.

Discarding by tags is the safer method, when we are unsure whether a restart
- has occured, given that threading in supervisor can cause events of
+ has occurred, given that threading in supervisor can cause events of
different tags to arrive with unsynchronized step values.

If by_tags is False, then purge all events with event.step greater than the
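
To illustrate the two purge strategies the docstring describes, a simplified stand-alone sketch; this is not the actual _Purge implementation (real events carry protobuf summaries, not a single tag field):

from collections import namedtuple

Event = namedtuple('Event', ['step', 'tag'])

def purge(events, restart_step, restart_tags, by_tags):
    if not by_tags:
        # Aggressive: drop every event past the out-of-order step.
        return [e for e in events if e.step <= restart_step]
    # Safer: drop later events only for the tags the restart event carried,
    # since other tags may simply have unsynchronized step values.
    return [e for e in events
            if e.step <= restart_step or e.tag not in restart_tags]
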
4 changes: 2 additions & 2 deletions tensorflow/python/training/sync_replicas_optimizer.py
@@ -503,8 +503,8 @@ def get_clean_up_op(self):
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.

- This is supposed to be executed in the begining of the chief/sync thread
- so that even if the total_num_replicas is less than replicas_to_agregate,
+ This is supposed to be executed in the beginning of the chief/sync thread
+ so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
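
As a worked instance of the num_tokens bound quoted above (the numbers are illustrative, not from the PR):

# With replicas_to_aggregate=4 but only total_num_replicas=2 live workers,
# each variable update consumes 4 gradient contributions, so workers must
# compute multiple steps per update. The token queue therefore needs at
# least 4 - 2 = 2 extra tokens.
replicas_to_aggregate = 4
total_num_replicas = 2
min_extra_tokens = replicas_to_aggregate - total_num_replicas
assert min_extra_tokens == 2  # num_tokens must be at least this value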