Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

contrib/factorization: minor spelling tweaks #17992

Merged
merged 1 commit into from
Mar 25, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 1 addition & 1 deletion tensorflow/contrib/factorization/kernels/clustering_ops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -353,7 +353,7 @@ class NearestNeighborsOp : public OpKernel {
auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads());
const int64 num_threads = worker_threads.num_threads;
// This kernel might be configured to use fewer than the total number of
// available CPUs on the host machine. To avoid descructive interference
// available CPUs on the host machine. To avoid destructive interference
// with other jobs running on the host machine, we must only use a fraction
// of total available L3 cache. Unfortunately, we cannot query the host
// machine to get the number of physical CPUs. So, we use a fixed per-CPU
Expand Down
14 changes: 7 additions & 7 deletions tensorflow/contrib/factorization/python/ops/factorization_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ class WALSModel(object):
# the prep_gramian_op for row(column) can be run.
worker_init_op = model.worker_init

# To be run once per interation sweep before the row(column) update
# To be run once per iteration sweep before the row(column) update
# initialize ops can be run. Note that in the distributed training
# situations, this should only be run by the chief trainer. All other
# trainers need to block until this is done.
Expand All @@ -118,9 +118,9 @@ class WALSModel(object):
init_row_update_op = model.initialize_row_update_op
init_col_update_op = model.initialize_col_update_op

# Ops to upate row(column). This can either take the entire sparse tensor
# or slices of sparse tensor. For distributed trainer, each trainer
# handles just part of the matrix.
# Ops to update row(column). This can either take the entire sparse
# tensor or slices of sparse tensor. For distributed trainer, each
# trainer handles just part of the matrix.
_, row_update_op, unreg_row_loss, row_reg, _ = model.update_row_factors(
sp_input=matrix_slices_from_queue_for_worker_shard)
row_loss = unreg_row_loss + row_reg
Expand Down Expand Up @@ -220,7 +220,7 @@ def __init__(self,
in the form of [[w_0, w_1, ...], [w_k, ... ], [...]], with the number of
inner lists matching the number of row factor shards and the elements in
each inner list are the weights for the rows of the corresponding row
factor shard. In this case, w_ij = unonbserved_weight +
factor shard. In this case, w_ij = unobserved_weight +
row_weights[i] * col_weights[j].
- If this is a single non-negative real number, this value is used for
all row weights and w_ij = unobserved_weight + row_weights *
Expand Down Expand Up @@ -435,7 +435,7 @@ def _prepare_gramian(self, factors, gramian):
gramian: Variable storing the gramian calculated from the factors.

Returns:
A op that updates the gramian with the calcuated value from the factors.
An op that updates the gramian with the calculated value from the factors.
"""
partial_gramians = []
for f in factors:
Expand Down Expand Up @@ -564,7 +564,7 @@ def worker_init(self):

Note that specifically this initializes the cache of the row and column
weights on workers when `use_factors_weights_cache` is True. In this case,
if these weights are being calcualted and reset after the object is created,
if these weights are being calculated and reset after the object is created,
it is important to ensure these ops are run afterwards so the cache reflects
the correct values.
"""
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@ def _run_test_process_input(self,

# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reprodue the same row factors in the model as the
# This is expected to reproduce the same row factors in the model as the
# weights and feature vectors are identical to that used in model
# training.
projected_rows = wals_model.project_row_factors(
Expand Down Expand Up @@ -283,7 +283,7 @@ def _run_test_process_input(self,

# Test column projection.
# Using the specified projection weights for the 3 column feature vectors.
# This is expected to reprodue the same column factors in the model as the
# This is expected to reproduce the same column factors in the model as the
# weights and feature vectors are identical to that used in model
# training.
projected_cols = wals_model.project_col_factors(
Expand Down Expand Up @@ -385,7 +385,7 @@ def _run_test_process_input_transposed(self,

# Test row projection.
# Using the specified projection weights for the 2 row feature vectors.
# This is expected to reprodue the same row factors in the model as the
# This is expected to reproduce the same row factors in the model as the
# weights and feature vectors are identical to that used in model
# training.
projected_rows = wals_model.project_row_factors(
Expand Down Expand Up @@ -462,7 +462,7 @@ def _run_test_process_input_transposed(self,

# Test column projection.
# Using the specified projection weights for the 2 column feature vectors.
# This is expected to reprodue the same column factors in the model as the
# This is expected to reproduce the same column factors in the model as the
# weights and feature vectors are identical to that used in model
# training.
projected_cols = wals_model.project_col_factors(
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/contrib/factorization/python/ops/gmm_ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -280,7 +280,7 @@ def _define_graph(self, data):
self._define_score_samples()

def _define_full_covariance_probs(self, shard_id, shard):
"""Defines the full covariance probabilties per example in a class.
"""Defines the full covariance probabilities per example in a class.

Updates a matrix with dimension num_examples X num_classes.

Expand Down Expand Up @@ -344,7 +344,7 @@ def _define_log_prob_operation(self, shard_id, shard):
def _define_prior_log_prob_operation(self, shard_id):
"""Computes the prior probability of all samples.

Updates a vector where each item is the prior probabibility of an
Updates a vector where each item is the prior probability of an
input example.

Args:
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/contrib/factorization/python/ops/gmm_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@ def _fn():
return _fn

# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependendent on inputs, if input
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
Expand Down
4 changes: 2 additions & 2 deletions tensorflow/contrib/factorization/python/ops/kmeans_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -413,7 +413,7 @@ def test_predict(self):
self.assertAllClose(score, self.true_score, atol=1e-2)

def test_predict_kmeans_plus_plus(self):
# Most points are concetrated near one center. KMeans++ is likely to find
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
Expand Down Expand Up @@ -604,7 +604,7 @@ def _fn():
return _fn

# This test makes sure that there are no deadlocks when using a QueueRunner.
# Note that since cluster initialization is dependendent on inputs, if input
# Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
Expand Down
2 changes: 1 addition & 1 deletion tensorflow/contrib/factorization/python/ops/wals.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,7 +235,7 @@ def create_axis_ops(sp_input, num_items, update_fn, axis_name):
num_items: An integer, the total number of items of this axis.
update_fn: A function that takes one argument (`sp_input`), and that
returns a tuple of
* new_factors: A flot Tensor of the factor values after update.
* new_factors: A float Tensor of the factor values after update.
* update_op: a TensorFlow op which updates the factors.
* loss: A float Tensor, the unregularized loss.
* reg_loss: A float Tensor, the regularization loss.
Expand Down