remove unused vars (#10618)
typhoonzero committed May 15, 2018
1 parent 6cbe597 commit b0eca10
Showing 8 changed files with 8 additions and 8 deletions.
@@ -170,7 +170,7 @@ def train(word_dict,
assert save_dirname is None

adagrad = fluid.optimizer.Adagrad(learning_rate=0.002)
-optimize_ops, params_grads = adagrad.minimize(cost)
+adagrad.minimize(cost)

train_data = paddle.batch(
paddle.reader.shuffle(
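All eight diffs make the same one-line cleanup: in Paddle Fluid, Optimizer.minimize() returns the pair (optimize_ops, params_grads), and each of these book tests assigned that pair without ever reading it. A minimal sketch of the pattern, modeled loosely on test_fit_a_line.py (the tiny network is illustrative, not any test's actual model):

import paddle.fluid as fluid

# Tiny linear-regression network, standing in for the tests' models.
x = fluid.layers.data(name='x', shape=[13], dtype='float32')
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
y_predict = fluid.layers.fc(input=x, size=1)
avg_cost = fluid.layers.mean(
    fluid.layers.square_error_cost(input=y_predict, label=y))

sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)

# minimize() appends the backward pass and the optimization ops to the
# default program as a side effect, and returns (optimize_ops,
# params_grads). Nothing in these tests used the returned pair, hence:
sgd_optimizer.minimize(avg_cost)  # was: optimize_ops, params_grads = ...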
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/book/test_fit_a_line.py
@@ -33,7 +33,7 @@ def train(use_cuda, save_dirname, is_local):
avg_cost = fluid.layers.mean(cost)

sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
-optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
+sgd_optimizer.minimize(avg_cost)

BATCH_SIZE = 20

@@ -125,7 +125,7 @@ def train(net_type, use_cuda, save_dirname, is_local):
test_program = fluid.default_main_program().clone(for_test=True)

optimizer = fluid.optimizer.Adam(learning_rate=0.001)
-optimize_ops, params_grads = optimizer.minimize(avg_cost)
+optimizer.minimize(avg_cost)

BATCH_SIZE = 128
PASS_NUM = 1
@@ -175,7 +175,7 @@ def train(use_cuda, save_dirname=None, is_local=True):
decay_steps=100000,
decay_rate=0.5,
staircase=True))
-optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
+sgd_optimizer.minimize(avg_cost)

# TODO(qiao)
# add dependency track and move this config before optimizer
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/book/test_machine_translation.py
@@ -185,7 +185,7 @@ def train_main(use_cuda, is_sparse, is_local=True):
learning_rate=1e-4,
regularization=fluid.regularizer.L2DecayRegularizer(
regularization_coeff=0.1))
-optimize_ops, params_grads = optimizer.minimize(avg_cost)
+optimizer.minimize(avg_cost)

train_data = paddle.batch(
paddle.reader.shuffle(
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/book/test_recognize_digits.py
@@ -95,7 +95,7 @@ def train(nn_type,
test_program = fluid.default_main_program().clone(for_test=True)

optimizer = fluid.optimizer.Adam(learning_rate=0.001)
-optimize_ops, params_grads = optimizer.minimize(avg_loss)
+optimizer.minimize(avg_loss)

place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/book/test_recommender_system.py
@@ -160,7 +160,7 @@ def train(use_cuda, save_dirname, is_local=True):
test_program = fluid.default_main_program().clone(for_test=True)

sgd_optimizer = SGDOptimizer(learning_rate=0.2)
-optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
+sgd_optimizer.minimize(avg_cost)

place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/book/test_word2vec.py
@@ -101,7 +101,7 @@ def __network__(words):
avg_cost = fluid.layers.mean(pd())

sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
-optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
+sgd_optimizer.minimize(avg_cost)

train_reader = paddle.batch(
paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)
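Note that minimize() still returns the (optimize_ops, params_grads) pair; a caller that actually consumes the gradients (for example, to inspect or post-process them) can keep the assignment form shown on the removed lines.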
