We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0da233c — commit 47f500e (Copy full SHA for 47f500e)
tensorlayer/distributed.py
@@ -140,7 +140,7 @@ def __init__(
140
141
# Adjust learning rate based on number of GPUs.
142
lr = optimizer_args['learning_rate']
143
- optimizer_args['learning_rate'] = scaling_learning_rate if lr * hvd.size() else lr
+ optimizer_args['learning_rate'] = lr * hvd.size() if scaling_learning_rate else lr
144
opt = optimizer(**optimizer_args)
145
146
# Add Horovod Distributed Optimizer.
0 commit comments