Fix parallel.do with batch norm #8186

Closed. Changes from 1 commit.
paddle/operators/parallel_do_op.cc (23 changes: 11 additions & 12 deletions)
@@ -248,6 +248,8 @@ class ParallelDoGradOp : public framework::OperatorBase {
       const std::vector<framework::Scope *> &sub_scopes,
       const platform::PlaceList &places) const {
     for (auto &s : Outputs(framework::GradVarName(kParameters))) {
+      VLOG(10) << "Accumulating " << s;
+      if (s == framework::kEmptyVarName) continue;
[Comment from @reyoung, the PR author, Feb 6, 2018]: Do not accumulate @EMPTY@

       std::string tmp_name;
       auto *tmp = sub_scopes[0]->Var(&tmp_name);
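
The two added lines log each gradient name and skip the empty placeholder before accumulating. A minimal Python sketch of the same rule, assuming "@EMPTY@" mirrors framework::kEmptyVarName (every other name here is hypothetical):

    K_EMPTY_VAR_NAME = "@EMPTY@"  # stand-in for framework::kEmptyVarName

    def accumulate_grads(grad_names, per_device_grads):
        """Sum each parameter gradient across devices, skipping placeholders."""
        totals = {}
        for name in grad_names:
            if name == K_EMPTY_VAR_NAME:
                continue  # no gradient was produced for this parameter
            totals[name] = sum(grads[name] for grads in per_device_grads)
        return totals

    # The second parameter produced no gradient on any device:
    print(accumulate_grads(["w@GRAD", "@EMPTY@"],
                           [{"w@GRAD": 1.0}, {"w@GRAD": 2.0}]))  # {'w@GRAD': 3.0}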

@@ -334,16 +336,9 @@ class ParallelDoGradOpDescMaker : public framework::SingleGradOpDescMaker {
 class ParallelDoGradOpShapeInference : public framework::InferShapeBase {
  public:
   void operator()(framework::InferShapeContext *ctx) const override {
-    std::vector<std::string> input{kParameters, kInputs};
-    std::vector<std::string> output{kOutputs};
-
     PADDLE_ENFORCE(ctx->HasInputs(kParameters));
-    PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(kParameters)));
     PADDLE_ENFORCE(ctx->HasInputs(kInputs));
-
-    for (auto &s : output) {
-      PADDLE_ENFORCE(ctx->HasInputs(s));
-    }
+    PADDLE_ENFORCE(ctx->HasInputs(kOutputs));
 
     ctx->SetOutputsDim(framework::GradVarName(kParameters),
                        ctx->GetInputsDim(kParameters));
@@ -360,10 +355,14 @@ class ParallelDoGradOpShapeInference : public framework::InferShapeBase {
       ctx->SetDims({ig_name}, {i_dims[i]});
     }
 
     if (ctx->HasInputs(kParameters)) {
       PADDLE_ENFORCE(ctx->HasOutputs(framework::GradVarName(kParameters)));
-      ctx->SetOutputsDim(framework::GradVarName(kParameters),
-                         ctx->GetInputsDim(kParameters));
+      auto p_dims = ctx->GetInputsDim(kParameters);
[Comment from the author]: If a parameter gradient is empty, do not infer its shape.

+      auto pg_names = ctx->Outputs(framework::GradVarName(kParameters));
+      for (size_t i = 0; i < pg_names.size(); ++i) {
+        auto &pg_name = pg_names[i];
+        if (pg_name == framework::kEmptyVarName) {
+          continue;
+        }
+        ctx->SetDims({pg_name}, {p_dims[i]});
+      }
     }
   }
 };
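A similarly small sketch of the new shape-inference rule, again with "@EMPTY@" standing in for framework::kEmptyVarName and hypothetical names elsewhere: each surviving parameter gradient takes its parameter's dims, and empty entries are passed over.

    def infer_param_grad_dims(param_dims, pg_names):
        """Pair each parameter gradient with its parameter's dims."""
        inferred = {}
        for dims, pg_name in zip(param_dims, pg_names):
            if pg_name == "@EMPTY@":  # framework::kEmptyVarName
                continue  # empty gradient: nothing to infer
            inferred[pg_name] = dims
        return inferred

    print(infer_param_grad_dims([(784, 10), (10,)], ["w@GRAD", "@EMPTY@"]))
    # {'w@GRAD': (784, 10)}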
python/paddle/v2/fluid/layers/control_flow.py (13 changes: 6 additions & 7 deletions)
@@ -274,21 +274,20 @@ def get_parameters(self):
         parent_block = self.parent_block()
 
         local_inputs = set()
Copy link
[Comment from the author]: The previous logic could not compute parameters that are used and updated by the same operator.

-        params = list()
+        for op in current_block.ops:
+            for oname in op.output_names:
+                for out_var_name in op.output(oname):
+                    local_inputs.add(out_var_name)
+
         for var in self.inputs:
             local_inputs.add(var.name)
 
+        params = list()
         for op in current_block.ops:
             for iname in op.input_names:
                 for in_var_name in op.input(iname):
                     if in_var_name not in local_inputs:
                         params.append(in_var_name)
-
-            for oname in op.output_names:
-                for out_var_name in op.output(oname):
-                    local_inputs.add(out_var_name)
-
         params = list(set(params))
 
         return [parent_block.var(name) for name in params]
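The rewrite makes parameter detection two-pass: first record every variable written anywhere in the block (plus the declared inputs), then treat any read that is not in that set as an outside parameter. A self-contained sketch with a hypothetical Op record shows why the order matters for an operator such as batch_norm that reads and updates the same variable:

    from collections import namedtuple

    Op = namedtuple("Op", ["inputs", "outputs"])  # hypothetical stand-in

    def get_parameters(ops, declared_inputs):
        local_inputs = set()
        for op in ops:                  # pass 1: everything written locally
            local_inputs.update(op.outputs)
        local_inputs.update(declared_inputs)

        params = []
        for op in ops:                  # pass 2: reads not produced locally
            for name in op.inputs:
                if name not in local_inputs:
                    params.append(name)
        return list(set(params))

    # batch_norm both reads and updates "mean"; the old single-pass walk saw
    # the read before recording the write and misclassified "mean".
    ops = [Op(inputs=["x", "w", "mean"], outputs=["y", "mean"])]
    print(get_parameters(ops, declared_inputs=["x"]))  # ['w']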
python/paddle/v2/fluid/tests/book/test_recognize_digits.py (1 change: 1 addition & 0 deletions)
@@ -65,6 +65,7 @@ def conv_net(img, label):
         pool_size=2,
         pool_stride=2,
         act="relu")
+    conv_pool_1 = fluid.layers.batch_norm(conv_pool_1)
     conv_pool_2 = fluid.nets.simple_img_conv_pool(
         input=conv_pool_1,
         filter_size=5,
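The one-line test change drops a batch_norm layer into the convolutional net, which this book test also runs in a parallel.do variant, exercising exactly the fixed path. A sketch of that pattern, assuming the v2 fluid API of this era (get_places, ParallelDo, read_input, write_output) and illustrative variable names:

    import paddle.v2.fluid as fluid

    img = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32")

    places = fluid.layers.get_places()   # one place per available device
    pd = fluid.layers.ParallelDo(places)
    with pd.do():
        x = pd.read_input(img)           # per-device slice of the batch
        # batch_norm reads and updates its mean/variance inside the block,
        # the case this PR fixes
        y = fluid.layers.batch_norm(fluid.layers.fc(input=x, size=10))
        pd.write_output(y)
    prediction = pd()                    # merged outputs from all devices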