Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Support grid_sample and affine_grid operator #6038

Merged
merged 21 commits into from
Aug 31, 2021
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
971a85b
Support grid_sample and affine_grid operator
tingkuanpei Aug 25, 2021
30d101a
Merge branch 'master' into tkpei/support_grid_sample
doombeaker Aug 25, 2021
4e95911
auto format by CI
oneflow-ci-bot Aug 25, 2021
6e3b8bd
Refine code after review
tingkuanpei Aug 25, 2021
3b7c5de
Merge grid_sample cpu and gpu implement
tingkuanpei Aug 26, 2021
5f84f63
Merge branch 'master' into tkpei/support_grid_sample
tingkuanpei Aug 26, 2021
6314a7f
Merge branch 'master' into tkpei/support_grid_sample
tingkuanpei Aug 29, 2021
8af05e8
Add new line when end file
tingkuanpei Aug 30, 2021
7266d05
Merge branch 'master' into tkpei/support_grid_sample
tingkuanpei Aug 30, 2021
15b0781
Change flow.F to flow._C
tingkuanpei Aug 30, 2021
eba29d0
Remove CudaGraphSupport
tingkuanpei Aug 30, 2021
44d41fd
Merge branch 'master' into tkpei/support_grid_sample
oneflow-ci-bot Aug 30, 2021
0cda9c5
Follow review
tingkuanpei Aug 30, 2021
9932604
Merge branch 'master' into tkpei/support_grid_sample
tingkuanpei Aug 30, 2021
07c2c95
Fix clang_tidy
tingkuanpei Aug 30, 2021
a55547b
Merge branch 'master' into tkpei/support_grid_sample
oneflow-ci-bot Aug 30, 2021
ec18164
Merge branch 'master' into tkpei/support_grid_sample
tingkuanpei Aug 30, 2021
34cbe6f
Merge branch 'master' into tkpei/support_grid_sample
oneflow-ci-bot Aug 30, 2021
7cbadf2
Only test grid_sample cudnn in gpu mode
tingkuanpei Aug 31, 2021
0828a5c
Merge branch 'master' into tkpei/support_grid_sample
tingkuanpei Aug 31, 2021
2e232ac
Merge branch 'master' into tkpei/support_grid_sample
oneflow-ci-bot Aug 31, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
2 changes: 2 additions & 0 deletions docs/source/functional.rst
Original file line number Diff line number Diff line change
Expand Up @@ -31,3 +31,5 @@ Functional operations for neural networks
.. autofunction:: one_hot
.. autofunction:: dropout
.. autofunction:: upsample
.. autofunction:: affine_grid
.. autofunction:: grid_sample
69 changes: 69 additions & 0 deletions oneflow/core/autograd/gradient_funcs/affine_grid.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "oneflow/core/framework/attr_map.h"
#include "oneflow/core/framework/op_expr_grad_function.h"
#include "oneflow/core/functional/functional.h"

namespace oneflow {
namespace one {

// State captured during the forward pass of affine_grid and consumed by
// the backward pass (see AffineGrid::Capture / AffineGrid::Apply below).
struct AffineGridInterpState : public AutoGradCaptureState {
  Shape size;                  // "size" attribute of the forward op, forwarded to AffineGridGrad
  bool align_corners = false;  // "align_corners" attribute, forwarded to AffineGridGrad
  bool requires_grad = false;  // true iff the theta input requires a gradient
};

// Gradient function for the "affine_grid" op: routes d(grid) back to d(theta).
class AffineGrid : public OpExprGradFunction<AffineGridInterpState> {
 public:
  // Snapshot the forward op's declared attributes once, so Capture() can fall
  // back to them when the runtime AttrMap omits a value.
  Maybe<void> Init(const OpExpr& op) override {
    const auto* user_op = dynamic_cast<const UserOpExpr*>(&op);
    CHECK_NOTNULL_OR_RETURN(user_op);
    base_attrs_ = MakeAttrMapFromUserOpConf(user_op->proto());
    return Maybe<void>::Ok();
  }

  // Record whether theta needs a gradient and, if so, the attributes the
  // backward op requires ("size" and "align_corners").
  Maybe<void> Capture(AffineGridInterpState* ctx, const TensorTuple& inputs,
                      const TensorTuple& outputs, const AttrMap& attrs) const override {
    CHECK_EQ_OR_RETURN(inputs.size(), 1);
    const auto& theta = inputs.at(0);
    ctx->requires_grad = theta->requires_grad();
    // Nothing else to capture when no gradient will be computed.
    if (!ctx->requires_grad) { return Maybe<void>::Ok(); }

    ComposedAttrMap composed_attrs(attrs, base_attrs_);
    ctx->size = JUST(composed_attrs.GetAttr<Shape>("size"));
    ctx->align_corners = JUST(composed_attrs.GetAttr<bool>("align_corners"));
    return Maybe<void>::Ok();
  }

  // d(theta) = AffineGridGrad(d(grid), size, align_corners).
  Maybe<void> Apply(const AffineGridInterpState* ctx, const TensorTuple& out_grads,
                    TensorTuple* in_grads) const override {
    if (!ctx->requires_grad) { return Maybe<void>::Ok(); }

    CHECK_EQ_OR_RETURN(out_grads.size(), 1);
    const auto& grid_diff = out_grads.at(0);
    in_grads->resize(1);
    in_grads->at(0) = JUST(functional::AffineGridGrad(grid_diff, ctx->size, ctx->align_corners));
    return Maybe<void>::Ok();
  }

 private:
  AttrMap base_attrs_;  // attribute defaults taken from the forward op's proto
};

REGISTER_OP_EXPR_GRAD_FUNCTION("affine_grid", AffineGrid);

} // namespace one
} // namespace oneflow
86 changes: 86 additions & 0 deletions oneflow/core/autograd/gradient_funcs/grid_sample.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
/*
Copyright 2020 The OneFlow Authors. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

#include "oneflow/core/framework/attr_map.h"
#include "oneflow/core/framework/op_expr_grad_function.h"
#include "oneflow/core/functional/functional.h"

namespace oneflow {
namespace one {

// State captured during the forward pass of grid_sample and consumed by the
// backward pass (see GridSample::Capture / GridSample::Apply below).
struct GridSampleInterpState : public AutoGradCaptureState {
  std::string interpolation_mode;    // forwarded unchanged to GridSampleGrad
  std::string padding_mode;          // forwarded unchanged to GridSampleGrad
  bool align_corners = false;        // forwarded unchanged to GridSampleGrad
  size_t input_index = 0;            // position of the saved input tensor in SavedTensors()
  size_t grid_index = 0;             // position of the saved grid tensor in SavedTensors()
  bool input_requires_grad = false;  // true iff the input tensor requires a gradient
  bool grid_requires_grad = false;   // true iff the grid tensor requires a gradient
  bool requires_grad = false;        // input_requires_grad || grid_requires_grad
};

// Gradient function for the "grid_sample" op: routes d(output) back to
// d(input) and/or d(grid), depending on which inputs require gradients.
class GridSample : public OpExprGradFunction<GridSampleInterpState> {
 public:
  // Snapshot the forward op's declared attributes once, so Capture() can fall
  // back to them when the runtime AttrMap omits a value.
  Maybe<void> Init(const OpExpr& op) override {
    const auto* user_op = dynamic_cast<const UserOpExpr*>(&op);
    CHECK_NOTNULL_OR_RETURN(user_op);
    base_attrs_ = MakeAttrMapFromUserOpConf(user_op->proto());
    return Maybe<void>::Ok();
  }

  // Save both forward inputs (the grad op needs them) and the sampling
  // attributes, but only when at least one input requires a gradient.
  Maybe<void> Capture(GridSampleInterpState* ctx, const TensorTuple& inputs,
                      const TensorTuple& outputs, const AttrMap& attrs) const override {
    CHECK_EQ_OR_RETURN(inputs.size(), 2);
    const bool input_needs_grad = inputs.at(0)->requires_grad();
    const bool grid_needs_grad = inputs.at(1)->requires_grad();
    ctx->input_requires_grad = input_needs_grad;
    ctx->grid_requires_grad = grid_needs_grad;
    ctx->requires_grad = input_needs_grad || grid_needs_grad;
    if (!ctx->requires_grad) { return Maybe<void>::Ok(); }

    // Order matters: the returned indices are used to fetch these in Apply().
    ctx->input_index = ctx->SaveTensorForBackward(inputs.at(0));  // input
    ctx->grid_index = ctx->SaveTensorForBackward(inputs.at(1));   // grid

    ComposedAttrMap composed_attrs(attrs, base_attrs_);
    ctx->interpolation_mode = JUST(composed_attrs.GetAttr<std::string>("interpolation_mode"));
    ctx->padding_mode = JUST(composed_attrs.GetAttr<std::string>("padding_mode"));
    ctx->align_corners = JUST(composed_attrs.GetAttr<bool>("align_corners"));
    return Maybe<void>::Ok();
  }

  // GridSampleGrad returns (d(input), d(grid)); copy out only the gradients
  // that are actually needed.
  Maybe<void> Apply(const GridSampleInterpState* ctx, const TensorTuple& out_grads,
                    TensorTuple* in_grads) const override {
    if (!ctx->requires_grad) { return Maybe<void>::Ok(); }

    CHECK_EQ_OR_RETURN(out_grads.size(), 1);

    const auto& saved = ctx->SavedTensors();
    const auto& results = JUST(functional::GridSampleGrad(
        out_grads.at(0), saved.at(ctx->input_index), saved.at(ctx->grid_index),
        ctx->interpolation_mode, ctx->padding_mode, ctx->align_corners));
    in_grads->resize(2);
    if (ctx->input_requires_grad) { in_grads->at(0) = results->at(0); }
    if (ctx->grid_requires_grad) { in_grads->at(1) = results->at(1); }
    return Maybe<void>::Ok();
  }

 private:
  AttrMap base_attrs_;  // attribute defaults taken from the forward op's proto
};

REGISTER_OP_EXPR_GRAD_FUNCTION("grid_sample", GridSample);

} // namespace one
} // namespace oneflow
20 changes: 20 additions & 0 deletions oneflow/core/functional/functional_api.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -519,6 +519,26 @@
"Tensor (Tensor dy, Tensor label, Tensor theta, Float m1, Float m2, Float m3, Int64 depth) => CombinedMarginLossGrad"
bind_python: False

# Forward op: builds a flow-field grid from batched affine matrices `theta`.
- name: "affine_grid"
  signature:
    "Tensor (Tensor theta, *, Shape size, Bool align_corners) => AffineGrid"
  bind_python: True

# Backward op for affine_grid; not exposed to Python (autograd-only).
- name: "affine_grid_grad"
  signature:
    "Tensor (Tensor dgrid, *, Shape size, Bool align_corners) => AffineGridGrad"
  bind_python: False

# Forward op: samples `input` at the locations given by `grid`.
- name: "grid_sample"
  signature:
    "Tensor (Tensor input, Tensor grid, *, String interpolation_mode, String padding_mode, Bool align_corners) => GridSample"
  bind_python: True

# Backward op for grid_sample; returns (dinput, dgrid). Not exposed to Python.
- name: "grid_sample_grad"
  signature:
    "TensorTuple (Tensor doutput, Tensor input, Tensor grid, *, String interpolation_mode, String padding_mode, Bool align_corners) => GridSampleGrad"
  bind_python: False

- name: "where"
signature: [
"Tensor (Tensor condition, Tensor x, Tensor y) => Where",
Expand Down
40 changes: 40 additions & 0 deletions oneflow/core/functional/impl/nn_functor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -459,6 +459,44 @@ class CombinedMarginLossFunctor {
std::shared_ptr<OpExpr> op_;
};

// Functional wrapper for the "affine_grid" user op (theta -> grid).
class AffineGridFunctor {
 public:
  AffineGridFunctor() {
    op_ = CHECK_JUST(one::OpBuilder("affine_grid").Input("theta").Output("grid").Build());
  }

  // Dispatches affine_grid with the given target `size` and `align_corners`
  // flag attached as op attributes.
  Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& theta, const Shape& size,
                           const bool& align_corners) const {
    MutableAttrMap op_attrs;
    JUST(op_attrs.SetAttr<Shape>("size", size));
    JUST(op_attrs.SetAttr<bool>("align_corners", align_corners));
    return OpInterpUtil::Dispatch<Tensor>(*op_, {theta}, op_attrs);
  }

 private:
  std::shared_ptr<OpExpr> op_;  // prebuilt op expression, shared across calls
};

// Functional wrapper for the "grid_sample" user op ((input, grid) -> output).
class GridSampleFunctor {
 public:
  GridSampleFunctor() {
    op_ = CHECK_JUST(
        one::OpBuilder("grid_sample").Input("input").Input("grid").Output("output").Build());
  }

  // Dispatches grid_sample, forwarding the sampling configuration
  // (interpolation/padding modes and corner alignment) as op attributes.
  Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& input,
                           const std::shared_ptr<one::Tensor>& grid,
                           const std::string& interpolation_mode, const std::string& padding_mode,
                           const bool& align_corners) const {
    MutableAttrMap op_attrs;
    JUST(op_attrs.SetAttr<std::string>("interpolation_mode", interpolation_mode));
    JUST(op_attrs.SetAttr<std::string>("padding_mode", padding_mode));
    JUST(op_attrs.SetAttr<bool>("align_corners", align_corners));
    return OpInterpUtil::Dispatch<Tensor>(*op_, {input, grid}, op_attrs);
  }

 private:
  std::shared_ptr<OpExpr> op_;  // prebuilt op expression, shared across calls
};

class NormalizationFunctor {
public:
NormalizationFunctor() {
Expand Down Expand Up @@ -998,6 +1036,8 @@ ONEFLOW_FUNCTION_LIBRARY(m) {
m.add_functor<impl::SoftmaxCrossEntropyGradFunctor>("SoftmaxCrossEntropyGrad");
m.add_functor<impl::SmoothL1LossFunctor>("SmoothL1Loss");
m.add_functor<impl::CombinedMarginLossFunctor>("CombinedMarginLoss");
m.add_functor<impl::AffineGridFunctor>("AffineGrid");
m.add_functor<impl::GridSampleFunctor>("GridSample");
m.add_functor<impl::NormalizationFunctor>("Normalization");
m.add_functor<impl::PadFunctor>("Pad");
m.add_functor<impl::DropoutFunctor>("Dropout");
Expand Down
46 changes: 46 additions & 0 deletions oneflow/core/functional/impl/nn_grad_functor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -276,6 +276,50 @@ class CombinedMarginLossGradFunctor {
std::shared_ptr<OpExpr> op_;
};

// Functional wrapper for the "affine_grid_grad" user op (dgrid -> dtheta).
class AffineGridGradFunctor {
 public:
  AffineGridGradFunctor() {
    op_ = CHECK_JUST(one::OpBuilder("affine_grid_grad").Input("dgrid").Output("dtheta").Build());
  }

  // Dispatches the backward op with the same `size` / `align_corners`
  // attributes that were used in the forward pass.
  Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& dgrid, const Shape& size,
                           const bool& align_corners) const {
    MutableAttrMap op_attrs;
    JUST(op_attrs.SetAttr<Shape>("size", size));
    JUST(op_attrs.SetAttr<bool>("align_corners", align_corners));
    return OpInterpUtil::Dispatch<one::Tensor>(*op_, {dgrid}, op_attrs);
  }

 private:
  std::shared_ptr<OpExpr> op_;  // prebuilt op expression, shared across calls
};

// Functional wrapper for the "grid_sample_grad" user op:
// (doutput, input, grid) -> (dinput, dgrid).
class GridSampleGradFunctor {
 public:
  GridSampleGradFunctor() {
    op_ = CHECK_JUST(one::OpBuilder("grid_sample_grad")
                         .Input("doutput")
                         .Input("input")
                         .Input("grid")
                         .Output("dinput")
                         .Output("dgrid")
                         .Build());
  }

  // Dispatches the backward op; the sampling configuration must match the
  // forward pass for the gradients to be meaningful.
  Maybe<TensorTuple> operator()(const std::shared_ptr<one::Tensor>& doutput,
                                const std::shared_ptr<one::Tensor>& input,
                                const std::shared_ptr<one::Tensor>& grid,
                                const std::string& interpolation_mode,
                                const std::string& padding_mode, const bool& align_corners) const {
    MutableAttrMap op_attrs;
    JUST(op_attrs.SetAttr<std::string>("interpolation_mode", interpolation_mode));
    JUST(op_attrs.SetAttr<std::string>("padding_mode", padding_mode));
    JUST(op_attrs.SetAttr<bool>("align_corners", align_corners));
    return OpInterpUtil::Dispatch<one::TensorTuple>(*op_, {doutput, input, grid}, op_attrs);
  }

 private:
  std::shared_ptr<OpExpr> op_;  // prebuilt op expression, shared across calls
};

class PadGradFunctor {
public:
PadGradFunctor() {
Expand Down Expand Up @@ -412,6 +456,8 @@ ONEFLOW_FUNCTION_LIBRARY(m) {
m.add_functor<impl::AdaptivePoolNdGradFunctor>("AdaptivePoolNdGrad");
m.add_functor<impl::SmoothL1LossGradFunctor>("SmoothL1LossGrad");
m.add_functor<impl::CombinedMarginLossGradFunctor>("CombinedMarginLossGrad");
m.add_functor<impl::AffineGridGradFunctor>("AffineGridGrad");
m.add_functor<impl::GridSampleGradFunctor>("GridSampleGrad");
m.add_functor<impl::PoolingNdGradFunctor>("PoolingNdGrad");
m.add_functor<impl::PadGradFunctor>("PadGrad");
m.add_functor<impl::AvgPoolingNdGradFunctor>("AvgPoolingNdGrad");
Expand Down