Add nn.AdaptiveAvgPool1d and nn.AdaptiveAvgPool3d #5445

Merged: 42 commits, Jul 22, 2021
Changes from 1 commit
Commits (42)
db27cdc
Generalize 'AdaptiveAvgPool' for 1~3 dims usage
Jul 9, 2021
99a13ac
Implement GPU version
Jul 15, 2021
ecbe95a
Enable pooling with larger 'output_size' which is supported by PyTorch
Jul 16, 2021
53ba214
Add docs for 'AdaptiveAvgPool1d' and 'AdaptiveAvgPool3d'
Jul 16, 2021
d86d3d7
Add functional API for 'AdaptiveAvgPoolXd'
Jul 16, 2021
221d756
Add 'flow.adaptive_avg_poolxd'
Jul 16, 2021
81ffa32
Add test cases for 'flow.adaptive_avg_poolxd'
Jul 16, 2021
ddfbe6f
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 16, 2021
008c842
auto format by CI
oneflow-ci-bot Jul 16, 2021
8741e25
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 16, 2021
51a94d8
Avoid using 'Shape::Count' in for loop
Jul 17, 2021
c66882e
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 18, 2021
d848cc5
Change names of compute functions
Jul 18, 2021
c7187d4
Change 'AdaptivePoolNdGradOp' to functor
Jul 18, 2021
5b790eb
Register integer types for CUDA 'adaptive_avg_poolxd' kernel
Jul 18, 2021
78af94a
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 18, 2021
59f8c8d
Integrate 'adaptive_avg_poolxd' into 'nn.functional.interpolate'
Jul 19, 2021
97dd698
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 19, 2021
33923b5
Correct wrong 'AdaptiveAvgPoolXd' entries
Jul 19, 2021
f7bf773
Merge branch 'add_adaptive_avg_pool_xd' of github.com:Oneflow-Inc/one…
Jul 19, 2021
2803cb6
Add missing 'not None' assertions for 'output_size'
Jul 19, 2021
aa70d48
Support tuple input for 'AdaptiveAvgPool1d'
Jul 19, 2021
0067493
Mark TODO for auto testing 'AdaptiveAvgPoolXd'
Jul 19, 2021
3956b16
Denote return types for 'BackwardOpConfGenFn'
Jul 19, 2021
54ea555
Combine test classes of 'AdaptiveAvgPoolXd' into one
Jul 19, 2021
d9f5dfc
Rename 'AvgXXXCompute'
Jul 19, 2021
1cc90f6
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 19, 2021
3df1154
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 19, 2021
da43e00
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 19, 2021
e82c5f8
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 19, 2021
301c56f
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 19, 2021
96343a8
Merge branch 'master' into add_adaptive_avg_pool_xd
oneflow-ci-bot Jul 21, 2021
b37f407
auto format by CI
oneflow-ci-bot Jul 21, 2021
b6e8d4e
Simplify output shape inference
Jul 21, 2021
bc8ca32
Merge branch 'add_adaptive_avg_pool_xd' of github.com:Oneflow-Inc/one…
Jul 21, 2021
932b16e
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 21, 2021
24421fc
Merge branch 'master' into add_adaptive_avg_pool_xd
oneflow-ci-bot Jul 21, 2021
dbfc987
Delete the wrong 'if' block
Jul 21, 2021
db00657
Merge branch 'add_adaptive_avg_pool_xd' of github.com:Oneflow-Inc/one…
Jul 21, 2021
c3db14a
Merge branch 'master' into add_adaptive_avg_pool_xd
oneflow-ci-bot Jul 21, 2021
63681ab
Merge branch 'master' into add_adaptive_avg_pool_xd
Jul 21, 2021
92a1e0b
Merge branch 'master' into add_adaptive_avg_pool_xd
oneflow-ci-bot Jul 21, 2021
31 changes: 24 additions & 7 deletions oneflow/core/autograd/gradient_funcs/adaptive_pool.cpp
@@ -26,9 +26,9 @@ struct AdaptivePoolInterpState : public OpExprInterpState {
   bool requires_grad;
 };
 
-class AdaptivePool : public OpExprGradFunction<AdaptivePoolInterpState> {
+class AdaptivePoolNd : public OpExprGradFunction<AdaptivePoolInterpState> {
  public:
-  Maybe<void> Init(const OpExpr& op) override;
+  Maybe<void> Init(const OpExpr& op, const int& ndims);
   Maybe<void> Capture(AdaptivePoolInterpState* ctx, const TensorTuple& inputs,
                       const TensorTuple& outputs, const AttrMap& attrs) const override;
   Maybe<void> Apply(const AdaptivePoolInterpState* ctx, const TensorTuple& out_grads,
@@ -39,16 +39,16 @@ class AdaptivePool : public OpExprGradFunction<AdaptivePoolInterpState> {
   std::shared_ptr<OpExpr> grad_op_;
 };
 
-Maybe<void> AdaptivePool::Init(const OpExpr& op) {
+Maybe<void> AdaptivePoolNd::Init(const OpExpr& op, const int& ndims) {
   const UserOpExpr* fw_op_expr = dynamic_cast<const UserOpExpr*>(&op);
   CHECK_NOTNULL_OR_RETURN(fw_op_expr);
   base_attrs_ = MakeAttrMapFromUserOpConf(fw_op_expr->proto());
   const std::string& op_name = fw_op_expr->op_name();
-  grad_op_ = JUST(op_expr_helper::AdaptivePoolGradOp(GradientOpName(op_name)));
+  grad_op_ = JUST(op_expr_helper::AdaptivePoolNdGradOp(ndims, GradientOpName(op_name)));
   return Maybe<void>::Ok();
 }
 
-Maybe<void> AdaptivePool::Capture(AdaptivePoolInterpState* ctx, const TensorTuple& inputs,
+Maybe<void> AdaptivePoolNd::Capture(AdaptivePoolInterpState* ctx, const TensorTuple& inputs,
                                   const TensorTuple& outputs, const AttrMap& attrs) const {
   ctx->requires_grad = inputs.at(0)->requires_grad();
   if (!ctx->requires_grad) { return Maybe<void>::Ok(); }
@@ -57,7 +57,7 @@ Maybe<void> AdaptivePool::Capture(AdaptivePoolInterpState* ctx, const TensorTupl
   return Maybe<void>::Ok();
 }
 
-Maybe<void> AdaptivePool::Apply(const AdaptivePoolInterpState* ctx, const TensorTuple& out_grads,
+Maybe<void> AdaptivePoolNd::Apply(const AdaptivePoolInterpState* ctx, const TensorTuple& out_grads,
                                 TensorTuple* in_grads) const {
   if (!ctx->requires_grad) { return Maybe<void>::Ok(); }
   CHECK_EQ_OR_RETURN(out_grads.size(), 1);
@@ -68,7 +68,24 @@ Maybe<void> AdaptivePool::Apply(const AdaptivePoolInterpState* ctx, const Tensor
   return Maybe<void>::Ok();
 }
 
-REGISTER_OP_EXPR_GRAD_FUNCTION("adaptive_avg_pool2d", AdaptivePool);
+class AdaptivePool1d final : public AdaptivePoolNd {
+ public:
+  Maybe<void> Init(const OpExpr& op) override { return AdaptivePoolNd::Init(op, 1); }
+};
+
+class AdaptivePool2d final : public AdaptivePoolNd {
+ public:
+  Maybe<void> Init(const OpExpr& op) override { return AdaptivePoolNd::Init(op, 2); }
+};
+
+class AdaptivePool3d final : public AdaptivePoolNd {
+ public:
+  Maybe<void> Init(const OpExpr& op) override { return AdaptivePoolNd::Init(op, 3); }
+};
+
+REGISTER_OP_EXPR_GRAD_FUNCTION("adaptive_avg_pool1d", AdaptivePool1d);
+REGISTER_OP_EXPR_GRAD_FUNCTION("adaptive_avg_pool2d", AdaptivePool2d);
+REGISTER_OP_EXPR_GRAD_FUNCTION("adaptive_avg_pool3d", AdaptivePool3d);
 
 }  // namespace one
 }  // namespace oneflow
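
For reference, adaptive average pooling maps output index i to the input window [floor(i * L / out), ceil((i + 1) * L / out)), the window rule PyTorch documents for its adaptive pooling ops, and the backward pass these grad functors dispatch to scatters dy / window_size back over that window. A minimal NumPy sketch of the 1-d case (illustrative only, not OneFlow's actual kernel):

```python
import numpy as np

def adaptive_avg_pool1d(x, output_size):
    # x: (N, C, L); each output cell averages its input window.
    n, c, l = x.shape
    out = np.empty((n, c, output_size), dtype=x.dtype)
    for i in range(output_size):
        start = i * l // output_size
        end = -(-(i + 1) * l // output_size)  # ceil((i + 1) * l / output_size)
        out[..., i] = x[..., start:end].mean(axis=-1)
    return out

def adaptive_avg_pool1d_grad(dy, x, output_size):
    # Each input position accumulates dy[..., i] / window_size from every
    # output window that covers it.
    dx = np.zeros_like(x)
    l = x.shape[-1]
    for i in range(output_size):
        start = i * l // output_size
        end = -(-(i + 1) * l // output_size)
        dx[..., start:end] += dy[..., i:i + 1] / (end - start)
    return dx
```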
8 changes: 4 additions & 4 deletions oneflow/core/framework/op_expr_helper.cpp
@@ -851,11 +851,11 @@ Maybe<one::UserOpExpr> PoolNdGradOp(const std::string& mode, const std::string&
       .Build();
 }
 
-Maybe<one::UserOpExpr> AdaptivePoolGradOp() {
-  return AdaptivePoolGradOp(UniqueOpName("adaptive_pool_grad"));
+Maybe<one::UserOpExpr> AdaptivePoolNdGradOp(const int& ndims) {
+  return AdaptivePoolNdGradOp(ndims, UniqueOpName("adaptive_avg_pool_nd_grad"));
 }
-Maybe<one::UserOpExpr> AdaptivePoolGradOp(const std::string& name) {
-  return one::OpBuilder("adaptive_avg_pool2d_grad", name)
+Maybe<one::UserOpExpr> AdaptivePoolNdGradOp(const int& ndims, const std::string& name) {
+  return one::OpBuilder("adaptive_avg_pool" + std::to_string(ndims) + "d_grad", name)
       .Input("x")
       .Input("dy")
       .Output("dx")
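
The builder above derives the grad-op type name from ndims; for the three registered dimensionalities it produces (illustrative Python):

```python
for ndims in (1, 2, 3):
    print("adaptive_avg_pool" + str(ndims) + "d_grad")
# adaptive_avg_pool1d_grad
# adaptive_avg_pool2d_grad
# adaptive_avg_pool3d_grad
```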
4 changes: 2 additions & 2 deletions oneflow/core/framework/op_expr_helper.h
@@ -276,8 +276,8 @@ Maybe<one::UserOpExpr> PoolNdGradOp(const std::string& mode, const std::string&
                                     const std::vector<int32_t>& strides, const bool& ceil_mode,
                                     const std::string& name);
 
-Maybe<one::UserOpExpr> AdaptivePoolGradOp();
-Maybe<one::UserOpExpr> AdaptivePoolGradOp(const std::string& name);
+Maybe<one::UserOpExpr> AdaptivePoolNdGradOp(const int& ndims);
+Maybe<one::UserOpExpr> AdaptivePoolNdGradOp(const int& ndims, const std::string& name);
 
 Maybe<one::UserOpExpr> UnsortedSegmentSumLikeOp(const int64_t& axis);
 Maybe<one::UserOpExpr> UnsortedSegmentSumLikeOp(const int64_t& axis, const std::string& name);
78 changes: 78 additions & 0 deletions oneflow/python/nn/modules/adaptive_pool.py
@@ -18,6 +18,37 @@
from oneflow.python.oneflow_export import oneflow_export, experimental_api


@oneflow_export("nn.AdaptiveAvgPool1d")
@experimental_api
class AdaptiveAvgPool1d(Module):
    def __init__(self, output_size) -> None:
        super().__init__()
        self.output_size = output_size

        self._op = (
            flow.builtin_op("adaptive_avg_pool1d")
            .Input("x")
            .Attr("output_size", [])
            .Output("y")
            .Build()
        )

    def forward(self, x):
        assert len(x.shape) == 3, "expected 3-dim (N, C, L) input"

        if isinstance(self.output_size, int):
            new_output_size = (self.output_size,)
        elif isinstance(self.output_size, tuple):
            assert len(self.output_size) == 1, "output_size must be an int or a 1-tuple"
            new_output_size = self.output_size
        else:
            raise NotImplementedError("output_size must be an int or a 1-tuple")

        assert (
            new_output_size[0] <= x.shape[2]
        ), "output_size must not exceed the input length"

        return self._op(x, output_size=new_output_size)[0]
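
A usage sketch for the module above (the experimental import path and eager-mode switch follow the conventions of OneFlow's test suite at the time; treat them as assumptions):

```python
import numpy as np
import oneflow.experimental as flow

flow.enable_eager_execution()  # assumed: the experimental API runs in eager mode

x = flow.Tensor(np.random.randn(1, 2, 8))  # (N, C, L)
m = flow.nn.AdaptiveAvgPool1d(4)
y = m(x)
print(y.shape)  # expected (1, 2, 4): each output cell averages a window of x
```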


@oneflow_export("nn.AdaptiveAvgPool2d")
@experimental_api
class AdaptiveAvgPool2d(Module):
@@ -100,6 +131,53 @@ def forward(self, x):
        return self._op(x, output_size=new_output_size)[0]


@oneflow_export("nn.AdaptiveAvgPool3d")
@experimental_api
class AdaptiveAvgPool3d(Module):
    def __init__(self, output_size) -> None:
        super().__init__()
        self.output_size = output_size

        self._op = (
            flow.builtin_op("adaptive_avg_pool3d")
            .Input("x")
            .Attr("output_size", [])
            .Output("y")
            .Build()
        )

    def forward(self, x):
        assert len(x.shape) == 5, "expected 5-dim (N, C, D, H, W) input"

        if isinstance(self.output_size, int):
            new_output_size = [self.output_size] * (len(x.shape) - 2)
        elif isinstance(self.output_size, tuple):
            assert len(self.output_size) == 3, "output_size must be an int or a 3-tuple"
            new_output_size = list(self.output_size)
            # A 'None' entry keeps the corresponding input dimension unchanged.
            for i in range(3):
                if new_output_size[i] is None:
                    new_output_size[i] = x.shape[i + 2]
        else:
            raise NotImplementedError("output_size must be an int or a 3-tuple")

        new_output_size = tuple(new_output_size)
        for i in range(3):
            assert (
                new_output_size[i] <= x.shape[i + 2]
            ), "output_size must not exceed the input size"

        return self._op(x, output_size=new_output_size)[0]
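
And a sketch exercising the None handling in AdaptiveAvgPool3d (same assumptions about the experimental API as above):

```python
import numpy as np
import oneflow.experimental as flow

flow.enable_eager_execution()  # assumed: the experimental API runs in eager mode

x = flow.Tensor(np.random.randn(1, 3, 8, 9, 10))  # (N, C, D, H, W)
m = flow.nn.AdaptiveAvgPool3d((None, 7, 5))  # None keeps that input dimension
y = m(x)
print(y.shape)  # expected (1, 3, 8, 7, 5)
```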


if __name__ == "__main__":
    import doctest