change api attribute name, move pool_type to reduce_op, move compute_type to message_op
DesmonDay committed Aug 11, 2022
1 parent e7cbc9f commit 2b0bd9a
Showing 26 changed files with 231 additions and 231 deletions.
12 changes: 6 additions & 6 deletions paddle/fluid/operators/graph_send_recv_op.cc
@@ -64,9 +64,9 @@ class GraphSendRecvOpMaker : public framework::OpProtoAndCheckerMaker {
.AsDispensable();
AddOutput("Out", "Output tensor of graph_send_recv op.");
AddOutput("Dst_count",
"Count tensor of Dst_index, mainly for MEAN pool_type.")
"Count tensor of Dst_index, mainly for MEAN reduce_op.")
.AsIntermediate();
AddAttr<std::string>("pool_type",
AddAttr<std::string>("reduce_op",
"(string, default 'SUM')"
"Define different pool types to receive the result "
"tensors of Dst_index.")
@@ -81,7 +81,7 @@ class GraphSendRecvOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
Graph Learning Send_Recv combine operator.
$Out = Recv(Send(X, Src_index), Dst_index, pool_type)$
$Out = Recv(Send(X, Src_index), Dst_index, reduce_op)$
This operator is mainly used in Graph Learning domain, and the main purpose is to reduce
intermediate memory consumption in the process of message passing.
@@ -105,12 +105,12 @@ class GraphSendRecvGradOpMaker : public framework::SingleGradOpMaker<T> {
op->SetInput("Dst_index", this->Input("Dst_index"));
op->SetInput("X", this->Input("X"));

if (PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MEAN") {
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MEAN") {
op->SetInput("Dst_count", this->Output("Dst_count"));
}

if (PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MIN" ||
PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MAX") {
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MIN" ||
PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MAX") {
op->SetInput("Out", this->Output("Out"));
}

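The formula in the DOC block above, Out = Recv(Send(X, Src_index), Dst_index, reduce_op), amounts to gathering rows of X by Src_index and scatter-reducing them into Out at Dst_index. A minimal standalone sketch of that semantics (plain C++ on std::vector, covering only SUM and MEAN; not the actual Paddle kernel, which also handles MIN/MAX, out_size, and multiple index dtypes):

#include <cassert>
#include <string>
#include <vector>

// Sketch of graph_send_recv semantics: for every edge i, gather row
// src_index[i] of x and reduce it into row dst_index[i] of out.
// x holds rows of width dim; out holds num_dst rows and starts zero-filled.
std::vector<float> SendRecvSketch(const std::vector<float>& x, int dim,
                                  const std::vector<int>& src_index,
                                  const std::vector<int>& dst_index,
                                  int num_dst, const std::string& reduce_op) {
  assert(src_index.size() == dst_index.size());
  std::vector<float> out(static_cast<size_t>(num_dst) * dim, 0.f);
  std::vector<int> dst_count(num_dst, 0);  // per-destination edge count (MEAN)
  for (size_t i = 0; i < src_index.size(); ++i) {
    const int s = src_index[i], d = dst_index[i];
    for (int k = 0; k < dim; ++k) {
      out[d * dim + k] += x[s * dim + k];  // SUM accumulation
    }
    ++dst_count[d];
  }
  if (reduce_op == "MEAN") {  // divide each written row by its edge count
    for (int d = 0; d < num_dst; ++d) {
      if (dst_count[d] == 0) continue;
      for (int k = 0; k < dim; ++k) out[d * dim + k] /= dst_count[d];
    }
  }
  return out;
}

The per-destination counts collected here are what the op exposes as Dst_count when reduce_op is MEAN, and what the backward kernel consumes.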
16 changes: 8 additions & 8 deletions paddle/fluid/operators/graph_send_ue_recv_op.cc
@@ -68,14 +68,14 @@ class GraphSendUERecvOpMaker : public framework::OpProtoAndCheckerMaker {
.AsDispensable();
AddOutput("Out", "Output tensor of graph_send_ue_recv op.");
AddOutput("Dst_count",
"Count tensor of Dst_index, mainly for MEAN pool_type.")
"Count tensor of Dst_index, mainly for MEAN reduce_op.")
.AsIntermediate();
AddAttr<std::string>("compute_type",
AddAttr<std::string>("message_op",
"(string, default 'ADD')"
"Define differenct computation types between X and E.")
.SetDefault("ADD")
.InEnum({"ADD", "MUL"});
AddAttr<std::string>("pool_type",
AddAttr<std::string>("reduce_op",
"(string, default 'SUM')"
"Define different pool types to receive the result "
"tensors of Dst_index.")
@@ -90,13 +90,13 @@
AddComment(R"DOC(
Graph Learning Send_UE_Recv combine operator.
$Out = Recv(Compute(Send(X, Src_index), Y, compute_type), Dst_index, pool_type)$
$Out = Recv(Compute(Send(X, Src_index), Y, message_op), Dst_index, reduce_op)$
This operator is mainly used in Graph Learning domain, and the main purpose is to reduce
intermediate memory consumption in the process of message passing.
Take `X` as the input tensor, we first use `src_index` to gather corresponding data.
Then the gather data should compute with `Y` in different compute_types, like add, sub, mul, and div,
Then the gather data should compute with `Y` in different message_ops, like add, sub, mul, and div,
and get the computation result. Then, use `dst_index` to update the corresponding position of output
tensor in different pooling types, like sum, mean, max, or min.
@@ -117,12 +117,12 @@ class GraphSendUERecvGradOpMaker : public framework::SingleGradOpMaker<T> {
op->SetInput("Src_index", this->Input("Src_index"));
op->SetInput("Dst_index", this->Input("Dst_index"));

if (PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MEAN") {
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MEAN") {
op->SetInput("Dst_count", this->Output("Dst_count"));
}

if (PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MIN" ||
PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MAX") {
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MIN" ||
PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MAX") {
op->SetInput("Out", this->Output("Out"));
}

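Relative to graph_send_recv, the _ue_ variant adds one step that message_op now names: each gathered row of X is first combined element-wise with the matching row of the edge tensor Y (the InEnum above allows ADD and MUL) and only then scattered by Dst_index under reduce_op. A hedged sketch of just that message step, ignoring the broadcasting the real kernel performs between X and Y:

#include <string>
#include <vector>

// Sketch of the message_op step of graph_send_ue_recv: combine the row of x
// gathered for edge i with that edge's feature row from y, element-wise.
std::vector<float> ComputeMessageSketch(const std::vector<float>& x_row,
                                        const std::vector<float>& y_row,
                                        const std::string& message_op) {
  std::vector<float> msg(x_row.size());
  for (size_t k = 0; k < x_row.size(); ++k) {
    msg[k] = (message_op == "MUL") ? x_row[k] * y_row[k]
                                   : x_row[k] + y_row[k];  // default: ADD
  }
  return msg;  // this message is what the reduce_op scatter then consumes
}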
4 changes: 2 additions & 2 deletions paddle/phi/api/yaml/legacy_api.yaml
@@ -1060,7 +1060,7 @@
func : generate_proposals_v2

- api : graph_send_recv
args : (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", IntArray out_size = {0})
args : (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0})
output : Tensor(out), Tensor(dst_count)
infer_meta :
func : GraphSendRecvInferMeta
@@ -1071,7 +1071,7 @@
backward : graph_send_recv_grad

- api : graph_send_ue_recv
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str compute_type, str pool_type, IntArray out_size)
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op, str reduce_op, IntArray out_size)
output : Tensor(out), Tensor(dst_count)
infer_meta :
func : GraphSendUERecvInferMeta
8 changes: 4 additions & 4 deletions paddle/phi/api/yaml/legacy_backward.yaml
@@ -941,8 +941,8 @@
func : gelu_grad

- backward_api : graph_send_recv_grad
forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", IntArray out_size = {0}) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str pool_type = "SUM")
forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0}) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str reduce_op = "SUM")
output : Tensor(x_grad)
infer_meta :
func : GeneralUnaryGradInferMeta
@@ -953,8 +953,8 @@
optional: out, dst_count

- backward_api : graph_send_ue_recv_grad
forward : graph_send_ue_recv (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str compute_type, str pool_type, IntArray out_size) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str compute_type, str pool_type)
forward : graph_send_ue_recv (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op, str reduce_op, IntArray out_size) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str message_op, str reduce_op)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
6 changes: 3 additions & 3 deletions paddle/phi/infermeta/multiary.cc
@@ -2603,8 +2603,8 @@ void GraphSendUERecvInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& compute_type,
const std::string& pool_type,
const std::string& message_op,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count) {
@@ -2658,7 +2658,7 @@ void GraphSendUERecvInferMeta(const MetaTensor& x,
y_dims[0]));

auto x_dims = x.dims();
if (pool_type == "MEAN") {
if (reduce_op == "MEAN") {
dst_count->set_dims({-1});
dst_count->set_dtype(DataType::INT32);
}
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/multiary.h
@@ -470,8 +470,8 @@ void GraphSendUERecvInferMeta(const MetaTensor& x,
const MetaTensor& y,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& compute_type,
const std::string& pool_type,
const std::string& message_op,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count);
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/ternary.cc
@@ -411,7 +411,7 @@ void InstanceNormInferMeta(const MetaTensor& x,
void GraphSendRecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& pool_type,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count) {
@@ -460,7 +460,7 @@ void GraphSendRecvInferMeta(const MetaTensor& x,
out->set_dims(phi::make_ddim(dims_));
out->set_dtype(x.dtype());

if (pool_type == "MEAN") {
if (reduce_op == "MEAN") {
dst_count->set_dims({-1});
dst_count->set_dtype(DataType::INT32);
}
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/ternary.h
@@ -75,7 +75,7 @@ void InstanceNormInferMeta(const MetaTensor& x,
void GraphSendRecvInferMeta(const MetaTensor& x,
const MetaTensor& src_index,
const MetaTensor& dst_index,
const std::string& pool_type,
const std::string& reduce_op,
const IntArray& out_size,
MetaTensor* out,
MetaTensor* dst_count);
28 changes: 14 additions & 14 deletions paddle/phi/kernels/cpu/graph_send_recv_grad_kernel.cc
@@ -29,18 +29,18 @@ void GraphSendRecvCpuGradLoop(const int& index_size,
const DenseTensor& src,
const DenseTensor& input,
DenseTensor* dst,
const std::string& pool_type,
const std::string& reduce_op,
const int* dst_count = nullptr,
const DenseTensor* output = nullptr) {
if (pool_type == "SUM") {
if (reduce_op == "SUM") {
Functor functor;
for (int i = 0; i < index_size; ++i) {
const IndexT& src_idx = s_index[i];
const IndexT& dst_idx = d_index[i];
ElementwiseInnerOperation<T, IndexT, Functor>(
src, dst, src_idx, dst_idx, false, functor);
}
} else if (pool_type == "MEAN") {
} else if (reduce_op == "MEAN") {
for (int i = 0; i < index_size; ++i) {
const IndexT& src_idx = s_index[i];
const IndexT& dst_idx = d_index[i];
@@ -50,7 +50,7 @@ void GraphSendRecvCpuGradLoop(const int& index_size,
auto eigen_dst = phi::EigenVector<T>::Flatten(dst_slice);
eigen_dst += (eigen_src / static_cast<T>(dst_count[src_idx]));
}
} else if (pool_type == "MIN" || pool_type == "MAX") {
} else if (reduce_op == "MIN" || reduce_op == "MAX") {
for (int i = 0; i < index_size; ++i) {
const IndexT& forward_src_idx = d_index[i];
const IndexT& forward_dst_idx = s_index[i];
@@ -75,7 +75,7 @@ void GraphSendRecvGradOpKernelLaunchHelper(
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
const std::string& pool_type,
const std::string& reduce_op,
DenseTensor* x_grad,
const DenseTensor* dst_count = nullptr,
const DenseTensor* out = nullptr) {
@@ -94,23 +94,23 @@ void GraphSendRecvGradOpKernelLaunchHelper(
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();

if (pool_type == "SUM") {
if (reduce_op == "SUM") {
GraphSendRecvCpuGradLoop<T, IndexT, GraphSendRecvSumFunctor<T>>(
index_size, d_index, s_index, out_grad, x, x_grad, pool_type);
} else if (pool_type == "MEAN") {
index_size, d_index, s_index, out_grad, x, x_grad, reduce_op);
} else if (reduce_op == "MEAN") {
const int* s_count = dst_count->data<int>();
// Functor not used here.
GraphSendRecvCpuGradLoop<T, IndexT, GraphSendRecvSumFunctor<T>>(
index_size, d_index, s_index, out_grad, x, x_grad, pool_type, s_count);
} else if (pool_type == "MIN" || pool_type == "MAX") {
index_size, d_index, s_index, out_grad, x, x_grad, reduce_op, s_count);
} else if (reduce_op == "MIN" || reduce_op == "MAX") {
// Functor not used here.
GraphSendRecvCpuGradLoop<T, IndexT, GraphSendRecvMinFunctor<T>>(index_size,
d_index,
s_index,
out_grad,
x,
x_grad,
pool_type,
reduce_op,
nullptr,
out);
}
@@ -124,7 +124,7 @@ void GraphSendRecvGradKernel(const Context& ctx,
const paddle::optional<DenseTensor>& out,
const paddle::optional<DenseTensor>& dst_count,
const DenseTensor& out_grad,
const std::string& pool_type,
const std::string& reduce_op,
DenseTensor* x_grad) {
auto index_type = src_index.dtype();
if (index_type == phi::DataType::INT32) {
@@ -134,7 +134,7 @@ void GraphSendRecvGradKernel(const Context& ctx,
x,
src_index,
dst_index,
pool_type,
reduce_op,
x_grad,
dst_count.get_ptr(),
out.get_ptr());
@@ -145,7 +145,7 @@ void GraphSendRecvGradKernel(const Context& ctx,
x,
src_index,
dst_index,
pool_type,
reduce_op,
x_grad,
dst_count.get_ptr(),
out.get_ptr());
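In the MEAN branch above, the launch helper hands d_index and s_index to the loop in swapped order, so (reading the visible hunks) the gradient that lands on a forward source row is the output gradient at the forward destination row divided by that destination's Dst_count. A minimal sketch of that rule outside the Paddle types; x_grad is assumed pre-allocated and zero-filled:

#include <vector>

// Sketch of the MEAN backward rule: for every edge i, x_grad[src_index[i]]
// accumulates out_grad[dst_index[i]] scaled by 1 / dst_count[dst_index[i]],
// where dst_count is the forward pass's Dst_count output.
void SendRecvMeanGradSketch(const std::vector<float>& out_grad, int dim,
                            const std::vector<int>& src_index,
                            const std::vector<int>& dst_index,
                            const std::vector<int>& dst_count,
                            std::vector<float>* x_grad) {
  for (size_t i = 0; i < src_index.size(); ++i) {
    const int s = src_index[i], d = dst_index[i];
    for (int k = 0; k < dim; ++k) {
      (*x_grad)[s * dim + k] += out_grad[d * dim + k] / dst_count[d];
    }
  }
}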
32 changes: 16 additions & 16 deletions paddle/phi/kernels/cpu/graph_send_recv_kernel.cc
@@ -32,17 +32,17 @@ void GraphSendRecvCpuLoop(const int& input_size,
const IndexT* d_index,
const DenseTensor& src,
DenseTensor* dst,
const std::string& pool_type,
const std::string& reduce_op,
int* dst_count = nullptr) {
Functor functor;
if (pool_type == "SUM") {
if (reduce_op == "SUM") {
for (int i = 0; i < index_size; ++i) {
const IndexT& src_idx = s_index[i];
const IndexT& dst_idx = d_index[i];
ElementwiseInnerOperation<T, IndexT, Functor>(
src, dst, src_idx, dst_idx, false, functor);
}
} else if (pool_type == "MEAN") {
} else if (reduce_op == "MEAN") {
for (int i = 0; i < index_size; ++i) {
const IndexT& src_idx = s_index[i];
const IndexT& dst_idx = d_index[i];
@@ -59,7 +59,7 @@ void GraphSendRecvCpuLoop(const int& input_size,
auto eigen_dst = phi::EigenVector<T>::Flatten(dst_slice);
eigen_dst = eigen_dst / static_cast<T>(*(dst_count + i));
}
} else if (pool_type == "MIN" || pool_type == "MAX") {
} else if (reduce_op == "MIN" || reduce_op == "MAX") {
std::set<IndexT> existed_dst;
for (int i = 0; i < index_size; ++i) {
const IndexT& src_idx = s_index[i];
@@ -82,7 +82,7 @@ void GraphSendRecvOpKernelLaunchHelper(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
const std::string& pool_type,
const std::string& reduce_op,
int64_t out_size,
DenseTensor* out,
DenseTensor* dst_count = nullptr) {
@@ -117,16 +117,16 @@ void GraphSendRecvOpKernelLaunchHelper(const Context& ctx,
const IndexT* s_index = src_index.data<IndexT>();
const IndexT* d_index = dst_index.data<IndexT>();

if (pool_type == "SUM") {
if (reduce_op == "SUM") {
GraphSendRecvCpuLoop<T, IndexT, GraphSendRecvSumFunctor<T>>(
src_dims[0], index_size, s_index, d_index, x, out, pool_type);
} else if (pool_type == "MIN") {
src_dims[0], index_size, s_index, d_index, x, out, reduce_op);
} else if (reduce_op == "MIN") {
GraphSendRecvCpuLoop<T, IndexT, GraphSendRecvMinFunctor<T>>(
src_dims[0], index_size, s_index, d_index, x, out, pool_type);
} else if (pool_type == "MAX") {
src_dims[0], index_size, s_index, d_index, x, out, reduce_op);
} else if (reduce_op == "MAX") {
GraphSendRecvCpuLoop<T, IndexT, GraphSendRecvMaxFunctor<T>>(
src_dims[0], index_size, s_index, d_index, x, out, pool_type);
} else if (pool_type == "MEAN") {
src_dims[0], index_size, s_index, d_index, x, out, reduce_op);
} else if (reduce_op == "MEAN") {
int64_t input_size = out_size <= 0 ? src_dims[0] : out_size;
dst_count->Resize({input_size});
ctx.template Alloc<int>(dst_count);
@@ -138,7 +138,7 @@ void GraphSendRecvOpKernelLaunchHelper(const Context& ctx,
d_index,
x,
out,
pool_type,
reduce_op,
p_dst_count);
}
}
@@ -148,7 +148,7 @@ void GraphSendRecvKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& src_index,
const DenseTensor& dst_index,
const std::string& pool_type,
const std::string& reduce_op,
const IntArray& out_size,
DenseTensor* out,
DenseTensor* dst_count) {
@@ -159,7 +159,7 @@ void GraphSendRecvKernel(const Context& ctx,
x,
src_index,
dst_index,
pool_type,
reduce_op,
out_size_data[0],
out,
dst_count);
@@ -168,7 +168,7 @@ void GraphSendRecvKernel(const Context& ctx,
x,
src_index,
dst_index,
pool_type,
reduce_op,
out_size_data[0],
out,
dst_count);
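One subtlety in the MIN/MAX branch above is the existed_dst set: the first message that reaches a destination must overwrite the freshly initialized output row rather than be compared against it. A small standalone sketch of the MAX case (out is assumed pre-sized; destinations that receive no message keep their initial values):

#include <algorithm>
#include <set>
#include <vector>

// Sketch of the MAX reduce: the first message seen for a destination row
// overwrites it; every later message is folded in with std::max.
void SendRecvMaxSketch(const std::vector<float>& x, int dim,
                       const std::vector<int>& src_index,
                       const std::vector<int>& dst_index,
                       std::vector<float>* out) {
  std::set<int> existed_dst;  // destinations that have been written once
  for (size_t i = 0; i < src_index.size(); ++i) {
    const int s = src_index[i], d = dst_index[i];
    const bool first_write = existed_dst.insert(d).second;
    for (int k = 0; k < dim; ++k) {
      float& o = (*out)[d * dim + k];
      const float v = x[s * dim + k];
      o = first_write ? v : std::max(o, v);
    }
  }
}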
(The remaining 16 changed files in this commit are not shown in this view.)
