Commit 7b47e71: merge
yuanlehome committed Dec 8, 2022
2 parents 7e062dd + a4d9851
Showing 872 changed files with 10,163 additions and 17,504 deletions.
6 changes: 0 additions & 6 deletions .flake8
@@ -37,9 +37,3 @@ per-file-ignores =
.cmake-format.py: F821
python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
# These files will be fixed in the future
python/paddle/fluid/tests/unittests/fft/test_fft_with_static_graph.py: F811
python/paddle/fluid/tests/unittests/test_activation_nn_grad.py: F811
python/paddle/fluid/tests/unittests/test_lstm_cudnn_op.py: F811
python/paddle/fluid/tests/unittests/test_matmul_v2_op.py: F811
python/paddle/fluid/tests/unittests/test_rrelu_op.py: F811
2 changes: 1 addition & 1 deletion cmake/external/cutlass.cmake
@@ -17,7 +17,7 @@ include(ExternalProject)
set(CUTLASS_PREFIX_DIR ${THIRD_PARTY_PATH}/cutlass)

set(CUTLASS_REPOSITORY https://github.com/NVIDIA/cutlass.git)
set(CUTLASS_TAG v2.9.1)
set(CUTLASS_TAG v2.10.0)

include_directories("${THIRD_PARTY_PATH}/cutlass/src/extern_cutlass/")
include_directories("${THIRD_PARTY_PATH}/cutlass/src/extern_cutlass/include/")
4 changes: 2 additions & 2 deletions cmake/external/dgc.cmake
@@ -23,14 +23,14 @@ set(DGC_INCLUDE_DIR
set(DGC_LIBRARIES
"${DGC_INSTALL_DIR}/lib/libdgc.a"
CACHE FILEPATH "dgc library." FORCE)
set(DGC_URL "https://fleet.bj.bcebos.com/dgc/collective_f66ef73.tgz")
set(DGC_URL "https://fleet.bj.bcebos.com/dgc/collective_7369ff.tgz")
include_directories(${DGC_INCLUDE_DIR})

ExternalProject_Add(
extern_dgc
${EXTERNAL_PROJECT_LOG_ARGS}
URL ${DGC_URL}
URL_MD5 "94e6fa1bc97169d0e1aad44570fe3251"
URL_MD5 "ede459281a0f979da8d84f81287369ff"
PREFIX "${DGC_PREFIX_DIR}"
CONFIGURE_COMMAND ""
BUILD_COMMAND make -j${NPROC}
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/graph_helper.cc
@@ -713,7 +713,7 @@ static void GetGraphOpDesc(const std::vector<Node *> &nodes,
UpdateControlOpSkipEagerDeletionVars(*n, graph, graph_idx, n->Name());
}
ops->emplace_back(*n->Op());
VLOG(4) << n->ToString();
VLOG(5) << n->ToString();
}
// delete no OpDesc op
}
13 changes: 7 additions & 6 deletions paddle/fluid/framework/ir/graph_pattern_detector.cc
@@ -2068,8 +2068,9 @@ PDNode *patterns::Flatten2Matmul::operator()() {
return matmul_out;
}

PDNode *patterns::ConvResidual::operator()(bool with_residual_data) {
auto conv_op = pattern->NewNode(conv_op_repr())->assert_is_op("conv2d");
PDNode *patterns::ConvResidual::operator()(const std::string &conv_type,
bool with_residual_data) {
auto conv_op = pattern->NewNode(conv_op_repr())->assert_is_op(conv_type);

if (!with_residual_data) {
conv_op->assert_more([&](Node *x) {
@@ -2082,22 +2083,22 @@ PDNode *patterns::ConvResidual::operator()(bool with_residual_data) {

auto input_var = pattern->NewNode(conv_input_repr())
->AsInput()
->assert_is_op_input("conv2d", "Input");
->assert_is_op_input(conv_type, "Input");

auto filter_var = pattern->NewNode(conv_filter_repr())
->AsInput()
->assert_is_op_input("conv2d", "Filter");
->assert_is_op_input(conv_type, "Filter");

auto output_var = pattern->NewNode(conv_output_repr())
->AsOutput()
->assert_is_op_output("conv2d", "Output");
->assert_is_op_output(conv_type, "Output");

std::vector<PDNode *> links_from{input_var, filter_var};

if (with_residual_data) {
auto res_conn_var = pattern->NewNode(conv_residual_data_repr())
->AsInput()
->assert_is_op_input("conv2d", "ResidualData");
->assert_is_op_input(conv_type, "ResidualData");
links_from.push_back(res_conn_var);
}

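The ConvResidual pattern is now parameterized by the conv op type, so the same matcher can be built for plain conv2d or for the fused_conv2d op that earlier oneDNN fuse passes emit. A minimal sketch of how a caller would construct the pattern; the wrapper function and scope name below are illustrative assumptions, only the ConvResidual API itself comes from this diff:

```cpp
// Sketch only: assumes PaddlePaddle's internal headers and build environment.
#include <string>

#include "paddle/fluid/framework/ir/graph_pattern_detector.h"

namespace paddle {
namespace framework {
namespace ir {

// Illustrative wrapper: build the residual-conv pattern for a chosen conv op
// type. After this change the same pattern code serves "conv2d" and
// "fused_conv2d" alike.
PDNode *BuildConvResidualPattern(GraphPatternDetector *gpd,
                                 const std::string &conv_type,
                                 bool with_residual_data) {
  patterns::ConvResidual conv_pattern{gpd->mutable_pattern(),
                                      "conv_residual_example" /*hypothetical*/};
  return conv_pattern(conv_type, with_residual_data);
}

}  // namespace ir
}  // namespace framework
}  // namespace paddle
```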
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/graph_pattern_detector.h
@@ -1057,7 +1057,7 @@ struct ConvResidual : public PatternBase {
ConvResidual(PDPattern* pattern, const std::string& name_scope)
: PatternBase(pattern, name_scope, "conv_residual") {}

PDNode* operator()(bool with_residual_data);
PDNode* operator()(const std::string& conv_type, bool with_residual_data);

PATTERN_DECL_NODE(conv_op);
PATTERN_DECL_NODE(conv_input);
(additional changed file)
@@ -319,7 +319,7 @@ void ComputePropagateScalesMkldnnPass::ComputeWeightScales(
ir::Graph* graph, Scope* scope, StringPairMap* var_quant_scales) const {
ComputeVarScales(graph,
scope,
{"conv2d", "depthwise_conv2d"},
{"conv2d", "depthwise_conv2d", "fused_conv2d"},
"Filter",
1,
var_quant_scales);
@@ -446,7 +446,7 @@ void ComputePropagateScalesMkldnnPass::UpdateReluOutputScales(
if (op->Type() == "relu") {
is_unsigned = true;
} else {
if (op->Type() == "conv2d") {
if (op->Type() == "conv2d" || op->Type() == "fused_conv2d") {
act_name = "fuse_activation";
output_name = "Output";
} else if (op->Type() == "fc") {
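For scale computation and propagation, fused_conv2d behaves like conv2d: it carries the same Filter input and the same fuse_activation/Output attributes, which is why both branches above now accept it. A hedged sketch of that idea as a small predicate; the helper name is hypothetical and not part of the pass:

```cpp
#include <string>

// Hypothetical helper: op types that share the conv2d handling when weight
// scales are computed and relu output scales are marked unsigned.
inline bool IsConvLikeForScales(const std::string &op_type) {
  return op_type == "conv2d" || op_type == "fused_conv2d";
}
```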
(additional changed file)
@@ -26,7 +26,7 @@ using string::PrettyLogDetail;

void ConvActivationMkldnnFusePass::ApplyImpl(Graph* graph) const {
auto act_types = phi::funcs::GetSupportedActivations();
std::vector<std::string> conv_types = {"conv2d"};
std::vector<std::string> conv_types = {"conv2d", "fused_conv2d"};

for (auto& act_type : act_types) {
FuseConvConcatAct(graph, act_type);
@@ -218,6 +218,45 @@ ConvActivationMkldnnFusePass::ConvActivationMkldnnFusePass() {
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("fused_conv2d"))
.AddInput("Input")
.IsTensor()
.End()
.AddInput("Filter")
.IsTensor()
.End()
.AddInput("Bias")
.IsOptional()
.IsTensor()
.End()
.AddInput("ResidualData")
.IsTensor()
.IsOptional()
.End()
.AddOutput("Output")
.IsTensor()
.End()
.AddAttr("strides")
.IsType<std::vector<int>>()
.End()
.AddAttr("paddings")
.IsType<std::vector<int>>()
.End()
.AddAttr("padding_algorithm")
.IsOptional()
.IsStringIn({"EXPLICIT", "SAME", "VALID"})
.End()
.AddAttr("groups")
.IsNumGE(1)
.End()
.AddAttr("dilations")
.IsType<std::vector<int>>()
.End()
.AddAttr("data_format")
.IsOptional()
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("concat"))
.AddInput("X")
.End()
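With fused_conv2d added to conv_types and given its own OpCompat entry, the pass effectively repeats its conv+activation fusion for both conv variants. A self-contained sketch of that dispatch; the function below only logs the pairs and is a stand-in, not the pass's real member functions:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Stand-in for the per-(conv, activation) fuse step; the real pass rewrites
// the graph instead of printing.
void FuseConvAct(const std::string &conv_type, const std::string &act_type) {
  std::cout << "fuse " << conv_type << " + " << act_type << "\n";
}

int main() {
  const std::vector<std::string> conv_types = {"conv2d", "fused_conv2d"};
  const std::vector<std::string> act_types = {"relu", "gelu"};  // illustrative subset
  for (const auto &conv_type : conv_types) {
    for (const auto &act_type : act_types) {
      FuseConvAct(conv_type, act_type);
    }
  }
  return 0;
}
```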
87 changes: 83 additions & 4 deletions paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
@@ -61,6 +61,40 @@ ConvBiasFusePass::ConvBiasFusePass() {
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("fused_conv2d"))
.AddInput("Input")
.IsTensor()
.End()
.AddInput("Filter")
.IsTensor()
.End()
.AddInput("Bias")
.IsTensor()
.IsOptional()
.End()
.AddOutput("Output")
.IsTensor()
.End()
.AddAttr("strides")
.IsType<std::vector<int>>()
.End()
.AddAttr("paddings")
.IsType<std::vector<int>>()
.End()
.AddAttr("padding_algorithm")
.IsOptional()
.IsStringIn({"EXPLICIT", "SAME", "VALID"})
.End()
.AddAttr("groups")
.IsNumGE(1)
.End()
.AddAttr("dilations")
.IsType<std::vector<int>>()
.End()
.AddAttr("data_format")
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("elementwise_add"))
.AddInput("X")
.IsTensor()
@@ -165,6 +199,40 @@ Conv3DBiasFusePass::Conv3DBiasFusePass() {
.IsStringIn({"NDHWC", "NCDHW"})
.End();

AddOpCompat(OpCompat("fused_conv3d"))
.AddInput("Input")
.IsTensor()
.End()
.AddInput("Filter")
.IsTensor()
.End()
.AddInput("Bias")
.IsTensor()
.IsOptional()
.End()
.AddOutput("Output")
.IsTensor()
.End()
.AddAttr("strides")
.IsType<std::vector<int>>()
.End()
.AddAttr("paddings")
.IsType<std::vector<int>>()
.End()
.AddAttr("padding_algorithm")
.IsOptional()
.IsStringIn({"EXPLICIT", "SAME", "VALID"})
.End()
.AddAttr("groups")
.IsNumGE(1)
.End()
.AddAttr("dilations")
.IsType<std::vector<int>>()
.End()
.AddAttr("data_format")
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("elementwise_add"))
.AddInput("X")
.IsTensor()
@@ -203,6 +271,16 @@ phi::DenseTensor tensor_apply_eltwise(const phi::DenseTensor& vec_a,
}

void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
FuseConvBias(graph, type(), fused_type());
if (type() != fused_type()) {
// Is the second pass useful?
FuseConvBias(graph, fused_type(), fused_type());
}
}

void ConvBiasFusePass::FuseConvBias(ir::Graph* graph,
const std::string& conv_type,
const std::string& fused_conv) const {
PADDLE_ENFORCE_NOT_NULL(
graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
@@ -216,9 +294,9 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
gpd.mutable_pattern()
->NewNode(patterns::PDNodeName(name_scope_, "conv_input"))
->AsInput()
->assert_is_op_input(type(), "Input");
->assert_is_op_input(conv_type, "Input");
patterns::ConvBias conv_bias_pattern(gpd.mutable_pattern(), name_scope_);
conv_bias_pattern(conv_input, type());
conv_bias_pattern(conv_input, conv_type);
int found_conv_bias_count = 0;
auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
Graph* g) {
@@ -249,7 +327,7 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
// check if fuse can be done and if MKL-DNN should be used
FuseOptions fuse_option = FindFuseOption(*conv, *eltwise);
if (fuse_option == DO_NOT_FUSE || fuse_option == FUSE_NATIVE) {
VLOG(3) << "do not perform " + type() + "+bias fuse";
VLOG(3) << "do not perform " + conv_type + "+bias fuse";
return;
}

@@ -294,7 +372,7 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
desc.SetInput("Filter", std::vector<std::string>({conv_weight->Name()}));
desc.SetInput("Bias", std::vector<std::string>({eltwise_bias->Name()}));
desc.SetOutput("Output", std::vector<std::string>({eltwise_out->Name()}));
desc.SetType(type());
desc.SetType(fused_conv);

for (auto& attr : conv->Op()->GetAttrMap()) {
desc.SetAttr(attr.first, attr.second);
@@ -323,6 +401,7 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
type());
}
}

} // namespace ir
} // namespace framework
} // namespace paddle
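ApplyImpl now fuses in two rounds: the first FuseConvBias call rewrites type() (e.g. conv2d) plus elementwise_add into fused_type() (fused_conv2d), and when the two names differ a second call re-scans for fused_conv2d ops that still have a bias add behind them, for instance convs already rewritten by an earlier fuse pass. A rough driver sketch in the style of Paddle's pass testers; the registration name and driver code are assumptions, not something this diff shows:

```cpp
// Sketch only: assumes the pass is registered as "conv_bias_mkldnn_fuse_pass"
// and that Paddle's internal headers are available.
#include <memory>

#include "paddle/fluid/framework/ir/graph.h"
#include "paddle/fluid/framework/ir/pass.h"

namespace fw_ir = paddle::framework::ir;

std::unique_ptr<fw_ir::Graph> RunConvBiasFuse(
    std::unique_ptr<fw_ir::Graph> graph) {
  auto pass = fw_ir::PassRegistry::Instance().Get("conv_bias_mkldnn_fuse_pass");
  // Both fuse rounds (conv2d -> fused_conv2d, then fused_conv2d -> fused_conv2d)
  // run inside this single Apply call.
  graph.reset(pass->Apply(graph.release()));
  return graph;
}
```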
8 changes: 8 additions & 0 deletions paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h
@@ -32,24 +32,32 @@ class ConvBiasFusePass : public FusePassBase {
ConvBiasFusePass();
virtual ~ConvBiasFusePass() {}
virtual std::string type() const { return "conv2d"; }
virtual std::string fused_type() const { return "fused_conv2d"; }

protected:
void ApplyImpl(ir::Graph* graph) const override;
void FuseConvBias(ir::Graph* graph,
const std::string& conv_type,
const std::string& fused_conv) const;

const std::string name_scope_{"conv_bias_mkldnn_fuse"};
};

/*
* Fuse the Conv3D and Elementwise_add to a Conv3DBiasOp.
*/
class Conv2DTransposeBiasFusePass : public ConvBiasFusePass {
public:
Conv2DTransposeBiasFusePass();
std::string type() const override { return "conv2d_transpose"; }
std::string fused_type() const override { return "conv2d_transpose"; }
};

class Conv3DBiasFusePass : public ConvBiasFusePass {
public:
Conv3DBiasFusePass();
std::string type() const override { return "conv3d"; }
std::string fused_type() const override { return "fused_conv3d"; }
};
} // namespace ir
} // namespace framework
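The type()/fused_type() pair is the extension point: each derived pass names the op it matches and the op it emits, and Conv2DTransposeBiasFusePass keeps its own type for both because there is no fused transpose op. A hypothetical further variant, shown only to illustrate the pattern (this class is not part of the commit):

```cpp
#include <string>

#include "paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h"

namespace paddle {
namespace framework {
namespace ir {

// Hypothetical example: a new conv variant only has to name the op it matches
// and the fused op that FuseConvBias should produce.
class DepthwiseConvBiasFusePassExample : public ConvBiasFusePass {
 public:
  std::string type() const override { return "depthwise_conv2d"; }
  std::string fused_type() const override { return "fused_conv2d"; }
};

}  // namespace ir
}  // namespace framework
}  // namespace paddle
```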
(additional changed file)
@@ -139,7 +139,8 @@ void MainTest(bool convWithExistingBias) {
int conv_bias_count = 0;

for (auto* node : graph->Nodes()) {
if (node->IsOp() && node->Op()->Type() == "conv2d") {
if (node->IsOp() && (node->Op()->Type() == "conv2d" ||
node->Op()->Type() == "fused_conv2d")) {
auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
11 changes: 7 additions & 4 deletions paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
@@ -388,11 +388,12 @@ void CPUQuantizePass::GetQuantInfo(Graph* graph) const {
}

void CPUQuantizePass::QuantizeConv(Graph* graph,
const std::string& conv_type,
bool with_residual_data) const {
GraphPatternDetector gpd;
auto pattern = gpd.mutable_pattern();
patterns::ConvResidual conv_pattern{pattern, name_scope_};
conv_pattern(with_residual_data);
conv_pattern(conv_type, with_residual_data);

int quantize_conv_count = 0;
auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
@@ -510,7 +511,7 @@ void CPUQuantizePass::QuantizeConv(Graph* graph,
AddStatis(quantize_conv_count);

LogQuantizedOpsCounter(
"conv2d",
conv_type,
quantize_conv_count,
((with_residual_data) ? "with residual connection" : ""));
}
@@ -1247,8 +1248,10 @@ void CPUQuantizePass::ApplyImpl(ir::Graph* graph) const {
platform::errors::InvalidArgument("Scope cannot be nullptr."));

GetQuantInfo(graph);
QuantizeConv(graph, false /* with_residual_data */);
QuantizeConv(graph, true /* with_residual_data */);
QuantizeConv(graph, "conv2d", false /* with_residual_data */);
QuantizeConv(graph, "conv2d", true /* with_residual_data */);
QuantizeConv(graph, "fused_conv2d", false /* with_residual_data */);
QuantizeConv(graph, "fused_conv2d", true /* with_residual_data */);
QuantizePool(graph);
QuantizeConcat(graph);
QuantizePriorBox(graph);
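The four QuantizeConv calls above enumerate both conv op types, each with and without residual data, and LogQuantizedOpsCounter now reports counts under the actual conv_type instead of a hard-coded "conv2d". A self-contained restatement of that dispatch; QuantizeConv here is a logging stub, not the pass's member function:

```cpp
#include <iostream>
#include <string>

// Logging stub standing in for CPUQuantizePass::QuantizeConv.
void QuantizeConv(const std::string &conv_type, bool with_residual_data) {
  std::cout << "quantize " << conv_type
            << (with_residual_data ? " with residual connection" : "") << "\n";
}

int main() {
  // Same dispatch as the updated ApplyImpl: each conv type is handled both
  // without and with residual data.
  for (const std::string conv_type : {"conv2d", "fused_conv2d"}) {
    QuantizeConv(conv_type, /*with_residual_data=*/false);
    QuantizeConv(conv_type, /*with_residual_data=*/true);
  }
  return 0;
}
```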
4 changes: 3 additions & 1 deletion paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.h
@@ -49,7 +49,9 @@ class CPUQuantizePass : public FusePassBase {
protected:
void ApplyImpl(ir::Graph* graph) const override;

void QuantizeConv(Graph* graph, bool with_residual_data) const;
void QuantizeConv(Graph* graph,
const std::string& conv_type,
bool with_residual_data) const;
void QuantizeFc(Graph* graph, bool with_residual_data) const;
void QuantizePool(Graph* graph) const;
void QuantizeConcat(Graph* graph) const;
(diffs for the remaining changed files not loaded)