Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions paddle/ap/include/axpr/builtin_class_instance_method_class.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,14 +88,14 @@ struct MethodClassImpl<ValueT, BuiltinClassInstance<ValueT>> {
if (opt_val.has_value()) {
return opt_val.value();
}
const auto& opt_gettattr = GetClassAttr(self, "__getattr__");
const auto& opt_getattr = GetClassAttr(self, "__getattr__");
const auto& class_attrs = self.type.class_attrs();
ADT_CHECK(opt_gettattr.has_value())
ADT_CHECK(opt_getattr.has_value())
<< adt::errors::AttributeError{std::string() + class_attrs->class_name +
" class has no attribute '__getattr__'"};
std::vector<ValueT> args{attr_name_val};
ADT_LET_CONST_REF(ret,
interpreter->InterpretCall(opt_gettattr.value(), args));
interpreter->InterpretCall(opt_getattr.value(), args));
return ret;
}

Expand Down
12 changes: 6 additions & 6 deletions paddle/cinn/backends/codegen_c.cc
Original file line number Diff line number Diff line change
Expand Up @@ -870,8 +870,8 @@ void CodeGenC::Visit(const ir::_LoweredFunc_ *op) {
std::vector<Expr> new_body;

std::vector<Expr> create_temp_buffers = op->PrepareCreateTempBufferExprs();
std::vector<Expr> alloca_temp_buffers = op->PrepareAllocTempBufferExprs();
std::vector<Expr> dealloca_temp_buffers = op->PrepareDeallocTempBufferExprs();
std::vector<Expr> alloc_temp_buffers = op->PrepareAllocTempBufferExprs();
std::vector<Expr> dealloc_temp_buffers = op->PrepareDeallocTempBufferExprs();
#define APPEND_TO_NEW_BODY(field__) \
new_body.insert( \
std::end(new_body), std::begin(op->field__), std::end(op->field__));
Expand All @@ -881,13 +881,13 @@ void CodeGenC::Visit(const ir::_LoweredFunc_ *op) {
std::end(create_temp_buffers));
APPEND_TO_NEW_BODY(alloc_output_buffer_exprs)
new_body.insert(std::end(new_body),
std::begin(alloca_temp_buffers),
std::end(alloca_temp_buffers));
std::begin(alloc_temp_buffers),
std::end(alloc_temp_buffers));
APPEND_TO_NEW_BODY(buffer_data_cast_exprs)
new_body.push_back(op->body);
new_body.insert(std::end(new_body),
std::begin(dealloca_temp_buffers),
std::end(dealloca_temp_buffers));
std::begin(dealloc_temp_buffers),
std::end(dealloc_temp_buffers));
APPEND_TO_NEW_BODY(dealloc_output_buffer_exprs)

Expr func_body = ir::Block::Make(new_body);
Expand Down
4 changes: 2 additions & 2 deletions paddle/cinn/backends/codegen_gpu_dev.cc
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ void CodeGenGpuDev::Visit(const ir::_LoweredFunc_ *op) {

auto axis_range_assumption_stmts = op->PrepareAxisRangeAssumptionStmts();
auto alloca_temp_buffer_stmts = op->PrepareAllocTempBufferStmts();
auto temp_buffer_alia_stmts = GenerateBufferAliasStmts(op, op->temp_bufs);
auto temp_buffer_alias_stmts = GenerateBufferAliasStmts(op, op->temp_bufs);
auto alias_var_stmts = op->CudaAliasVarStmts();
auto dealloc_temp_buffer_stmts =
FilterDeallocTempBuffers(op->PrepareDeallocTempBufferStmts());
Expand All @@ -180,7 +180,7 @@ void CodeGenGpuDev::Visit(const ir::_LoweredFunc_ *op) {
std::end(new_body_stmts), std::begin(field__), std::end(field__));
APPEND_TO_NEW_BODY_STMTS(axis_range_assumption_stmts)
APPEND_TO_NEW_BODY_STMTS(alloca_temp_buffer_stmts)
APPEND_TO_NEW_BODY_STMTS(temp_buffer_alia_stmts)
APPEND_TO_NEW_BODY_STMTS(temp_buffer_alias_stmts)
APPEND_TO_NEW_BODY_STMTS(alias_var_stmts)
APPEND_TO_NEW_BODY_STMTS(op->body_block->stmts())
APPEND_TO_NEW_BODY_STMTS(dealloc_temp_buffer_stmts);
Expand Down
8 changes: 4 additions & 4 deletions paddle/cinn/backends/llvm/codegen_llvm.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1165,16 +1165,16 @@ llvm::Value *CodeGenLLVM::Visit(const ir::_LoweredFunc_ *op) {

std::vector<Expr> new_body;
auto create_temp_buffers = op->PrepareCreateTempBufferExprs();
auto alloca_temp_buffers = op->PrepareAllocTempBufferExprs();
auto dealloca_temp_buffers = op->PrepareDeallocTempBufferExprs();
auto alloc_temp_buffers = op->PrepareAllocTempBufferExprs();
auto dealloc_temp_buffers = op->PrepareDeallocTempBufferExprs();

appendBody(new_body, op->argument_prepare_exprs);
appendBody(new_body, create_temp_buffers);
appendBody(new_body, alloca_temp_buffers);
appendBody(new_body, alloc_temp_buffers);
appendBody(new_body, op->alloc_output_buffer_exprs);
appendBody(new_body, op->buffer_data_cast_exprs);
appendBody(new_body, op->body);
appendBody(new_body, dealloca_temp_buffers);
appendBody(new_body, dealloc_temp_buffers);
appendBody(new_body, op->dealloc_output_buffer_exprs);

ir::Expr function_body = ir::Block::Make(new_body);
Expand Down
3 changes: 0 additions & 3 deletions paddle/cinn/backends/nvrtc/nvrtc_util.cc
Original file line number Diff line number Diff line change
Expand Up @@ -291,9 +291,6 @@ std::string Compiler::CompileWithNvcc(const std::string& cuda_c) {
return prefix_name_ + ".cubin";
}

// std::string Compiler::GetPtx() { return ReadFile(prefix_name_ + ".ptx",
// std::ios::in); }

void Compiler::CompileToPtx() {
auto include_dir = cinn::common::Context::Global().runtime_include_dir();
std::string include_dir_str = "";
Expand Down
4 changes: 2 additions & 2 deletions paddle/cinn/backends/sycl/codegen_sycl_dev.cc
Original file line number Diff line number Diff line change
Expand Up @@ -156,7 +156,7 @@ void CodeGenSyclDevice::PrintFunctionBody(const ir::_LoweredFunc_ *op) {
std::vector<ir::stmt::StmtRef> new_body_stmts;
auto axis_range_assumption_stmts = op->PrepareAxisRangeAssumptionStmts();
auto alloca_temp_buffer_stmts = op->PrepareAllocTempBufferStmts();
auto temp_buffer_alia_stmts = GenerateBufferAliasStmts(op, op->temp_bufs);
auto temp_buffer_alias_stmts = GenerateBufferAliasStmts(op, op->temp_bufs);
auto alias_var_stmts = op->CudaAliasVarStmts();
auto dealloc_temp_buffer_stmts =
FilterDeallocTempBuffers(op->PrepareDeallocTempBufferStmts());
Expand All @@ -165,7 +165,7 @@ void CodeGenSyclDevice::PrintFunctionBody(const ir::_LoweredFunc_ *op) {
std::end(new_body_stmts), std::begin(field__), std::end(field__));
APPEND_TO_NEW_BODY_STMTS(axis_range_assumption_stmts)
APPEND_TO_NEW_BODY_STMTS(alloca_temp_buffer_stmts)
APPEND_TO_NEW_BODY_STMTS(temp_buffer_alia_stmts)
APPEND_TO_NEW_BODY_STMTS(temp_buffer_alias_stmts)
APPEND_TO_NEW_BODY_STMTS(alias_var_stmts)
APPEND_TO_NEW_BODY_STMTS(op->body_block->stmts())
APPEND_TO_NEW_BODY_STMTS(dealloc_temp_buffer_stmts);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -477,4 +477,4 @@ REGISTER_PASS_CAPABILITY(embedding_eltwise_layernorm_fuse_pass)
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("lookup_table", 1)
.LE("lookup_table_v2", 1)
.LE("elementweise_add", 1));
.LE("elementwise_add", 1));
2 changes: 0 additions & 2 deletions paddle/fluid/framework/ir/fuse_bn_act_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -29,15 +29,13 @@ namespace paddle::framework::ir {

void FuseBatchNormActPass::ApplyImpl(ir::Graph *graph) const {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 4, 1)
// forward
std::unordered_set<std::string> act_types = {"relu"};
graph = FuseBatchNormAct(graph, act_types);
// backward
std::unordered_set<std::string> act_grad_types = {"relu_grad"};
graph = FuseBatchNormActGrad(graph, act_grad_types); // NOLINT
#endif
#endif
}

// act(bn(x))
Expand Down
2 changes: 0 additions & 2 deletions paddle/fluid/framework/ir/fuse_bn_add_act_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,13 @@ namespace paddle::framework::ir {

void FuseBatchNormAddActPass::ApplyImpl(ir::Graph *graph) const {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 4, 1)
// forward
std::unordered_set<std::string> act_types = {"relu"};
graph = FuseBatchNormAddAct(graph, act_types);
// backward
std::unordered_set<std::string> act_grad_types = {"relu_grad"};
graph = FuseBatchNormAddActGrad(graph, act_grad_types); // NOLINT
#endif
#endif
}

// act(bn(x) + z)
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/graph_pattern_detector.cc
Original file line number Diff line number Diff line change
Expand Up @@ -5436,7 +5436,7 @@ PDNode *patterns::BNAddActConvGrad::operator()(
return bn1_grad;
}

void patterns::SparseConvOptimPartern::operator()() {
void patterns::SparseConvOptimPattern::operator()() {
auto sp_conv3d_x = pattern->NewNode(sp_conv3d_x_repr())
->AsInput()
->assert_is_op_input("sparse_conv3d", "x");
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/framework/ir/graph_pattern_detector.h
Original file line number Diff line number Diff line change
Expand Up @@ -2637,9 +2637,9 @@ struct BNAddActConvGrad : public PatternBase {
PATTERN_DECL_NODE(d_bn2_bias);
};

struct SparseConvOptimPartern : public PatternBase {
SparseConvOptimPartern(PDPattern* pattern, const std::string& name_scope)
: PatternBase(pattern, name_scope, "sparse_conv_optim_partern") {}
struct SparseConvOptimPattern : public PatternBase {
SparseConvOptimPattern(PDPattern* pattern, const std::string& name_scope)
: PatternBase(pattern, name_scope, "sparse_conv_optim_pattern") {}

void operator()();
PATTERN_DECL_NODE(sp_conv3d_x);
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/layer_norm_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ bool validateReduceOpAttrs(const Node* node,
}
for (size_t i = 1; i < dims.size(); ++i) {
if (1 != dims[i] - dims[i - 1]) {
LOG(WARNING) << "The LayerNorm dim of mean must be continuous";
LOG(WARNING) << "The LayerNorm dim of mean must be continuous";
return false;
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -458,4 +458,4 @@ REGISTER_PASS_CAPABILITY(preln_embedding_eltwise_layernorm_fuse_pass)
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("lookup_table", 1)
.LE("lookup_table_v2", 1)
.LE("elementweise_add", 1));
.LE("elementwise_add", 1));
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,8 @@ struct PrelnSkipLayerNorm : public PatternBase {
// (word, weights_0) lookup_table -> word_emb
// (pos, weights_1) lookup_table -> pos_emb
// (sent, weights_2) lookup_table -> sent_emb
// (word_emb, pos_emb) elementweise_add -> elementwise_out_0
// (elemtwise_out_0, sent_emb) elementweise_add -> elementwise_out_1
// (word_emb, pos_emb) elementwise_add -> elementwise_out_0
// (elementwise_out_0, sent_emb) elementwise_add -> elementwise_out_1
// (elementwise_out_1, scale, bias) layer_norm -> layer_norm_out
//
// and then convert the corresponding subgraph to:
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/preln_residual_bias_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ int PrelnResidualBiasFusePass::ApplyPattern(ir::Graph *graph,
// elementwise1_out. This will lead to two or more PrelnResidualBias
// patterns is found near elementwise1_out, and these patterns will interact
// on each other, so we make below check to ensure only one
// PrelnResidualBias pattern is delalted with.
// PrelnResidualBias pattern is dealt with.
for (auto op : elementwise1_out->inputs) {
if (op->Name() == "fused_bias_dropout_residual_layer_norm") return;
}
Expand Down
3 changes: 1 addition & 2 deletions paddle/fluid/framework/ir/preln_skip_layernorm_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -117,8 +117,7 @@ void PrelnSkipLayerNormFusePass::ApplyImpl(ir::Graph *graph) const {
"use_varseqlen, preln_embedding_eltwise_layernorm_fuse_pass, "
"trt_multihead_matmul_fuse_pass, "
"set pos_id, set mask_id, with_dynamic_shape. Stop this pass, "
"please "
"reconfig.";
"please reconfig.";
return;
}
int found_subgraph_count = 0;
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,8 @@ PDNode* BuildSeqExpandConcatPattern(PDPattern* pattern) {
// concat output

// So the following variables will be removed:
// sequence-expand output
// sequence-expand output
// sequence_expand output
// sequence_expand output

// Three operators
auto* sequence_expand0 = pattern->NewNode(
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/ir/sparse_conv_optim_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ SparseConvOptimPass::SparseConvOptimPass() {
}

void SparseConvOptimPass::ApplyImpl(ir::Graph* graph) const {
const std::string pattern_name = "sparse_conv_optim_partern";
const std::string pattern_name = "sparse_conv_optim_pattern";
FusePassBase::Init(pattern_name, graph);

GraphPatternDetector gpd;
Expand All @@ -70,7 +70,7 @@ void SparseConvOptimPass::ApplyImpl(ir::Graph* graph) const {
common::errors::InvalidArgument(
"Scope in SparseConvOptimPass should not be null."));
// Create pattern
patterns::SparseConvOptimPartern pattern(gpd.mutable_pattern(), pattern_name);
patterns::SparseConvOptimPattern pattern(gpd.mutable_pattern(), pattern_name);
pattern();
int found_count = 0;

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -311,11 +311,11 @@ int TrtCrossMultiHeadMatmulFusePass::BuildCrossFusion(
int hidden_out = wq_tensor->dims()[1];
int head_size = hidden_out / head_number;
if (abs(scale_attr - 1.0f / sqrt(static_cast<float>(head_size))) > 1e-5) {
VLOG(3) << "scale of muilthead matmul do not fit the requirement of "
VLOG(3) << "scale of multihead matmul do not fit the requirement of "
"flash attention plugin, Stop fusing.";
return;
}
VLOG(5) << "trt cross attention get wq_tensor name = " << mul0_w->Name()
VLOG(5) << "trt cross attention wq_tensor name = " << mul0_w->Name()
<< "trt cross attention wk_tensor name = " << mul1_w->Name()
<< "trt cross attention wv_tensor name = " << mul2_w->Name();

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,8 @@ struct TrtSkipLayerNorm : public PatternBase {
// (word, weights_0) lookup_table -> word_emb
// (pos, weights_1) lookup_table -> pos_emb
// (sent, weights_2) lookup_table -> sent_emb
// (word_emb, pos_emb) elementweise_add -> elementwise_out_0
// (elemtwise_out_0, sent_emb) elementweise_add -> elementwise_out_1
// (word_emb, pos_emb) elementwise_add -> elementwise_out_0
// (elementwise_out_0, sent_emb) elementwise_add -> elementwise_out_1
// (elementwise_out_1, scale, bias) layer_norm -> layer_norm_out
//
// and then convert the corresponding subgraph to:
Expand Down
3 changes: 0 additions & 3 deletions paddle/fluid/framework/ir/trt_multihead_matmul_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -62,9 +62,6 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) {
Node* scale,
Node* scale_out) {
auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale"));
// auto scale_bias = PADDLE_GET_CONST(float, scale->Op()->GetAttr("bias"));
// bool after_scale =
// PADDLE_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale"));

// create multihead
OpDesc multihead_op_desc(mul0->Op()->Block());
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -585,4 +585,4 @@ REGISTER_PASS_CAPABILITY(
paddle::framework::compatible::OpVersionComparatorCombination()
.LE("lookup_table", 1)
.LE("lookup_table_v2", 1)
.LE("elementweise_add", 1));
.LE("elementwise_add", 1));
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ PDNode* TrtQKMultiHeadMatmulPattern::operator()() {
.LinksTo({matmul_qk_out_var});
scale->LinksFrom({matmul_qk_out_var}).LinksTo({scale_out_var});
softmax_qk->LinksFrom({scale_out_var}).LinksTo({softmax_qk_out_var});
// V path
// V path
mul2->LinksFrom({input1, mul2_w_var}).LinksTo({mul2_out_var});
elementwise2->LinksFrom({mul2_out_var, elementwise2_w})
.LinksTo({elementwise2_out});
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/io.cc
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ void LoadSeparatePersistables(framework::Executor* executor,
num_threads = std::min(num_threads, persistable_vars.size() / chunk_size);
size_t remains_size = persistable_vars.size() % num_threads;
VLOG(4) << "Start Load with multi-thread: " << num_threads
<< " chund size: " << chunk_size;
<< " chunk size: " << chunk_size;

auto load_handler = [&](const std::vector<framework::VarDesc*>& vars) {
if (vars.empty()) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ limitations under the License. */

namespace paddle::inference::tensorrt {

class CustomPluginCreater : public OpConverter {
class CustomPluginCreator : public OpConverter {
public:
void operator()(const framework::proto::OpDesc &op,
const framework::Scope &scope,
Expand Down Expand Up @@ -335,7 +335,7 @@ class CustomGenericPluginCreator : public OpConverter {
} // namespace paddle::inference::tensorrt

REGISTER_TRT_OP_CONVERTER(custom_plugin_creater,
CustomPluginCreater); // typos: disable-line
CustomPluginCreator); // typos: disable-line
REGISTER_TRT_OP_CONVERTER(generic_plugin_creator, GenericPluginCreator);
REGISTER_TRT_OP_CONVERTER(custom_generic_plugin_creator,
CustomGenericPluginCreator);
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/convert/op_converter.h
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ class OpConverter {
it = Registry<OpConverter>::Global().Lookup("generic_plugin_creator");
break;

case OpConverterType::CustomPluginCreater: // typos: disable-line
case OpConverterType::CustomPluginCreator:
LOG(INFO) << "There is no OpConverter for type " << op_desc.Type()
<< ", now use custom_plugin_creater!"; // typos: disable-line
it = Registry<OpConverter>::Global().Lookup(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ PD_BUILD_OP(custom_op)

namespace paddle::inference::tensorrt {

TEST(CustomPluginCreater, StaticShapePlugin) {
TEST(CustomPluginCreator, StaticShapePlugin) {
framework::ProgramDesc prog;
auto *block = prog.MutableBlock(0);
auto *op = block->AppendOp();
Expand Down Expand Up @@ -118,14 +118,14 @@ TEST(CustomPluginCreater, StaticShapePlugin) {
"(*custom_plugin_tell)(custom_op, false, true) is False."));

OpTeller::Global().SetOpConverterType(&custom_op,
OpConverterType::CustomPluginCreater);
OpConverterType::CustomPluginCreator);

OpConverter converter;
converter.ConvertBlock(
*block->Proto(), {}, scope, engine_.get() /*TensorRTEngine*/);
}

TEST(CustomPluginCreater, DynamicShapePlugin) {
TEST(CustomPluginCreator, DynamicShapePlugin) {
framework::ProgramDesc prog;
auto *block = prog.MutableBlock(0);
auto *op = block->AppendOp();
Expand Down Expand Up @@ -204,7 +204,7 @@ TEST(CustomPluginCreater, DynamicShapePlugin) {
"(*custom_plugin_tell)(custom_op, false, true) is False."));

OpTeller::Global().SetOpConverterType(&custom_op,
OpConverterType::CustomPluginCreater);
OpConverterType::CustomPluginCreator);

OpConverter converter;
converter.ConvertBlock(
Expand Down
4 changes: 1 addition & 3 deletions paddle/fluid/inference/tensorrt/op_teller.cc
Original file line number Diff line number Diff line change
Expand Up @@ -3475,9 +3475,7 @@ bool OpTeller::Tell(const framework::ir::Node* node,
with_dynamic_shape,
forbid_dynamic_op_enter_into_trt,
use_explicit_quantization)) {
SetOpConverterType(
node->Op(),
OpConverterType::CustomPluginCreater); // typos: disable-line
SetOpConverterType(node->Op(), OpConverterType::CustomPluginCreator);
return true;
}
auto& custom_generic_plugin_teller = GetCustomGenericPluginTeller();
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/tensorrt/op_teller.h
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ struct Teller {
enum class OpConverterType {
Default = 0,
GenericPluginCreator,
CustomPluginCreater, // typos: disable-line
CustomPluginCreator,
CustomGenericPluginCreator
};
/*
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ paddle::any PluginArgumentMappingContext::Attr(
break;
};
default: {
LOG(ERROR) << "Can't cover op's attribute [" << attr_name
LOG(ERROR) << "Can't convert op's attribute [" << attr_name
<< "] to paddle any.";
}
}
Expand Down
Loading