Skip to content

Commit

Permalink
Fix some typos (kernel_dialtion, etc.) (#62013)
Browse files Browse the repository at this point in the history
  • Loading branch information
co63oc committed Feb 26, 2024
1 parent 665f97b commit 2ea42ce
Show file tree
Hide file tree
Showing 16 changed files with 62 additions and 62 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -355,7 +355,7 @@ bool GraphGroupFuseHelper<FusePassCtxT>::AllOutputsSameSize(
template <typename FusePassCtxT>
bool GraphGroupFuseHelper<FusePassCtxT>::HorizontalElementwiseFuseReduce(
const OpGroupPtr& src, const OpGroupPtr& dst) const {
return honrizontal_elementwise_fuse_reduce(src.GetGroup(), dst.GetGroup());
return horizontal_elementwise_fuse_reduce(src.GetGroup(), dst.GetGroup());
}

template <typename FusePassCtxT>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ inline bool elementwise_fuse_broadcast(
return true;
}

inline bool honrizontal_elementwise_fuse_reduce(
inline bool horizontal_elementwise_fuse_reduce(
const std::shared_ptr<ir::Group>& first,
const std::shared_ptr<ir::Group>& second) {
std::shared_ptr<ir::Group> ele_group, reduce_group;
Expand Down
4 changes: 2 additions & 2 deletions paddle/cinn/hlir/framework/graph_compiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -210,7 +210,7 @@ static void BufferMallocWithCallback(void* args, int num_args) {
for (int i = 0; i < num_args; ++i) {
cinn_buffer_t* buffer = static_cast<cinn_buffer_t*>(pod_args[i]);
CHECK(buffer->external_malloc)
<< "external_malloc is nullptr at " << i << "-th argumemnts";
<< "external_malloc is nullptr at " << i << "-th arguments";
buffer->external_malloc->operator()(nullptr, buffer);
}
}
Expand Down Expand Up @@ -282,7 +282,7 @@ void GraphCompiler::InsertBufferHandlers(
malloc_var_names,
std::vector<std::string>({}),
function_name);
VLOG(4) << "seting malloc function " << function_name << " for var "
VLOG(4) << "setting malloc function " << function_name << " for var "
<< cinn::utils::Join(malloc_var_names, ", ");
malloc_instr->SetLoweredFunc(
reinterpret_cast<void*>(BufferMallocWithCallback), function_name);
Expand Down
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/graph_compiler.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ class GraphCompiler final {
const std::vector<std::unique_ptr<Instruction>>& instructions);

// find the first and last instruction where a variable used, and mark the
// variable should allocate buffer before the first instruction runing and
// variable should allocate buffer before the first instruction running and
// can release the buffer after the last instruction finished.
void AnalyzeVariableLifeTime(
const std::vector<std::unique_ptr<Instruction>>& instructions,
Expand Down
18 changes: 9 additions & 9 deletions paddle/cinn/hlir/framework/instruction.cc
Original file line number Diff line number Diff line change
Expand Up @@ -168,9 +168,9 @@ void Instruction::Run(
pod_args[2],
static_cast<cudaStream_t>(stream));
} else {
VLOG(3) << "Runing extern function " << function_name_;
VLOG(3) << "Running extern function " << function_name_;
for (int idx = 0; idx < fn_ptrs_.size(); ++idx) {
VLOG(3) << "Runing func name: " << fn_names_[idx];
VLOG(3) << "Running func name: " << fn_names_[idx];
auto& pod_args = args_cached_[idx];
CHECK(fn_ptrs_[idx]) << "The LoweredFunc address should be set first by "
"calling SetLoweredFunc method";
Expand All @@ -184,7 +184,7 @@ void Instruction::Run(
}
}
}
VLOG(3) << "Done Runing extern function " << function_name_;
VLOG(3) << "Done Running extern function " << function_name_;
}
#elif defined(CINN_WITH_CUDNN)
auto& pod_args = args_cached_[0];
Expand Down Expand Up @@ -315,9 +315,9 @@ void Instruction::Run(
pod_args[2],
static_cast<cudaStream_t>(stream));
} else {
VLOG(3) << "Runing extern function " << function_name_;
VLOG(3) << "Running extern function " << function_name_;
for (int idx = 0; idx < fn_ptrs_.size(); ++idx) {
VLOG(3) << "Runing func name: " << fn_names_[idx];
VLOG(3) << "Running func name: " << fn_names_[idx];
auto& pod_args = args_cached_[idx];
CHECK(fn_ptrs_[idx]) << "The LoweredFunc address should be set first by "
"calling SetLoweredFunc method";
Expand All @@ -331,12 +331,12 @@ void Instruction::Run(
}
}
}
VLOG(3) << "Done Runing extern function " << function_name_;
VLOG(3) << "Done Running extern function " << function_name_;
}
#else
VLOG(3) << "Runing extern function " << function_name_;
VLOG(3) << "Running extern function " << function_name_;
for (int idx = 0; idx < fn_ptrs_.size(); ++idx) {
VLOG(3) << "Runing func name: " << fn_names_[idx];
VLOG(3) << "Running func name: " << fn_names_[idx];
auto& pod_args = args_cached_[idx];
CHECK(fn_ptrs_[idx]) << "The LoweredFunc address should be set first by "
"calling SetLoweredFunc method";
Expand All @@ -350,7 +350,7 @@ void Instruction::Run(
}
}
}
VLOG(3) << "Done Runing extern function " << function_name_;
VLOG(3) << "Done Running extern function " << function_name_;
#endif

if (!cinn::runtime::CheckStringFlagFalse(FLAGS_cinn_self_check_accuracy)) {
Expand Down
6 changes: 3 additions & 3 deletions paddle/cinn/hlir/framework/pir_compiler.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ namespace cinn {
namespace hlir {
namespace framework {

// TODO(Aurelius84): Clear usless Build Interface.
// TODO(Aurelius84): Clear useless Build Interface.
std::unique_ptr<Program> PirCompiler::Build() {
m_builder_.Clear();
// NOTE(Aurelius84): Currently only support each op for one group
Expand Down Expand Up @@ -213,8 +213,8 @@ std::shared_ptr<Scope> BuildScope(const Target& target,
};

for (auto& op : *program.block()) {
for (auto oprand : op.operands()) {
create_var(oprand.source());
for (auto operand : op.operands()) {
create_var(operand.source());
}

for (auto result : op.results()) {
Expand Down
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/print_graph_pass_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ void PrintGraphPass(Graph* src) {

CINN_REGISTER_PASS(PrintGraph)
.describe(
"This pass just save the visulization Graph to "
"This pass just save the visualization Graph to "
"g.attrs[\"print_graph\"].")
.set_change_structure(false)
.provide_graph_attr("print_graph")
Expand Down
18 changes: 9 additions & 9 deletions paddle/cinn/hlir/op/nn.cc
Original file line number Diff line number Diff line change
Expand Up @@ -662,7 +662,7 @@ std::shared_ptr<OpStrategy> StrategyForConv2dNCHWc(
std::vector<Expr> kernel_shape = inputs[1]->shape;
// kernel_h == 1 && kernel_w == 1
CHECK_EQ(kernel_shape.size(), 6U)
<< "kernel_dialtion shape size should be 6";
<< "kernel_dilation shape size should be 6";
bool is_1x1 =
(is_zero(kernel_shape[2] - 1)) && (is_zero(kernel_shape[3] - 1));
ir::Tensor res;
Expand Down Expand Up @@ -2224,18 +2224,18 @@ std::vector<framework::shape_t> InferShapeForBatchNormTrain(
CHECK_EQ(inputs_shape[0][1], inputs_shape[2][0])
<< "x and bias dimension size is not equal!";
CHECK_EQ(inputs_shape[0][1], inputs_shape[3][0])
<< "x and moveing_mean dimension size is not equal!";
<< "x and moving_mean dimension size is not equal!";
CHECK_EQ(inputs_shape[0][1], inputs_shape[4][0])
<< "x and moveing_variance dimension size is not equal!";
<< "x and moving_variance dimension size is not equal!";
} else if (data_layout == "NHWC") {
CHECK_EQ(inputs_shape[0][3], inputs_shape[1][0])
<< "x and scale dimension is not equal!";
CHECK_EQ(inputs_shape[0][3], inputs_shape[2][0])
<< "x and bias dimension size is not equal!";
CHECK_EQ(inputs_shape[0][3], inputs_shape[3][0])
<< "x and moveing_mean dimension size is not equal!";
<< "x and moving_mean dimension size is not equal!";
CHECK_EQ(inputs_shape[0][3], inputs_shape[4][0])
<< "x and moveing_variance dimension size is not equal!";
<< "x and moving_variance dimension size is not equal!";
} else {
LOG(FATAL) << "data_layout " << data_layout << " is not support!";
}
Expand Down Expand Up @@ -2302,16 +2302,16 @@ std::vector<framework::shape_t> InferShapeForBatchNormGrad(
CHECK_EQ(inputs_shape[0][1], inputs_shape[2][0])
<< "dy and bias dimension size is not equal!";
CHECK_EQ(inputs_shape[0][1], inputs_shape[3][0])
<< "dy and moveing_mean dimension size is not equal!";
<< "dy and moving_mean dimension size is not equal!";
CHECK_EQ(inputs_shape[0][1], inputs_shape[4][0])
<< "dy and moveing_variance dimension size is not equal!";
<< "dy and moving_variance dimension size is not equal!";
} else if (data_layout == "NHWC") {
CHECK_EQ(inputs_shape[0][3], inputs_shape[2][0])
<< "dy and bias dimension size is not equal!";
CHECK_EQ(inputs_shape[0][3], inputs_shape[3][0])
<< "dy and moveing_mean dimension size is not equal!";
<< "dy and moving_mean dimension size is not equal!";
CHECK_EQ(inputs_shape[0][3], inputs_shape[4][0])
<< "dy and moveing_variance dimension size is not equal!";
<< "dy and moving_variance dimension size is not equal!";
} else {
LOG(FATAL) << "data_layout " << data_layout << " is not support!";
}
Expand Down
12 changes: 6 additions & 6 deletions paddle/cinn/hlir/op/reduction.cc
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@ std::shared_ptr<OpStrategy> StrategyForReduce(
for (int i = 0; i < arg_pack.size(); i++) {
if (arg_pack[i].is_expr()) {
Expr temp = arg_pack[i];
// TODO(zhhsplendid): old reducetion schedule assumes all length-1
// TODO(zhhsplendid): old reduction schedule assumes all length-1
// for loops are simplified, but it is not after we add length-1
// back. Reduction schedule is complex and we haven't changed it to
// support the length-1 for loop yet. So we simplify here. The todo
Expand Down Expand Up @@ -651,16 +651,16 @@ std::vector<std::vector<std::string>> InferLayoutForBnOptimize(
} // namespace cinn

CINN_REGISTER_HELPER(reduce_ops) {
#define CINN_REGISTER_REDUCTION_WITH_DTYPE(op__, op_stragegy__, dtype__) \
#define CINN_REGISTER_REDUCTION_WITH_DTYPE(op__, op_strategy__, dtype__) \
CINN_REGISTER_OP(op__) \
.describe(#op__ " function") \
.set_num_inputs(1) \
.set_num_outputs(1) \
.set_attr<cinn::hlir::framework::StrategyFunction>( \
"CINNStrategy", cinn::hlir::op::StrategyFor##op_stragegy__) \
"CINNStrategy", cinn::hlir::op::StrategyFor##op_strategy__) \
.set_attr<cinn::hlir::framework::StrategyFunctionSymbolic>( \
"CINNStrategySymbolic", \
cinn::hlir::op::StrategyFor##op_stragegy__##Symbolic) \
cinn::hlir::op::StrategyFor##op_strategy__##Symbolic) \
.set_attr("infershape", \
MakeOpFunction(cinn::hlir::op::InferShapeForReduction)) \
.set_attr( \
Expand All @@ -674,8 +674,8 @@ CINN_REGISTER_HELPER(reduce_ops) {
"OpPattern", cinn::hlir::framework::OpPatternKind::kReduction) \
.set_support_level(4);

#define CINN_REGISTER_REDUCTION(op__, op_stragegy__) \
CINN_REGISTER_REDUCTION_WITH_DTYPE(op__, op_stragegy__, )
#define CINN_REGISTER_REDUCTION(op__, op_strategy__) \
CINN_REGISTER_REDUCTION_WITH_DTYPE(op__, op_strategy__, )

CINN_REGISTER_REDUCTION(reduce_sum, ReduceSum);
CINN_REGISTER_REDUCTION(reduce_prod, ReduceProd);
Expand Down
20 changes: 10 additions & 10 deletions paddle/cinn/hlir/pass/fusion_merge_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -233,7 +233,7 @@ class FusionMergePassHelper : public FusionHelperBase {
break;
}

// if can't fuse to othors Groups, new Groups.
// if can't fuse to other Groups, new Groups.
if (!fusionable) {
fusionable_consumers.push_back({candidate});
}
Expand Down Expand Up @@ -488,7 +488,7 @@ class FusionMergePassHelper : public FusionHelperBase {
fusionable_consumers) {
VLOG(3) << "VerticalFuse...!";
GroupList fused_groups;
GroupPtr master_fuesd_group(nullptr);
GroupPtr master_fused_group(nullptr);
for (auto& consumer : fusionable_consumers) {
auto fused_group = std::make_shared<Graph::Group>();
// update depth using consumer depth.
Expand Down Expand Up @@ -623,8 +623,8 @@ class FusionMergePassHelper : public FusionHelperBase {
fusion_groups_[postion] = fused_group;
fusion_groups_index_[fused_group] = postion;

if (!master_fuesd_group.get()) {
master_fuesd_group = fused_group;
if (!master_fused_group.get()) {
master_fused_group = fused_group;
}
CHECK(fused_group->output_nodes.size())
<< "No output node is found, " << fused_group->group_id;
Expand Down Expand Up @@ -654,19 +654,19 @@ class FusionMergePassHelper : public FusionHelperBase {

if (be_output) {
VLOG(4) << "Insert Id " << node->id() << " Into Group "
<< master_fuesd_group->group_id;
master_fuesd_group->output_nodes.insert(node);
<< master_fused_group->group_id;
master_fused_group->output_nodes.insert(node);
}
}
// insert unfusionable consumer groups
for (auto& consumer : *producer->mut_consumer_groups()) {
if (fusionable_consumers.count(consumer)) {
continue;
}
master_fuesd_group->mut_consumer_groups()->insert(consumer);
master_fused_group->mut_consumer_groups()->insert(consumer);
// update consumer's producer
consumer->mut_producer_groups()->erase(producer);
consumer->mut_producer_groups()->insert(master_fuesd_group);
consumer->mut_producer_groups()->insert(master_fused_group);
}
}

Expand Down Expand Up @@ -979,7 +979,7 @@ class FusionMergePassHelper : public FusionHelperBase {
// element-wise and injective op must be horizontal relation.
{OpPatternKind::kInjective, is_same_size},
// element-wise and reduce op must be horizontal relation.
{OpPatternKind::kReduction, honrizontal_elementwise_fuse_reduce}};
{OpPatternKind::kReduction, horizontal_elementwise_fuse_reduce}};
// vertical
relation.vertical_relation = {
{OpPatternKind::kElementWise, is_same_size},
Expand Down Expand Up @@ -1044,7 +1044,7 @@ class FusionMergePassHelper : public FusionHelperBase {
// horizontal
relation.horizontal_relation = {
// reduce and element-wise op must be horizontal relation.
{OpPatternKind::kElementWise, honrizontal_elementwise_fuse_reduce},
{OpPatternKind::kElementWise, horizontal_elementwise_fuse_reduce},
// reduce and broadcast op must be horizontal relation.
{OpPatternKind::kBroadcast, is_same_size},
// reduce and injective op must be horizontal relation.
Expand Down
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/pass/fusion_merge_pass_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ CONDITION_FUNC(elementwise_fuse_broadcast) {
return true;
}

CONDITION_FUNC(honrizontal_elementwise_fuse_reduce) {
CONDITION_FUNC(horizontal_elementwise_fuse_reduce) {
std::shared_ptr<Graph::Group> ele_group, reduce_group;
if (first->op_pattern_kind == framework::kReduction) {
ele_group = second;
Expand Down
16 changes: 8 additions & 8 deletions paddle/cinn/hlir/pass/general_fusion_merge_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ class GeneralFusionMergePassHelper : public FusionHelperBase {
}

GroupList operator()() {
// run fusion merge untill no update.
// run fusion merge until no update.
DoFusionMerge();
for (auto& group : fusion_groups_) {
VLOG(3) << "Fusion Group -> " << group->group_id;
Expand Down Expand Up @@ -564,7 +564,7 @@ class GeneralFusionMergePassHelper : public FusionHelperBase {
fusionable_consumers) {
VLOG(3) << "VerticalFuse...!";
GroupList fused_groups;
GroupPtr master_fuesd_group(nullptr);
GroupPtr master_fused_group(nullptr);
for (auto& consumer : fusionable_consumers) {
auto fused_group = std::make_shared<Graph::Group>(graph_);
// update depth using consumer depth.
Expand Down Expand Up @@ -700,8 +700,8 @@ class GeneralFusionMergePassHelper : public FusionHelperBase {
fusion_groups_[postion] = fused_group;
fusion_groups_index_[fused_group] = postion;

if (!master_fuesd_group.get()) {
master_fuesd_group = fused_group;
if (!master_fused_group.get()) {
master_fused_group = fused_group;
}
CHECK(fused_group->output_nodes.size())
<< "No output node is found, " << fused_group->group_id;
Expand Down Expand Up @@ -731,19 +731,19 @@ class GeneralFusionMergePassHelper : public FusionHelperBase {

if (be_output) {
VLOG(4) << "Insert Id " << node->id() << " Into Group "
<< master_fuesd_group->group_id;
master_fuesd_group->output_nodes.insert(node);
<< master_fused_group->group_id;
master_fused_group->output_nodes.insert(node);
}
}
// insert unfusionable consumer groups
for (auto& consumer : *producer->mut_consumer_groups()) {
if (fusionable_consumers.count(consumer)) {
continue;
}
master_fuesd_group->mut_consumer_groups()->insert(consumer);
master_fused_group->mut_consumer_groups()->insert(consumer);
// update consumer's producer
consumer->mut_producer_groups()->erase(producer);
consumer->mut_producer_groups()->insert(master_fuesd_group);
consumer->mut_producer_groups()->insert(master_fused_group);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ bool GraphGroupFuseHelper<FusePassCtxT>::AllOutputsSameSize(
template <typename FusePassCtxT>
bool GraphGroupFuseHelper<FusePassCtxT>::HorizontalElementwiseFuseReduce(
const OpGroupPtr& src, const OpGroupPtr& dst) const {
return honrizontal_elementwise_fuse_reduce(
return horizontal_elementwise_fuse_reduce(
&ctx_->graph_group_fusion_helper(), src.GetGroup(), dst.GetGroup());
}

Expand Down
Loading

0 comments on commit 2ea42ce

Please sign in to comment.