【Hackathon 6th Fundable Projects 2 No.28】Fix modernize-avoid-c-arrays_2-part #64511

Merged: 5 commits, May 28, 2024
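This PR addresses the clang-tidy check modernize-avoid-c-arrays, which flags raw C arrays. The fixes below follow two patterns: replace the array with std::array, std::vector, or std::string where practical, or append // NOLINT where the array has to stay (for example, out-of-class definitions of static const char[] members). A minimal illustration of the first pattern (a sketch, not code from this PR):

// Flagged: raw C array
int ids[3] = {1, 2, 3};
// Preferred: fixed-size std::array (needs <array>); same layout, richer interface
std::array<int, 3> ids = {1, 2, 3};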
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/service/graph_brpc_client.cc
@@ -248,7 +248,7 @@ std::future<int32_t> GraphBrpcClient::add_graph_node(
reinterpret_cast<char *>(request_bucket[request_idx].data()),
sizeof(int64_t) * node_num);
if (add_weight) {
- bool weighted[is_weighted_bucket[request_idx].size() + 1];
+ bool weighted[is_weighted_bucket[request_idx].size() + 1]; // NOLINT
for (size_t j = 0; j < is_weighted_bucket[request_idx].size(); j++)
weighted[j] = is_weighted_bucket[request_idx][j];
closure->request(request_idx)
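Note: the array kept above is a variable-length array (its size is only known at run time), which is a compiler extension rather than standard C++, so the // NOLINT only suppresses the diagnostic. If the downstream call accepts a plain bool pointer, a heap-backed form could avoid the extension entirely (a sketch under that assumption; needs <memory>):

auto weighted = std::make_unique<bool[]>(is_weighted_bucket[request_idx].size() + 1);
// fill as before, then pass weighted.get() wherever the raw array decayed to bool*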
5 changes: 3 additions & 2 deletions paddle/fluid/framework/device_worker.cc
@@ -93,7 +93,8 @@ void PrintLodTensorType<float>(phi::DenseTensor* tensor,
char separator,
bool need_leading_separator,
int num_decimals) {
- char buf[MAX_FLOAT_BUFF_SIZE];
+ std::string buf;
+ buf.resize(MAX_FLOAT_BUFF_SIZE);
auto count = tensor->numel();
if (start < 0 || end > count) {
VLOG(3) << "access violation";
@@ -108,7 +109,7 @@ void PrintLodTensorType<float>(phi::DenseTensor* tensor,
out_val += "0";
} else {
std::string format = "%." + std::to_string(num_decimals) + "f";
- sprintf(buf, &format[0], tensor->data<float>()[i]); // NOLINT
+ sprintf(&buf[0], &format[0], tensor->data<float>()[i]); // NOLINT
out_val += buf;
}
}
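One subtlety: after buf.resize(MAX_FLOAT_BUFF_SIZE), the later out_val += buf appends all MAX_FLOAT_BUFF_SIZE characters, trailing '\0' padding included, because std::string::operator+= copies size() characters instead of stopping at a terminator the way the old char array did. A bounded variant that appends only the formatted text (a sketch, assuming MAX_FLOAT_BUFF_SIZE is a compile-time constant):

std::array<char, MAX_FLOAT_BUFF_SIZE> buf;  // needs <array> and <cstdio>
std::snprintf(buf.data(), buf.size(), format.c_str(), tensor->data<float>()[i]);
out_val += buf.data();  // const char* overload stops at the terminator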
15 changes: 8 additions & 7 deletions paddle/fluid/inference/tensorrt/convert/temporal_shift_op.cc
@@ -177,17 +177,18 @@ class TemporalShiftOpConverter : public OpConverter {
// Concatenate slices along the third dimension (C)
nvinfer1::IConcatenationLayer* concat_layer;
if (!slice_c) {
- nvinfer1::ITensor* concat_inputs[2] = {slice2_layer->getOutput(0),
- slice3_layer->getOutput(0)};
+ std::vector<nvinfer1::ITensor*> concat_inputs = {
+ slice2_layer->getOutput(0), slice3_layer->getOutput(0)};
concat_layer =
- TRT_ENGINE_ADD_LAYER(engine_, Concatenation, concat_inputs, 2);
+ TRT_ENGINE_ADD_LAYER(engine_, Concatenation, concat_inputs.data(), 2);
concat_layer->setAxis(2);
} else {
- nvinfer1::ITensor* concat_inputs[3] = {slice1_layer->getOutput(0),
- slice2_layer->getOutput(0),
- slice3_layer->getOutput(0)};
+ std::vector<nvinfer1::ITensor*> concat_inputs = {
+ slice1_layer->getOutput(0),
+ slice2_layer->getOutput(0),
+ slice3_layer->getOutput(0)};
concat_layer =
- TRT_ENGINE_ADD_LAYER(engine_, Concatenation, concat_inputs, 3);
+ TRT_ENGINE_ADD_LAYER(engine_, Concatenation, concat_inputs.data(), 3);
concat_layer->setAxis(2);
}

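The vector form works because std::vector guarantees contiguous storage, so .data() yields the ITensor* const* the concatenation layer expects. The same pattern against the raw TensorRT network API (a sketch; network and the input tensors are assumed to exist already):

std::vector<nvinfer1::ITensor*> inputs = {a, b};
nvinfer1::IConcatenationLayer* concat =
    network->addConcatenation(inputs.data(), static_cast<int32_t>(inputs.size()));
concat->setAxis(2);  // axis 2 = C, matching the converter above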
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/test_tensorrt.cc
@@ -119,7 +119,7 @@ void Execute(nvinfer1::IExecutionContext* context,
const int input_index = engine.getBindingIndex(kInputTensor);
const int output_index = engine.getBindingIndex(kOutputTensor);
// Create GPU buffers and a stream
- void* buffers[2];
+ std::vector<void*> buffers(2);
ASSERT_EQ(0, cudaMalloc(&buffers[input_index], sizeof(float)));
ASSERT_EQ(0, cudaMalloc(&buffers[output_index], sizeof(float)));
cudaStream_t stream;
@@ -131,7 +131,7 @@
sizeof(float),
cudaMemcpyHostToDevice,
stream));
- context->enqueue(1, buffers, stream, nullptr);
+ context->enqueue(1, buffers.data(), stream, nullptr);
ASSERT_EQ(0,
cudaMemcpyAsync(output,
buffers[output_index],
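A side benefit of the vector here: std::vector<void*> buffers(2) value-initializes its elements, so both slots start as nullptr rather than holding indeterminate values like the old stack array did. In isolation (sketch):

std::vector<void*> buffers(2);
assert(buffers[0] == nullptr && buffers[1] == nullptr);  // needs <cassert>; guaranteed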
10 changes: 5 additions & 5 deletions paddle/fluid/operators/controlflow/pylayer_op.cc
@@ -26,11 +26,11 @@ namespace { // NOLINT
enum class PyLayerBlockIndex { kFORWARD = 0, kBACKWARD = 1, kNONE = 2 };
} // namespace

- const char PyLayerOp::kInputs[] = "Input"; // NOLINT
- const char PyLayerOp::kOutputs[] = "Out"; // NOLINT
- const char PyLayerOp::kScope[] = "Scope"; // NOLINT
- const char PyLayerOp::kSkipEagerDeletionVars[] =
- "skip_eager_deletion_vars"; // NOLINT
+ const char PyLayerOp::kInputs[] = "Input"; // NOLINT
+ const char PyLayerOp::kOutputs[] = "Out"; // NOLINT
+ const char PyLayerOp::kScope[] = "Scope"; // NOLINT
+ const char PyLayerOp::kSkipEagerDeletionVars[] = // NOLINT
+ "skip_eager_deletion_vars";
const char PyLayerOp::kBlocks[] = "blocks"; // NOLINT

void PyLayerOp::CreateInterpreter(
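These definitions keep the const char[] members and suppress the check, which preserves the existing class interface. If the declarations themselves could change, a C++17 alternative with no array at all might be (a hypothetical sketch; every use site would need to accept std::string_view):

// in the class body, replacing `static const char kInputs[];`
static constexpr std::string_view kInputs = "Input";  // needs <string_view>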
7 changes: 2 additions & 5 deletions paddle/fluid/pir/dialect/kernel/ir/kernel_op.cc
@@ -181,11 +181,8 @@ phi::KernelKey OneDNNPhiKernelOp::kernel_key() {
return attributes().at("kernel_key").dyn_cast<KernelAttribute>().data();
}

- const char* OneDNNMixedPhiKernelOp::attributes_name[attributes_num] =
- { // NOLINT
- "op_name",
- "kernel_name",
- "kernel_key"};
+ const char* OneDNNMixedPhiKernelOp::attributes_name[attributes_num] = // NOLINT
+ {"op_name", "kernel_name", "kernel_key"};

void OneDNNMixedPhiKernelOp::VerifySig() {
VLOG(4) << "Verifying inputs, outputs and attributes for: "
2 changes: 1 addition & 1 deletion paddle/fluid/pir/dialect/operator/ir/manual_onednn_op.cc
@@ -47,7 +47,7 @@ namespace paddle {
namespace onednn {
namespace dialect {

- const char* ExpandOp::attributes_name[1] = {"mkldnn_data_type"};
+ const char* ExpandOp::attributes_name[1] = {"mkldnn_data_type"}; // NOLINT

OpInfoTuple ExpandOp::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
6 changes: 4 additions & 2 deletions paddle/fluid/pir/dialect/operator/ir/manual_op.cc
@@ -2050,7 +2050,8 @@ std::vector<pir::Type> ArrayWrite_Op::InferMeta(
return argument_outputs;
}

- const char *ArrayToTensorOp::attributes_name[2] = {"axis", "use_stack"};
+ const char *ArrayToTensorOp::attributes_name[2] = {"axis",
+ "use_stack"}; // NOLINT

OpInfoTuple ArrayToTensorOp::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
@@ -2242,7 +2243,8 @@ std::vector<pir::Type> ArrayToTensorOp::InferMeta(
return argument_outputs;
}

- const char *TensorToArrayOp::attributes_name[2] = {"axis", "use_stack"};
+ const char *TensorToArrayOp::attributes_name[2] = {"axis",
+ "use_stack"}; // NOLINT

OpInfoTuple TensorToArrayOp::GetOpInfo() {
std::vector<paddle::dialect::OpInputInfo> inputs = {
3 changes: 2 additions & 1 deletion paddle/fluid/pir/dialect/operator/ir/manual_pylayer_op.cc
@@ -39,7 +39,8 @@ paddle::dialect::PyLayerOp
namespace paddle {
namespace dialect {

- const char *PyLayerOp::attributes_name[1] = {kBackwardFunctionIdAttrName};
+ const char *PyLayerOp::attributes_name[1] = {
+ kBackwardFunctionIdAttrName}; // NOLINT

void PyLayerOp::Build(pir::Builder &builder, // NOLINT
pir::OperationArgument &argument, // NOLINT
2 changes: 1 addition & 1 deletion paddle/fluid/platform/profiler/cpu_utilization.cc
@@ -54,7 +54,7 @@ void CpuUtilization::RecordBeginTimeInfo() {
#elif defined(__linux__)
start_ = times(&process_tms_start_);
#define proc_path_size 1024
- static char proc_stat_path[proc_path_size] = "/proc/stat"; // NOLINTf
+ static char proc_stat_path[proc_path_size] = "/proc/stat"; // NOLINT
FILE *stat_file = fopen(proc_stat_path, "r");
if (stat_file != nullptr) {
std::array<char, 200> temp_str;
9 changes: 5 additions & 4 deletions paddle/phi/kernels/fusion/onednn/fc_kernel.cc
@@ -602,11 +602,12 @@ void FCKernel(const Context& dev_ctx,
dev_ctx.HasDnnAttr("force_fp32_output")
? PADDLE_GET_CONST(bool, dev_ctx.GetDnnAttr("force_fp32_output"))
: false;
- std::string mkldnn_data_type_list[] = {"float32", "int8", "bfloat16"};
+ std::vector<std::string> mkldnn_data_type_list = {
+ "float32", "int8", "bfloat16"};
PADDLE_ENFORCE_EQ(
- std::find(std::begin(mkldnn_data_type_list),
- std::end(mkldnn_data_type_list),
- mkldnn_data_type) != std::end(mkldnn_data_type_list),
+ std::find(mkldnn_data_type_list.begin(),
+ mkldnn_data_type_list.end(),
+ mkldnn_data_type) != mkldnn_data_type_list.end(),
true,
phi::errors::InvalidArgument("The mkldnn_data_type should be [float32, "
"int8, bfloat16], but found %s.",
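Strictly speaking, std::begin/std::end also work on containers, so only the declaration had to change; switching to the member begin()/end() is stylistic. The membership test in isolation (a sketch with a hypothetical helper name; the fusion_gru hunk below makes the same change):

#include <algorithm>
#include <string>
#include <vector>

bool IsSupportedDnnType(const std::string& t) {
  static const std::vector<std::string> kAllowed = {"float32", "int8", "bfloat16"};
  return std::find(kAllowed.begin(), kAllowed.end(), t) != kAllowed.end();
}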
9 changes: 5 additions & 4 deletions paddle/phi/kernels/fusion/onednn/fusion_gru_kernel.cc
@@ -573,11 +573,12 @@ void FusionGRUKernel(const Context& dev_ctx,
? PADDLE_GET_CONST(std::string,
dev_ctx.GetDnnAttr("mkldnn_data_type"))
: "float32";
- std::string mkldnn_data_type_list[] = {"float32", "int8", "bfloat16"};
+ std::vector<std::string> mkldnn_data_type_list = {
+ "float32", "int8", "bfloat16"};
PADDLE_ENFORCE_EQ(
- std::find(std::begin(mkldnn_data_type_list),
- std::end(mkldnn_data_type_list),
- mkldnn_data_type) != std::end(mkldnn_data_type_list),
+ std::find(mkldnn_data_type_list.begin(),
+ mkldnn_data_type_list.end(),
+ mkldnn_data_type) != mkldnn_data_type_list.end(),
true,
phi::errors::InvalidArgument("The mkldnn_data_type shoule be [float32, "
"int8, bfloat16], but found %s.",
2 changes: 1 addition & 1 deletion paddle/pir/src/dialect/shape/ir/shape_attribute.cc
@@ -18,7 +18,7 @@

namespace pir::shape {

- const char SymbolAttribute::attr_name[] = "sym_shape_str";
+ const char SymbolAttribute::attr_name[] = "sym_shape_str"; // NOLINT

symbol::ShapeOrDataDimExprs SymbolAttribute::data() const {
return storage()->data();
2 changes: 1 addition & 1 deletion test/cpp/pir/tools/test_op.cc
@@ -41,7 +41,7 @@ void BranchOp::VerifySig() const {
phi::errors::InvalidArgument("successor[0] can't be nullptr"));
}

- const char *Operation1::attributes_name[2] = {"op1_attr1",
+ const char *Operation1::attributes_name[2] = {"op1_attr1", // NOLINT
"op1_attr2"}; // NOLINT

void Operation1::Build(pir::Builder &builder, // NOLINT