【Hackathon 6th Fundable Projects 2 No.22】cppcoreguidelines-pro-type-member-init_1-part (PaddlePaddle#63938)

* restore

* minor changes

* format

* minor changes
walkalone20 authored and chen2016013 committed May 26, 2024
1 parent 5f9534a commit dae3249
Showing 14 changed files with 60 additions and 25 deletions.
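
Every hunk below serves the same clang-tidy check, cppcoreguidelines-pro-type-member-init: it warns when a constructor leaves a member of trivial type (int, float, raw pointer, C array) holding an indeterminate value, and when a record-type local variable is declared without an initializer. Reading such a value is undefined behavior. A minimal before/after sketch of the pattern (the struct Sample is hypothetical, not from the Paddle sources):

    #include <vector>

    // Before: flagged by the check. A default-constructed Sample leaves
    // `count` and `data` indeterminate; the vector member is fine because
    // its default constructor runs regardless.
    struct Sample {
      int count;
      float* data;
      std::vector<int> values;
      Sample() = default;
    };

    // After: every trivially-constructible member gets a deterministic value.
    struct SampleFixed {
      int count;
      float* data;
      std::vector<int> values;
      SampleFixed() : count(0), data(nullptr), values() {}
    };

The commit applies that rule in three forms: explicit member-initializer lists for classes, aggregate initialization (= {}) for locals of struct and std::array type, and zero initializers for scalar fields.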
12 changes: 9 additions & 3 deletions paddle/fluid/distributed/ps/service/communicator/communicator.cc
@@ -33,12 +33,18 @@ using phi::SelectedRows;
 const uint32_t MAX_FEASIGN_NUM = 1024 * 100 * 100;
 
 inline double GetCurrentUS() {
-  struct timeval time;
-  gettimeofday(&time, nullptr);
+  struct timeval time = {0, 0};
+  gettimeofday(&time, NULL);
   return 1e+6 * time.tv_sec + time.tv_usec;
 }
 
-Communicator::Communicator() = default;
+Communicator::Communicator()
+    : envs(),
+      trainers_(0),
+      send_varname_to_ctx_(),
+      recv_varname_to_ctx_(),
+      recv_scope_(nullptr),
+      xpu_temp_scope_(nullptr) {}
 
 void Communicator::InitGFlag(const std::string &gflags) {
   VLOG(3) << "Init With Gflags:" << gflags;
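
The GetCurrentUS() change illustrates the local-variable side of the check: struct timeval has no constructor, so `struct timeval time;` leaves both fields indeterminate until gettimeofday() overwrites them, while `= {0, 0}` aggregate-initializes them first. A standalone sketch of the same idiom (assumes a POSIX system; the function name is hypothetical):

    #include <sys/time.h>

    inline double CurrentMicroseconds() {
      // Aggregate initialization zeroes both fields up front, so the value
      // is well-defined even if gettimeofday() fails and leaves it untouched.
      struct timeval now = {0, 0};
      gettimeofday(&now, nullptr);
      return 1e+6 * now.tv_sec + now.tv_usec;
    }

The Communicator change shows the other side: a defaulted constructor is replaced with an explicit member-initializer list so that `trainers_(0)` and the two pointer members get deterministic values.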
12 changes: 12 additions & 0 deletions paddle/fluid/distributed/test/ctr_accessor_test.cc
@@ -181,6 +181,16 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     std::vector<float> embed_g2sum;
     std::vector<float> embedx_w;
     std::vector<float> embedx_g2sum;
+    DownpourSparseValueTest()
+        : slot(0),
+          unseen_days(0),
+          delta_score(0),
+          show(0),
+          click(0),
+          embed_w(0),
+          embed_g2sum(),
+          embedx_w(),
+          embedx_g2sum() {}
 
     void to_array(float* ptr, size_t dim) {
       ptr[0] = slot;
@@ -210,6 +220,8 @@ TEST(downpour_feature_value_accessor_test, test_update) {
     float click;
     float embed_g;
     std::vector<float> embedx_g;
+    DownpourSparsePushValueTest()
+        : slot(0), show(0), click(0), embed_g(0), embedx_g() {}
   };
   std::vector<float*> exp_value;
   for (auto i = 0u; i < item_size; ++i) {
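
Since C++11, default member initializers are an equally valid way to satisfy the check, and they keep each value next to its declaration; a hypothetical alternative shape for the same test struct (shown for comparison, not what the commit does):

    #include <vector>

    struct DownpourSparseValueAlt {
      float slot = 0;
      float unseen_days = 0;
      float delta_score = 0;
      float show = 0;
      float click = 0;
      float embed_w = 0;
      // Vector members need no initializer: their default constructor runs.
      std::vector<float> embed_g2sum;
      std::vector<float> embedx_w;
      std::vector<float> embedx_g2sum;
    };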
9 changes: 5 additions & 4 deletions paddle/fluid/framework/io/shell.cc
@@ -59,6 +59,7 @@ static int close_open_fds_internal() {
     off_t d_off;
     unsigned short d_reclen = 0;  // NOLINT
     char d_name[256];             // NOLINT
+    linux_dirent() : d_off(0), d_name{} {}
   };
 
   int dir_fd = open("/proc/self/fd", O_RDONLY);
@@ -151,7 +152,7 @@ static int shell_popen_fork_internal(const char* real_cmd,
 }
 
 static int read_from_pipe(FILE* fp, std::string* output) {
-  std::array<char, 4096> buf;
+  std::array<char, 4096> buf = {};
   while (true) {
     int n = static_cast<int>(fread(buf.data(), 1, 4096, fp));
     if (n <= 0) {
@@ -187,7 +188,7 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
 
   std::string real_cmd = "set -o pipefail; " + cmd;
 
-  std::array<int, 2> pipe_fds;
+  std::array<int, 2> pipe_fds = {};
   if (pipe(pipe_fds.data()) != 0) {
     *err_no = -1;
     return nullptr;
@@ -300,8 +301,8 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open(
 
   std::string real_cmd = "set -o pipefail; " + cmd;
 
-  std::array<int, 2> pipein_fds;
-  std::array<int, 2> pipeout_fds;
+  std::array<int, 2> pipein_fds = {};
+  std::array<int, 2> pipeout_fds = {};
   if (pipe(pipein_fds.data()) != 0) {
     return {nullptr, nullptr};
   }
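
std::array is an aggregate with no user-provided constructor, so a plain declaration with a trivial element type leaves every element indeterminate, while `= {}` value-initializes them all (zero for arithmetic types). A small sketch of the distinction:

    #include <array>

    void Demo() {
      std::array<int, 4> a;        // elements indeterminate; reading them is UB
      std::array<int, 4> b = {};   // all four elements value-initialized to 0
      std::array<int, 4> c = {7};  // c[0] == 7, the rest zeroed
      (void)a; (void)b; (void)c;   // silence unused-variable warnings
    }

For `buf` in read_from_pipe() the zeroing is strictly precautionary, since fread() reports how many bytes are valid, but it makes the buffer's initial state deterministic at negligible cost.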
2 changes: 1 addition & 1 deletion
@@ -49,7 +49,7 @@ PyLayerInstruction::PyLayerInstruction(
     pir::Operation* op,
     ValueExecutionInfo* value_exec_info,
     interpreter::ExecutionConfig execution_config)
-    : InstructionBase(id, place) {
+    : InstructionBase(id, place), output_vars_(), fwd_skip_gc_names_() {
   PADDLE_ENFORCE(op->isa<paddle::dialect::PyLayerOp>(),
                  phi::errors::PreconditionNotMet(
                      "Cond instruction only support pylayer op"));
6 changes: 5 additions & 1 deletion
@@ -51,7 +51,11 @@ WhileInstruction::WhileInstruction(
     pir::Operation* op,
     ValueExecutionInfo* parent_exe_info,
     interpreter::ExecutionConfig execution_config)
-    : InstructionBase(id, place) {
+    : InstructionBase(id, place),
+      inputs_(),
+      outputs_(),
+      body_inter_(nullptr),
+      external_input_names_() {
   PADDLE_ENFORCE(op->isa<paddle::dialect::WhileOp>(),
                  phi::errors::PreconditionNotMet(
                      "While instruction only support While op"));
9 changes: 8 additions & 1 deletion paddle/fluid/framework/new_executor/standalone_executor.cc
@@ -37,7 +37,14 @@ namespace framework {
 StandaloneExecutor::StandaloneExecutor(const platform::Place& place,
                                        const interpreter::Plan& plan,
                                        Scope* scope)
-    : place_(place), plan_(plan), scope_(scope) {
+    : place_(place),
+      plan_(plan),
+      interpretercores_(),
+      scope_(scope),
+      micro_batch_scopes_(),
+      fetch_var_names_(),
+      fetch_list_(),
+      vec_force_events_to_wait_() {
   int64_t micro_batch_num = plan_.MicroBatchNum();
   vec_force_events_to_wait_.resize(micro_batch_num);
   for (int64_t i = 0; i < micro_batch_num; ++i) {
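
Not every entry added to these initializer lists changes behavior. Members with non-trivial default constructors (std::vector, std::map, scoped containers) were already default-constructed before this change; writing out `interpretercores_()` and friends is a consistency and documentation measure. The entries that actually remove indeterminate values are the trivial ones, such as `trainers_(0)` and the nullptr pointer initializers in earlier hunks. A sketch of the distinction (hypothetical class, not from the Paddle sources):

    #include <string>
    #include <vector>

    struct ExecutorSketch {
      int batch_num;                   // trivial: indeterminate unless initialized
      void* stream;                    // trivial: same
      std::vector<std::string> names;  // non-trivial: default-constructed regardless
      // `names()` is redundant but explicit; the first two initializers are
      // the ones that eliminate undefined behavior.
      ExecutorSketch() : batch_num(0), stream(nullptr), names() {}
    };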
1 change: 1 addition & 0 deletions paddle/fluid/platform/profiler.cc
@@ -584,6 +584,7 @@ MemEventRecorder::RecordMemEvent::RecordMemEvent(const Place &place,
     : place_(place),
       bytes_(bytes),
       start_ns_(PosixInNsec()),
+      end_ns_(0),
       alloc_in_(phi::CurAnnotationName()) {
   PushMemEvent(start_ns_, end_ns_, bytes_, place_, alloc_in_);
 }
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/op_function_common.cc
@@ -56,7 +56,7 @@ class OpAttrTypeMap {
   }
 
  private:
-  OpAttrTypeMap() = default;
+  OpAttrTypeMap() : ops_attrtype_map_() {}
   std::unordered_map<
       std::string,
       std::unordered_map<std::string, paddle::framework::proto::AttrType>>
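
For OpAttrTypeMap the rewrite is behavior-neutral: its only member is a std::unordered_map, which `= default` already default-constructs. The explicit list simply makes every member visibly accounted for. Both reduced forms below are equivalent (hypothetical names, for illustration only):

    #include <string>
    #include <unordered_map>

    struct AttrMapDefaulted {
      AttrMapDefaulted() = default;  // map_ is default-constructed (empty)
      std::unordered_map<std::string, int> map_;
    };

    struct AttrMapExplicit {
      AttrMapExplicit() : map_() {}  // identical result, spelled out
      std::unordered_map<std::string, int> map_;
    };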
6 changes: 4 additions & 2 deletions paddle/phi/core/distributed/nccl_comm_task.cc
@@ -48,13 +48,15 @@ NCCLCommTask::NCCLCommTask(const phi::Place& place,
                 nccl_comm,
                 stream,
                 comm_type),
+      timeout_(std::chrono::milliseconds(timeout)),
       sync_op_(sync_op),
-      use_calc_stream_(use_calc_stream) {
+      use_calc_stream_(use_calc_stream),
+      nccl_start_event_(nullptr),
+      nccl_end_event_(nullptr) {
   start_trace_updated_ = false;
   start_event_created_ = false;
   end_event_created_ = false;
   start_time_ = std::chrono::steady_clock::now();
-  timeout_ = std::chrono::milliseconds(timeout);
 }
 
 void NCCLCommTask::StartRecord() {
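
This hunk also moves `timeout_` from an assignment in the constructor body into the member-initializer list, which is the idiomatic fix: members are initialized, in declaration order, before the body runs, so a body assignment means default-construct-then-assign, and a std::chrono::duration member (whose defaulted default constructor leaves its tick count uninitialized) would hold an unspecified value until the assignment executes. A minimal sketch, assuming a millisecond timeout member:

    #include <chrono>
    #include <cstdint>

    class TaskSketch {  // hypothetical, for illustration only
     public:
      // timeout_ is initialized directly; there is no window in which it
      // holds an indeterminate tick count.
      explicit TaskSketch(int64_t timeout_ms)
          : timeout_(std::chrono::milliseconds(timeout_ms)) {}

     private:
      std::chrono::milliseconds timeout_;
    };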
12 changes: 6 additions & 6 deletions paddle/phi/kernels/funcs/gpc.cc
@@ -956,9 +956,9 @@ void gpc_polygon_clip(gpc_op op,
   polygon_node *cf = nullptr;
   vertex_node *vtx = nullptr;
   vertex_node *nv = nullptr;
-  std::array<h_state, 2> horiz;
-  std::array<int, 2> in;
-  std::array<int, 2> exists;
+  std::array<h_state, 2> horiz = {};
+  std::array<int, 2> in = {};
+  std::array<int, 2> exists = {};
   std::array<int, 2> parity = {LEFT, LEFT};
   int c = 0;
   int v = 0;
@@ -1603,10 +1603,10 @@ void gpc_tristrip_clip(gpc_op op,
   vertex_node *ltn = nullptr;
   vertex_node *rt = nullptr;
   vertex_node *rtn = nullptr;
-  std::array<h_state, 2> horiz;
+  std::array<h_state, 2> horiz = {};
   vertex_type cft = NUL;
-  std::array<int, 2> in;
-  std::array<int, 2> exists;
+  std::array<int, 2> in = {};
+  std::array<int, 2> exists = {};
   std::array<int, 2> parity = {LEFT, LEFT};
   int s = 0;
   int v = 0;
6 changes: 3 additions & 3 deletions paddle/phi/kernels/funcs/jit/test.cc
@@ -1177,9 +1177,9 @@ TEST(JITKernel_helper, GetAllCandidateFuncs) {
 
 TEST(JITKernel_helper, pack_weights) {
   const int N = 8 * 60, K = 2;
-  std::array<std::array<float, N>, K> src;
-  std::array<std::array<float, N>, K> yref;
-  std::array<float, N * K> y;
+  std::array<std::array<float, N>, K> src = {};
+  std::array<std::array<float, N>, K> yref = {};
+  std::array<float, N* K> y = {};
   float* x = &(src[0][0]);
   float* ref = &(yref[0][0]);
   for (int i = 0; i < N * K; ++i) {
1 change: 1 addition & 0 deletions
@@ -30,6 +30,7 @@ namespace analysis {
 struct Record {
   std::vector<float> data;
   std::vector<int32_t> shape;
+  Record() : data(), shape() {}
 };
 
 Record ProcessALine(const std::string &line, const std::string &shape_line) {
6 changes: 3 additions & 3 deletions test/cpp/phi/api/test_from_blob.cc
@@ -84,7 +84,7 @@ using phi::memory_utils::Copy;
 TEST(GetPlaceFromPtr, GPU) {
   using paddle::GetPlaceFromPtr;
 
-  std::array<float, 6> cpu_data;
+  std::array<float, 6> cpu_data = {};
   auto cpu_data_place = GetPlaceFromPtr(cpu_data.data());
   ASSERT_EQ(cpu_data_place, phi::CPUPlace());
   std::cout << "cpu_data_place: " << cpu_data_place << std::endl;
@@ -137,7 +137,7 @@ TEST(from_blob, GPU) {
 
   // 3.2 check tensor values
   auto* gpu_tensor_data = gpu_tensor.template data<float>();
-  std::array<float, 6> gpu_tensor_data_cpu;
+  std::array<float, 6> gpu_tensor_data_cpu = {};
   Copy(phi::CPUPlace(),
        gpu_tensor_data_cpu.data(),
        gpu0,
@@ -155,7 +155,7 @@ TEST(from_blob, GPU) {
   // 3.4 test other API
   auto gpu_tensor_pow = paddle::experimental::pow(gpu_tensor, 2);
   auto* gpu_tensor_pow_data = gpu_tensor_pow.template data<float>();
-  std::array<float, 6> gpu_tensor_pow_data_cpu;
+  std::array<float, 6> gpu_tensor_pow_data_cpu = {};
   Copy(phi::CPUPlace(),
        gpu_tensor_pow_data_cpu.data(),
        gpu0,
1 change: 1 addition & 0 deletions test/deprecated/cpp/inference/api/analyzer_vis_tester.cc
@@ -26,6 +26,7 @@ namespace analysis {
 struct Record {
   std::vector<float> data;
   std::vector<int32_t> shape;
+  Record() : data(), shape() {}
 };
 
 Record ProcessALine(const std::string &line) {