[PHI decoupling] move "flags.h" from fluid to phi (PaddlePaddle#48696)
AndPuQing committed Dec 9, 2022
1 parent 329ee31 commit 124cc7d
Showing 36 changed files with 68 additions and 73 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/store/tcp_store.cc
@@ -20,7 +20,7 @@

#include "paddle/fluid/distributed/store/tcp_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

namespace paddle {
namespace distributed {
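For most of the touched files the change is exactly this one-line include swap. Below is a minimal sketch of the pattern in a hypothetical consumer file (not part of the commit); `use_stream_safe_cuda_allocator` is a real flag declared later in this diff, and the `DECLARE_`/`FLAGS_` symbols are assumed to be the usual gflags-style wrappers exposed through the header.

```cpp
// Hypothetical consumer file, illustrating the include swap only.
//
// Before: #include "paddle/fluid/platform/flags.h"
// After:
#include "paddle/phi/core/flags.h"

// Flag declarations and reads are unchanged; the macros keep their names.
DECLARE_bool(use_stream_safe_cuda_allocator);

bool UseStreamSafeCudaAllocator() {
  // Assumes the gflags-style FLAGS_<name> symbol behind DECLARE_bool.
  return FLAGS_use_stream_safe_cuda_allocator;
}
```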
@@ -25,7 +25,7 @@
#include "paddle/fluid/eager/tests/performance_tests/benchmark_utils.h"
#include "paddle/fluid/eager/tests/test_utils.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
@@ -24,7 +24,7 @@
#include "paddle/fluid/eager/tests/performance_tests/benchmark_utils.h"
#include "paddle/fluid/eager/tests/test_utils.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

#ifdef WITH_GPERFTOOLS
#include "gperftools/profiler.h"
2 changes: 1 addition & 1 deletion paddle/fluid/framework/new_executor/executor_statistics.cc
@@ -24,9 +24,9 @@
#include <vector>

#include "glog/logging.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/fluid/platform/os_info.h"
#include "paddle/fluid/platform/profiler/utils.h"
#include "paddle/phi/core/flags.h"

DECLARE_bool(use_stream_safe_cuda_allocator);
PADDLE_DEFINE_EXPORTED_string(static_executor_perfstat_filepath,
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/flags.cc
@@ -14,7 +14,7 @@

#include "paddle/fluid/imperative/flags.h"

#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

PADDLE_DEFINE_EXPORTED_uint64(dygraph_debug,
0,
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/profiler.cc
@@ -21,7 +21,7 @@

#include <mutex> // NOLINT

#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

PADDLE_DEFINE_EXPORTED_string(
tracer_profile_fname,
3 changes: 1 addition & 2 deletions paddle/fluid/inference/api/analysis_predictor.cc
@@ -1422,8 +1422,7 @@ CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
}

// support set flags from enviorment.
const platform::ExportedFlagInfoMap &env_map =
platform::GetExportedFlagInfoMap();
const phi::ExportedFlagInfoMap &env_map = phi::GetExportedFlagInfoMap();
std::ostringstream os;
os << "--tryfromenv=";
for (auto &pair : env_map) {
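A condensed sketch of the call site above after the rename: the map type and its accessor move from `paddle::platform` to the `phi` namespace, while the iteration over exported flags is unchanged. Only the `.name` field is used here; the entry also exposes fields such as `is_writable`, as seen further down in `global_value_getter_setter.cc`.

```cpp
#include <sstream>
#include <string>

#include "paddle/phi/core/flags.h"

// Builds the "--tryfromenv=" argument from every exported flag, mirroring
// the snippet in analysis_predictor.cc after this change.
std::string BuildTryFromEnvArg() {
  const phi::ExportedFlagInfoMap &env_map = phi::GetExportedFlagInfoMap();
  std::ostringstream os;
  os << "--tryfromenv=";
  for (const auto &pair : env_map) {
    os << pair.second.name << ",";
  }
  return os.str();
}
```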
@@ -18,8 +18,8 @@
#include <mutex> // NOLINT

#include "paddle/fluid/memory/allocation/aligned_allocator.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/core/flags.h"

PADDLE_DEFINE_EXPORTED_READONLY_bool(
free_idle_chunk,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/controlflow/conditional_block_op.cc
@@ -17,7 +17,7 @@ limitations under the License. */
#include "paddle/fluid/framework/new_executor/standalone_executor.h"
#include "paddle/fluid/operators/assign_op.h"
#include "paddle/fluid/operators/controlflow/control_flow_op_helper.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"
#include "paddle/phi/kernels/funcs/math_function.h"

#ifdef PADDLE_WITH_MKLDNN
1 change: 0 additions & 1 deletion paddle/fluid/operators/conv_op.cc
@@ -25,7 +25,6 @@ limitations under the License. */
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/infermeta/binary.h"

namespace paddle {
1 change: 0 additions & 1 deletion paddle/fluid/operators/conv_transpose_op.cc
@@ -21,7 +21,6 @@ limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fused/fusion_conv_inception_op.cc
@@ -16,7 +16,7 @@ limitations under the License. */
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"

namespace paddle {
namespace operators {
@@ -113,7 +113,7 @@ class ConvInceptionFusionOpMaker : public framework::OpProtoAndCheckerMaker {
"allocated/freed each time the operator runs, larger "
"workspace size can increase performance but also requires "
"better hardware. This size should be chosen carefully.")
.SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
.SetDefault(phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB());
AddComment(R"DOC(
)DOC");
}
9 changes: 0 additions & 9 deletions paddle/fluid/platform/CMakeLists.txt
@@ -31,10 +31,6 @@ if(WITH_PYTHON)
endif()
endif()

cc_library(
flags
SRCS flags.cc
DEPS gflags)
cc_library(
denormal
SRCS denormal.cc
@@ -178,11 +174,6 @@ if(WITH_GLOO)
DEPS framework_proto gloo_wrapper enforce)
endif()

cc_library(
cudnn_workspace_helper
SRCS cudnn_workspace_helper.cc
DEPS)

# separate init from device_context to avoid cycle dependencies
cc_library(
init
2 changes: 1 addition & 1 deletion paddle/fluid/platform/cpu_info.cc
@@ -32,7 +32,7 @@ limitations under the License. */

#include <algorithm>

#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

DECLARE_double(fraction_of_cpu_memory_to_use);
DECLARE_uint64(initial_cpu_memory_in_mb);
2 changes: 1 addition & 1 deletion paddle/fluid/platform/device/gpu/gpu_info.cc
@@ -24,14 +24,14 @@ limitations under the License. */
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/platform/cuda_device_guard.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/fluid/platform/lock_guard_ptr.h"
#include "paddle/fluid/platform/macros.h"
#include "paddle/fluid/platform/monitor.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler/mem_tracing.h"
#include "paddle/fluid/string/split.h"
#include "paddle/phi/backends/gpu/gpu_info.h"
#include "paddle/phi/core/flags.h"

#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/platform/dynload/miopen.h"
2 changes: 1 addition & 1 deletion paddle/fluid/platform/enforce.h
@@ -101,7 +101,7 @@ limitations under the License. */
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/device/gpu/gpu_types.h"
#endif
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

namespace phi {
class ErrorSummary;
2 changes: 1 addition & 1 deletion paddle/fluid/platform/profiler/host_tracer.cc
@@ -17,9 +17,9 @@

#include "glog/logging.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/fluid/platform/profiler/common_event.h"
#include "paddle/fluid/platform/profiler/host_event_recorder.h"
#include "paddle/phi/core/flags.h"

// Used to filter events, works like glog VLOG(level).
// RecordEvent will works if host_trace_level >= level.
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/global_value_getter_setter.cc
@@ -252,7 +252,7 @@ static void RegisterGlobalVarGetterSetter() {
REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_prefetch_thread_num);
#endif

const auto &flag_map = platform::GetExportedFlagInfoMap();
const auto &flag_map = phi::GetExportedFlagInfoMap();
for (const auto &pair : flag_map) {
const std::string &name = pair.second.name;
bool is_writable = pair.second.is_writable;
2 changes: 1 addition & 1 deletion paddle/phi/api/yaml/generator/ops_extra_info_gen.py
@@ -22,7 +22,7 @@ def map_code_template(attrs_str, attrs_checker_str):
return f"""// This file is generated by paddle/phi/api/yaml/generator/ops_extra_info_gen.py
#include "paddle/fluid/operators/ops_extra_info.h"
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
namespace paddle {{
namespace operators {{
14 changes: 7 additions & 7 deletions paddle/phi/api/yaml/op_compat.yaml
@@ -202,7 +202,7 @@
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_fusion
extra :
@@ -211,28 +211,28 @@
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv2d_transpose
backward : conv2d_transpose_grad
extra :
attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB()]

- op : conv3d
backward : conv3d_grad
extra :
attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : conv3d_transpose
backward : conv3d_transpose_grad
extra :
attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB()]

- op : cos
backward : cos_grad, cos_double_grad, cos_triple_grad
@@ -273,15 +273,15 @@
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f,
float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]

- op : depthwise_conv2d_transpose
backward : depthwise_conv2d_transpose_grad
extra :
attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
str mkldnn_data_type = "float32", bool fuse_relu = false,
str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB()]

- op : dequantize_linear
extra :
1 change: 1 addition & 0 deletions paddle/phi/backends/CMakeLists.txt
@@ -1,4 +1,5 @@
add_subdirectory(dynload)
add_subdirectory(gpu)

set(BACKENDS_SRCS all_context.cc cpu/cpu_context.cc)
set(BACKENDS_DEPS enforce place flags eigen3 phi_device_context)
1 change: 1 addition & 0 deletions paddle/phi/backends/gpu/CMakeLists.txt
@@ -0,0 +1 @@
add_subdirectory(cuda)
1 change: 1 addition & 0 deletions paddle/phi/backends/gpu/cuda/CMakeLists.txt
@@ -0,0 +1 @@
cc_library(cudnn_workspace_helper SRCS cudnn_workspace_helper.cc)
@@ -1,4 +1,4 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"

#include <cstdlib>
#include <string>

namespace paddle {
namespace platform {
namespace phi {
namespace backends {
namespace gpu {

static int GetDefaultConvWorkspaceSizeLimitMBImpl() {
const char *env_str = std::getenv("FLAGS_conv_workspace_size_limit");
@@ -30,6 +31,6 @@ int GetDefaultConvWorkspaceSizeLimitMB() {
static auto workspace_size = GetDefaultConvWorkspaceSizeLimitMBImpl();
return workspace_size;
}

} // namespace platform
} // namespace paddle
} // namespace gpu
} // namespace backends
} // namespace phi
@@ -1,4 +1,4 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,12 +14,13 @@

#pragma once

namespace paddle {
namespace platform {
namespace phi {
namespace backends {
namespace gpu {

static constexpr int kDefaultConvWorkspaceSizeLimitMB = 512;

int GetDefaultConvWorkspaceSizeLimitMB();

} // namespace platform
} // namespace paddle
} // namespace gpu
} // namespace backends
} // namespace phi
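Callers of the relocated helper only need the new header path and namespace; the function name and behavior are unchanged. A minimal sketch of a call site:

```cpp
#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"

// Previously paddle::platform::GetDefaultConvWorkspaceSizeLimitMB().
int DefaultConvWorkspaceMB() {
  return phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB();
}
```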
2 changes: 1 addition & 1 deletion paddle/phi/backends/xpu/xpu_info.cc
@@ -21,7 +21,7 @@ limitations under the License. */

// TODO(wilber): The phi computing library requires a component to manage
// flags.
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"

PADDLE_DEFINE_EXPORTED_string(
selected_xpus,
5 changes: 5 additions & 0 deletions paddle/phi/core/CMakeLists.txt
@@ -5,6 +5,11 @@ if(WITH_GPU)
proto_library(external_error_proto SRCS external_error.proto)
endif()

cc_library(
flags
SRCS flags.cc
DEPS gflags)

cc_library(errors SRCS errors.cc)
set(phi_enforce_deps errors flags)
if(WITH_GPU)
17 changes: 8 additions & 9 deletions paddle/fluid/platform/flags.cc → paddle/phi/core/flags.cc
@@ -13,13 +13,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/core/flags.h"
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
#endif

namespace paddle {
namespace platform {
namespace phi {

const ExportedFlagInfoMap &GetExportedFlagInfoMap() {
return *GetMutableExportedFlagInfoMap();
@@ -30,8 +29,7 @@ ExportedFlagInfoMap *GetMutableExportedFlagInfoMap() {
return &g_exported_flag_info_map;
}

} // namespace platform
} // namespace paddle
} // namespace phi

PADDLE_DEFINE_EXPORTED_int32(inner_op_parallelism,
0,
@@ -261,9 +259,10 @@ PADDLE_DEFINE_EXPORTED_bool(
* increased.
* Users need to balance memory and speed.
*/
PADDLE_DEFINE_EXPORTED_int64(conv_workspace_size_limit,
paddle::platform::kDefaultConvWorkspaceSizeLimitMB,
"cuDNN convolution workspace limit in MB unit.");
PADDLE_DEFINE_EXPORTED_int64(
conv_workspace_size_limit,
phi::backends::gpu::kDefaultConvWorkspaceSizeLimitMB,
"cuDNN convolution workspace limit in MB unit.");

/**
* CUDNN related FLAG
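With flags.cc now living in phi/core, new flags are still registered through the same PADDLE_DEFINE_EXPORTED_* macros and read through the usual FLAGS_<name> symbols. A hedged sketch follows; the flag name example_limit_mb is invented for illustration and is not part of this commit.

```cpp
#include "paddle/phi/core/flags.h"

PADDLE_DEFINE_EXPORTED_int64(example_limit_mb,
                             512,
                             "Illustrative flag; real flags such as "
                             "conv_workspace_size_limit are defined the same "
                             "way in paddle/phi/core/flags.cc.");

int64_t ExampleLimitMB() {
  // Assumes the gflags-backed FLAGS_<name> accessor that these macros wrap.
  return FLAGS_example_limit_mb;
}
```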