diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1802e4a46d5bd..265ddc9504167 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -168,6 +168,9 @@ if(WITH_BRPC_RDMA)
endif()
endif()
+# Lite subgraph compilation depends on CUDNN_ROOT,
+# so include(cudnn) must come before include(third_party/lite).
+include(cudnn) # set cudnn libraries, must be before configure
include(third_party) # download, build, install third_party
if(WITH_DISTRIBUTE)
@@ -187,7 +190,6 @@ if(NOT WIN32)
endif()
include(flags) # set paddle compile flags
-include(cudnn) # set cudnn libraries, must before configure
if(WITH_GPU)
include(cuda)
@@ -216,6 +218,9 @@ endif(WITH_AMD_GPU)
if(WITH_ARM)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
+  set(WITH_XBYAK OFF CACHE STRING "Disable XBYAK when compiling WITH_ARM=ON." FORCE)
+ set(WITH_MKL OFF CACHE STRING "Disable MKL when compiling WITH_ARM=ON." FORCE)
+ set(WITH_GPU OFF CACHE STRING "Disable GPU when compiling WITH_ARM=ON." FORCE)
add_definitions(-DPADDLE_WITH_ARM)
endif()
diff --git a/README.md b/README.md
index 1805faeb11f03..b07709facd528 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,9 @@
-# PaddlePaddle
+<p align="center">
+<img src="./doc/imgs/logo.png" align="center" />
+</p>
+
+--------------------------------------------------------------------------------
English | [简体中文](./README_cn.md)
@@ -29,7 +33,7 @@ pip install paddlepaddle
# Linux GPU cuda10cudnn7
pip install paddlepaddle-gpu
# Linux GPU cuda9cudnn7
-pip install paddlepaddle-gpu==1.8.2.post97
+pip install paddlepaddle-gpu==1.8.3.post97
```
It is recommended to read [this doc](https://www.paddlepaddle.org.cn/documentation/docs/en/beginners_guide/install/index_en.html) on our website.
diff --git a/README_cn.md b/README_cn.md
index dccd4f227b8d1..93ad06d20010f 100644
--- a/README_cn.md
+++ b/README_cn.md
@@ -1,5 +1,9 @@
-# PaddlePaddle
+<p align="center">
+<img src="./doc/imgs/logo.png" align="center" />
+</p>
+
+--------------------------------------------------------------------------------
[English](./README.md) | 简体中文
@@ -26,7 +30,7 @@ pip install paddlepaddle
# Linux GPU cuda10cudnn7
pip install paddlepaddle-gpu
# Linux GPU cuda9cudnn7
-pip install paddlepaddle-gpu==1.8.2.post97
+pip install paddlepaddle-gpu==1.8.3.post97
```
更多安装信息详见官网 [安装说明](http://www.paddlepaddle.org.cn/documentation/docs/zh/1.8/beginners_guide/install/index_cn.html)
diff --git a/cmake/external/lite.cmake b/cmake/external/lite.cmake
index 49488c855f930..978b0427125be 100644
--- a/cmake/external/lite.cmake
+++ b/cmake/external/lite.cmake
@@ -25,7 +25,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
set(LITE_INSTALL_DIR ${THIRD_PARTY_PATH}/install/lite)
if(NOT LITE_GIT_TAG)
- set(LITE_GIT_TAG ab8af5c4b4dc5b40217633e0aa436315912d7b53)
+ set(LITE_GIT_TAG 42ab4d559f6659edfc35040fb30fdcec3dc3f8aa)
endif()
if(NOT CUDA_ARCH_NAME)
@@ -83,7 +83,7 @@ message(STATUS "Paddle-lite SOURCE_DIR: ${LITE_SOURCE_DIR}")
include_directories(${LITE_SOURCE_DIR})
include_directories(${LITE_BINARY_DIR})
-function(external_lite_static_libs alias path)
+function(external_lite_libs alias path)
add_library(${alias} SHARED IMPORTED GLOBAL)
SET_PROPERTY(TARGET ${alias} PROPERTY IMPORTED_LOCATION
${path})
@@ -92,7 +92,16 @@ function(external_lite_static_libs alias path)
endif()
endfunction()
-external_lite_static_libs(lite_full_static ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
+external_lite_libs(lite_full_static ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
+set(LITE_SHARED_LIB ${LITE_BINARY_DIR}/inference_lite_lib/cxx/lib/libpaddle_full_api_shared.so)
+
+if(XPU_SDK_ROOT)
+ include_directories("${XPU_SDK_ROOT}/XTDK/include")
+ include_directories("${XPU_SDK_ROOT}/XTCL/include")
+ add_definitions(-DPADDLE_WITH_XPU)
+ LINK_DIRECTORIES("${XPU_SDK_ROOT}/XTDK/shlib/")
+ LINK_DIRECTORIES("${XPU_SDK_ROOT}/XTDK/runtime/shlib/")
+endif()
add_definitions(-DPADDLE_WITH_LITE)
add_definitions(-DLITE_WITH_LOG)
diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake
index 5e47f268a3669..5bc7eaaff3abe 100644
--- a/cmake/external/openblas.cmake
+++ b/cmake/external/openblas.cmake
@@ -20,6 +20,8 @@ SET(CBLAS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/openblas)
SET(CBLAS_REPOSITORY https://github.com/xianyi/OpenBLAS.git)
SET(CBLAS_TAG v0.3.7)
IF(WITH_ARM)
+ # Under the FT2000 architecture, the calculation result of blas.sgemm in openblas 0.3+ is wrong,
+ # so version 0.2 is used by default.
SET(CBLAS_TAG v0.2.18)
ENDIF()
cache_third_party(extern_openblas
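The FT2000 note above concerns silent numerical corruption, so a tiny host-side sanity check is one way to vet a candidate tag before bumping CBLAS_TAG. The sketch below assumes a standard CBLAS-style OpenBLAS install (link with -lopenblas); it is illustrative and not part of this patch.

```cpp
// Minimal cblas_sgemm sanity check: I * I must equal I.
#include <cblas.h>
#include <cmath>
#include <cstdio>

int main() {
  const float A[4] = {1.f, 0.f, 0.f, 1.f};  // 2x2 identity
  const float B[4] = {1.f, 0.f, 0.f, 1.f};  // 2x2 identity
  float C[4] = {0.f, 0.f, 0.f, 0.f};
  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
              /*M=*/2, /*N=*/2, /*K=*/2, /*alpha=*/1.f, A, /*lda=*/2,
              B, /*ldb=*/2, /*beta=*/0.f, C, /*ldc=*/2);
  const float expected[4] = {1.f, 0.f, 0.f, 1.f};
  for (int i = 0; i < 4; ++i) {
    if (std::fabs(C[i] - expected[i]) > 1e-6f) {
      std::printf("sgemm mismatch at %d: %f\n", i, C[i]);
      return 1;
    }
  }
  std::printf("sgemm ok\n");
  return 0;
}
```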
diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
index 04f22d7fc8775..82dd4fa2e8eae 100644
--- a/cmake/external/protobuf.cmake
+++ b/cmake/external/protobuf.cmake
@@ -145,9 +145,9 @@ if (NOT "${PROTOBUF_ROOT}" STREQUAL "")
find_program(PROTOBUF_PROTOC_EXECUTABLE protoc PATHS ${PROTOBUF_ROOT}/bin NO_DEFAULT_PATH)
if (PROTOBUF_INCLUDE_DIR AND PROTOBUF_LIBRARY AND PROTOBUF_LITE_LIBRARY AND PROTOBUF_PROTOC_LIBRARY AND PROTOBUF_PROTOC_EXECUTABLE)
SET(PROTOBUF_FOUND true)
+ message(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.")
SET_PROTOBUF_VERSION()
PROMPT_PROTOBUF_LIB()
- message(STATUS "Using custom protobuf library in ${PROTOBUF_ROOT}.")
endif()
endif()
diff --git a/cmake/flags.cmake b/cmake/flags.cmake
index e6a77c38ab5c0..64878693518b6 100644
--- a/cmake/flags.cmake
+++ b/cmake/flags.cmake
@@ -8,6 +8,8 @@ function(CheckCompilerCXX11Flag)
if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
if(${CMAKE_CXX_COMPILER_VERSION} VERSION_LESS 4.8)
message(FATAL_ERROR "Unsupported GCC version. GCC >= 4.8 required.")
+ elseif(${CMAKE_CXX_COMPILER_VERSION} VERSION_GREATER 8.2)
+ message(WARNING "Found GCC ${CMAKE_CXX_COMPILER_VERSION} which is too high, recommended to use GCC 8.2")
endif()
elseif(CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
# cmake >= 3.0 compiler id "AppleClang" on Mac OS X, otherwise "Clang"
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index 6fc81f2387b78..5a889dbc31438 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -19,9 +19,12 @@ set(FLUID_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_install_dir" CACHE STRING
set(FLUID_INFERENCE_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_inference_install_dir" CACHE STRING
"A path setting fluid inference shared and static libraries")
+# TODO(zhaolong)
+# At present, the size of the static lib on Windows exceeds the system limit,
+# so the generation of static lib is temporarily turned off.
if(WIN32)
#todo: remove the option
- option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." ON)
+ option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." OFF)
if(NOT PYTHON_EXECUTABLE)
FIND_PACKAGE(PythonInterp REQUIRED)
endif()
@@ -187,21 +190,18 @@ copy(inference_lib_dist
SRCS ${CMAKE_BINARY_DIR}/../paddle/fluid/framework/io/crypto/cipher.h
DSTS ${FLUID_INFERENCE_INSTALL_DIR}/paddle/include/crypto/)
include_directories(${CMAKE_BINARY_DIR}/../paddle/fluid/framework/io)
+
# CAPI inference library for only inference
set(FLUID_INFERENCE_C_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_inference_c_install_dir" CACHE STRING
"A path setting CAPI fluid inference shared")
copy_part_of_thrid_party(inference_lib_dist ${FLUID_INFERENCE_C_INSTALL_DIR})
set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")
-if(WIN32)
- set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/${CMAKE_BUILD_TYPE}/paddle_fluid_c.*)
-else(WIN32)
- set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
-endif(WIN32)
+set(paddle_fluid_c_lib ${PADDLE_BINARY_DIR}/paddle/fluid/inference/capi/libpaddle_fluid_c.*)
copy(inference_lib_dist
- SRCS ${src_dir}/inference/capi/paddle_c_api.h ${paddle_fluid_c_lib}
- DSTS ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/lib)
+ SRCS ${src_dir}/inference/capi/paddle_c_api.h ${paddle_fluid_c_lib}
+ DSTS ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/include ${FLUID_INFERENCE_C_INSTALL_DIR}/paddle/lib)
# fluid library for both train and inference
set(fluid_lib_deps inference_lib_dist)
diff --git a/cmake/nccl.cmake b/cmake/nccl.cmake
index be84c54fd2fa1..9124fec0b856a 100644
--- a/cmake/nccl.cmake
+++ b/cmake/nccl.cmake
@@ -7,14 +7,14 @@ if(WIN32)
return()
endif()
-set(NCCL_ROOT "/usr" CACHE PATH "NCCL ROOT")
-find_path(NCCL_INCLUDE_DIR nccl.h
- PATHS ${NCCL_ROOT} ${NCCL_ROOT}/include ${NCCL_ROOT}/local/include
- $ENV{NCCL_ROOT} $ENV{NCCL_ROOT}/include $ENV{NCCL_ROOT}/local/include
- NO_DEFAULT_PATH
-)
-
if(WITH_NCCL)
+ set(NCCL_ROOT "/usr" CACHE PATH "NCCL ROOT")
+ find_path(NCCL_INCLUDE_DIR nccl.h
+ PATHS ${NCCL_ROOT} ${NCCL_ROOT}/include ${NCCL_ROOT}/local/include
+ $ENV{NCCL_ROOT} $ENV{NCCL_ROOT}/include $ENV{NCCL_ROOT}/local/include
+ NO_DEFAULT_PATH
+ )
+
file(READ ${NCCL_INCLUDE_DIR}/nccl.h NCCL_VERSION_FILE_CONTENTS)
string(REGEX MATCH "define NCCL_VERSION_CODE +([0-9]+)"
diff --git a/doc/imgs/logo.png b/doc/imgs/logo.png
new file mode 100644
index 0000000000000..3ed4cc8ec82ee
Binary files /dev/null and b/doc/imgs/logo.png differ
diff --git a/paddle/fluid/framework/array.h b/paddle/fluid/framework/array.h
index 7424bae1ab865..10abb83116624 100644
--- a/paddle/fluid/framework/array.h
+++ b/paddle/fluid/framework/array.h
@@ -63,7 +63,8 @@ class Array {
HOSTDEVICE inline const T &at(size_t i) const {
#ifndef __CUDA_ARCH__
- PADDLE_ENFORCE_LT(i, N, "Array index out of bounds");
+ PADDLE_ENFORCE_LT(
+ i, N, platform::errors::OutOfRange("Array index out of bounds."));
#endif
return (*this)[i];
}
@@ -106,7 +107,7 @@ class Array {
static T obj();
return obj;
#else
- PADDLE_THROW("Array has no element");
+ PADDLE_THROW(platform::errors::Unavailable("Array has no element."));
#endif
}
@@ -115,7 +116,7 @@ class Array {
static const T obj();
return obj;
#else
- PADDLE_THROW("Array has no element");
+ PADDLE_THROW(platform::errors::Unavailable("Array has no element."));
#endif
}
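The array.h hunk above is the template for the rest of this series: bare-string PADDLE_ENFORCE/PADDLE_THROW calls become calls that carry a typed error category plus a full-sentence message. A standalone sketch of that pattern follows; the MY_ENFORCE_LT macro and the errors namespace here are hypothetical stand-ins, not Paddle's real macros (those live in platform/enforce.h).

```cpp
// Sketch: an enforce macro that reports both an error category and a
// human-readable message, mirroring PADDLE_ENFORCE_LT after this change.
#include <cstdio>
#include <sstream>
#include <stdexcept>
#include <string>

namespace errors {
struct Error {
  std::string type;
  std::string msg;
};
inline Error OutOfRange(const std::string& msg) { return {"OutOfRange", msg}; }
inline Error Unavailable(const std::string& msg) {
  return {"Unavailable", msg};
}
}  // namespace errors

#define MY_ENFORCE_LT(a, b, err)                                    \
  do {                                                              \
    if (!((a) < (b))) {                                             \
      auto e = (err);                                               \
      std::ostringstream os;                                        \
      os << e.type << ": " << e.msg << " (" << (a) << " vs " << (b) \
         << ")";                                                    \
      throw std::runtime_error(os.str());                           \
    }                                                               \
  } while (0)

int main() {
  try {
    size_t i = 7, N = 4;
    MY_ENFORCE_LT(i, N, errors::OutOfRange("Array index out of bounds."));
  } catch (const std::exception& ex) {
    std::printf("%s\n", ex.what());  // OutOfRange: ... (7 vs 4)
  }
  return 0;
}
```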
diff --git a/paddle/fluid/framework/async_executor.cc b/paddle/fluid/framework/async_executor.cc
index 9f8f17cd1ac68..4c7ef2e600bc1 100644
--- a/paddle/fluid/framework/async_executor.cc
+++ b/paddle/fluid/framework/async_executor.cc
@@ -77,11 +77,13 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
for (auto var_name : fetch_var_names) {
auto var_desc = block.FindVar(var_name);
PADDLE_ENFORCE_NOT_NULL(
- var_desc, platform::errors::NotFound("%s is not found.", var_name));
+ var_desc, platform::errors::NotFound(
+ "Variable %s is not found in main program.", var_name));
auto shapes = var_desc->GetShape();
- PADDLE_ENFORCE(shapes[shapes.size() - 1] == 1,
- "var %s: Fetched var has wrong shape, "
- "only variables with the last dimension size 1 supported",
+    PADDLE_ENFORCE_EQ(shapes[shapes.size() - 1], 1,
+                      platform::errors::InvalidArgument(
+                          "Fetched variable %s has wrong shape, "
+                          "only variables whose last dimension is 1 are supported.",
+                          var_name));
}
@@ -95,7 +97,7 @@ void AsyncExecutor::RunFromFile(const ProgramDesc& main_program,
actual_thread_num_ = thread_num;
int file_cnt = filelist.size();
PADDLE_ENFORCE_GT(file_cnt, 0,
- platform::errors::NotFound("Input file list is empty"));
+ platform::errors::NotFound("Input file list is empty."));
if (actual_thread_num_ > file_cnt) {
VLOG(1) << "Thread num = " << thread_num << ", file num = " << file_cnt
diff --git a/paddle/fluid/framework/attribute.cc b/paddle/fluid/framework/attribute.cc
index fabf2abfc803b..9ca3fe31a33c7 100644
--- a/paddle/fluid/framework/attribute.cc
+++ b/paddle/fluid/framework/attribute.cc
@@ -72,7 +72,8 @@ Attribute GetAttrValue(const proto::OpDesc::Attr& attr_desc) {
return val;
}
default:
- PADDLE_THROW("Unsupport attr type %d", attr_desc.type());
+ PADDLE_THROW(platform::errors::Unavailable("Unsupport attribute type %d.",
+ attr_desc.type()));
}
return boost::blank();
}
diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h
index 21bb39b043987..e516ae1efdfc6 100644
--- a/paddle/fluid/framework/attribute.h
+++ b/paddle/fluid/framework/attribute.h
@@ -37,9 +37,10 @@ struct ExtractAttribute {
try {
attr_value = &boost::get(attr);
} catch (boost::bad_get& bad_get) {
- PADDLE_THROW("Cannot get attribute %s by type %s, its type is %s",
- attr_name_, paddle::platform::demangle(typeid(T).name()),
- paddle::platform::demangle(attr.type().name()));
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Cannot get attribute (%s) by type %s, its type is %s.", attr_name_,
+ paddle::platform::demangle(typeid(T).name()),
+ paddle::platform::demangle(attr.type().name())));
}
return attr_value;
}
@@ -70,8 +71,9 @@ struct ExtractAttribute {
try {
attr_value = &boost::get(attr);
} catch (boost::bad_get& bad_get) {
- PADDLE_THROW("Cannot get attribute %s by type bool, its type is %s",
- attr_name_, paddle::platform::demangle(attr.type().name()));
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Cannot get attribute (%s) by type bool, its type is %s.", attr_name_,
+ paddle::platform::demangle(attr.type().name())));
}
return attr_value;
}
@@ -96,8 +98,9 @@ struct ExtractAttribute {
try {
attr_value = &boost::get(attr);
} catch (boost::bad_get& bad_get) {
- PADDLE_THROW("Cannot get attribute %s by type int64_t, its type is %s",
- attr_name_, paddle::platform::demangle(attr.type().name()));
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Cannot get attribute (%s) by type int64_t, its type is %s.",
+ attr_name_, paddle::platform::demangle(attr.type().name())));
}
return attr_value;
}
@@ -124,8 +127,10 @@ struct ExtractAttribute> {
try {
attr_value = &boost::get>(attr);
} catch (boost::bad_get& bad_get) {
- PADDLE_THROW("Cannot get attribute %s by type int64_t, its type is %s",
- attr_name_, paddle::platform::demangle(attr.type().name()));
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Cannot get attribute (%s) by type std::vector, its type is "
+ "%s.",
+ attr_name_, paddle::platform::demangle(attr.type().name())));
}
return attr_value;
}
@@ -150,8 +155,9 @@ struct ExtractAttribute {
try {
attr_value = &boost::get(attr);
} catch (boost::bad_get& bad_get) {
- PADDLE_THROW("Cannot get attribute %s by type float, its type is %s",
- attr_name_, paddle::platform::demangle(attr.type().name()));
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Cannot get attribute (%s) by type float, its type is %s.",
+ attr_name_, paddle::platform::demangle(attr.type().name())));
}
return attr_value;
}
@@ -173,8 +179,9 @@ class AttrReader {
template
inline const T& Get(const std::string& name) const {
- PADDLE_ENFORCE(attrs_.count(name) != 0, "%s should be in AttributeMap",
- name);
+ PADDLE_ENFORCE_NE(attrs_.count(name), 0,
+ platform::errors::NotFound(
+ "Attribute (%s) should be in AttributeMap.", name));
Attribute& attr = const_cast(attrs_.at(name));
ExtractAttribute extract_attr(name);
@@ -192,8 +199,10 @@ class GreaterThanChecker {
public:
explicit GreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
void operator()(const T& value) const {
- PADDLE_ENFORCE_GT(value, lower_bound_,
- platform::errors::OutOfRange("larger_than check fails."));
+ PADDLE_ENFORCE_GT(
+ value, lower_bound_,
+ platform::errors::OutOfRange(
+ "Check for attribute value greater than a certain value failed."));
}
private:
@@ -205,7 +214,10 @@ class EqualGreaterThanChecker {
public:
explicit EqualGreaterThanChecker(T lower_bound) : lower_bound_(lower_bound) {}
void operator()(const T& value) const {
- PADDLE_ENFORCE_GE(value, lower_bound_, "equal_larger_than check fails.");
+ PADDLE_ENFORCE_GE(
+ value, lower_bound_,
+ platform::errors::OutOfRange("Check for attribute valur equal or "
+ "greater than a certain value failed."));
}
private:
@@ -231,9 +243,10 @@ class EnumInContainer {
public:
explicit EnumInContainer(const std::unordered_set& c) : container_(c) {}
void operator()(const T& val) const {
- PADDLE_ENFORCE(container_.find(val) != container_.end(),
- "Value %s is not in enum container %s", val,
- ContainerDebugString());
+ PADDLE_ENFORCE_NE(
+ container_.find(val), container_.end(),
+ platform::errors::NotFound("Value %s is not in enum container %s.", val,
+ ContainerDebugString()));
}
private:
@@ -284,8 +297,11 @@ class TypedAttrChecker {
// we can add more common limits, like LessThan(), Between()...
TypedAttrChecker& SetDefault(const T& default_value) {
- PADDLE_ENFORCE(default_value_setter_.empty(),
- "%s can't have more than one default value!", attr_name_);
+ PADDLE_ENFORCE_EQ(
+ default_value_setter_.empty(), true,
+ platform::errors::AlreadyExists(
+ "Attribute (%s) has a default value and cannot be set repeatedly.",
+ attr_name_));
default_value_setter_.push_back(DefaultValueSetter(default_value));
return *this;
}
@@ -308,8 +324,10 @@ class TypedAttrChecker {
auto it = attr_map->find(attr_name_);
if (it == attr_map->end()) {
// user do not set this attr
- PADDLE_ENFORCE(!default_value_setter_.empty(),
- "Attribute '%s' is required!", attr_name_);
+ PADDLE_ENFORCE_EQ(
+ default_value_setter_.empty(), false,
+ platform::errors::InvalidArgument(
+ "Attribute (%s) is not set correctly.", attr_name_));
// default_value_setter_ has no more than one element
attr_map->emplace(attr_name_, default_value_setter_[0]());
}
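ExtractAttribute above wraps boost::get and, on boost::bad_get, reports both the requested type and the stored type. The same extract-and-report pattern is sketched below with std::variant and std::get_if as an illustrative modern-C++ substitution; this is not the code in this patch, and the Attribute alias and ExtractAttr helper are hypothetical.

```cpp
// Sketch: pull a T out of a variant, diagnosing a type mismatch instead
// of letting the bad-access exception escape unexplained.
#include <cstdio>
#include <string>
#include <variant>

using Attribute = std::variant<int, float, std::string>;

template <typename T>
const T* ExtractAttr(const std::string& name, const Attribute& attr) {
  if (const T* value = std::get_if<T>(&attr)) {
    return value;
  }
  std::printf("Cannot get attribute (%s) by the requested type; "
              "stored alternative index is %zu.\n",
              name.c_str(), attr.index());
  return nullptr;
}

int main() {
  Attribute a = 3.14f;
  if (const float* f = ExtractAttr<float>("scale", a)) {
    std::printf("scale = %f\n", *f);
  }
  ExtractAttr<int>("scale", a);  // type mismatch: prints a diagnostic
  return 0;
}
```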
diff --git a/paddle/fluid/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc
index fee6ba4004705..a79bc4bc2cf5f 100644
--- a/paddle/fluid/framework/data_device_transform.cc
+++ b/paddle/fluid/framework/data_device_transform.cc
@@ -23,7 +23,8 @@ void TransDataDevice(const Tensor &in, const platform::Place &dst_place,
PADDLE_ENFORCE_NE(
in.place().which(), dst_place.which(),
- "Currently, model parallelism is only supported between CPU and CUDA");
+ platform::errors::Unavailable("Currently, model parallelism is only "
+ "supported between CPU and CUDA."));
// NOTE(yy): TransDataDevice should wait for computation of input.
platform::DeviceContextPool::Instance().Get(in.place())->Wait();
diff --git a/paddle/fluid/framework/data_feed.cc b/paddle/fluid/framework/data_feed.cc
index 566a08d8a2ad1..96d54ec869174 100644
--- a/paddle/fluid/framework/data_feed.cc
+++ b/paddle/fluid/framework/data_feed.cc
@@ -133,11 +133,14 @@ bool DataFeed::PickOneFile(std::string* filename) {
}
void DataFeed::CheckInit() {
- PADDLE_ENFORCE(finish_init_, "Initialization did not succeed.");
+ PADDLE_ENFORCE_EQ(finish_init_, true, platform::errors::PreconditionNotMet(
+ "DataFeed initialization failed."));
}
void DataFeed::CheckSetFileList() {
- PADDLE_ENFORCE(finish_set_filelist_, "Set filelist did not succeed.");
+ PADDLE_ENFORCE_EQ(
+ finish_set_filelist_, true,
+ platform::errors::PreconditionNotMet("DataFeed set filelist failed."));
}
void DataFeed::CheckStart() {
@@ -160,14 +163,18 @@ void DataFeed::CopyToFeedTensor(void* dst, const void* src, size_t size) {
#ifdef PADDLE_WITH_CUDA
cudaMemcpy(dst, src, size, cudaMemcpyHostToDevice);
#else
- PADDLE_THROW("Not supported GPU, Please compile WITH_GPU option");
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Not supported GPU, please compile with option WITH_GPU=ON."));
#endif
}
}
template
void PrivateQueueDataFeed::SetQueueSize(int queue_size) {
- PADDLE_ENFORCE(queue_size > 0, "Illegal queue size: %d.", queue_size);
+ PADDLE_ENFORCE_GT(
+ queue_size, 0,
+ platform::errors::InvalidArgument(
+ "Queue size %d is illegal in PrivateQueueDataFeed.", queue_size));
queue_size_ = queue_size;
queue_ = paddle::framework::MakeChannel();
queue_->SetCapacity(queue_size);
@@ -418,8 +425,10 @@ void MultiSlotDataFeed::Init(
finish_set_filelist_ = false;
finish_start_ = false;
- PADDLE_ENFORCE(data_feed_desc.has_multi_slot_desc(),
- "Multi_slot_desc has not been set.");
+ PADDLE_ENFORCE_EQ(
+ data_feed_desc.has_multi_slot_desc(), true,
+ platform::errors::PreconditionNotMet(
+ "Multi_slot_desc has not been set in MultiSlotDataFeed."));
paddle::framework::MultiSlotDesc multi_slot_desc =
data_feed_desc.multi_slot_desc();
SetBatchSize(data_feed_desc.batch_size());
@@ -668,13 +677,14 @@ bool MultiSlotDataFeed::ParseOneInstance(std::vector* instance) {
for (size_t i = 0; i < use_slots_index_.size(); ++i) {
int idx = use_slots_index_[i];
int num = strtol(&str[pos], &endptr, 10);
- PADDLE_ENFORCE(
- num,
- "The number of ids can not be zero, you need padding "
- "it in data generator; or if there is something wrong with "
- "the data, please check if the data contains unresolvable "
- "characters.\nplease check this error line: %s",
- str);
+ PADDLE_ENFORCE_NE(
+ num, 0,
+ platform::errors::InvalidArgument(
+ "The number of ids can not be zero, you need padding "
+ "it in data generator; or if there is something wrong with "
+ "the data, please check if the data contains unresolvable "
+ "characters.\nplease check this error line: %s.",
+ str));
if (idx != -1) {
(*instance)[idx].Init(all_slots_type_[i]);
@@ -765,8 +775,10 @@ void MultiSlotInMemoryDataFeed::Init(
finish_set_filelist_ = false;
finish_start_ = false;
- PADDLE_ENFORCE(data_feed_desc.has_multi_slot_desc(),
- "Multi_slot_desc has not been set.");
+ PADDLE_ENFORCE_EQ(
+ data_feed_desc.has_multi_slot_desc(), true,
+ platform::errors::PreconditionNotMet(
+ "Multi_slot_desc has not been set in MultiSlotInMemoryDataFeed."));
paddle::framework::MultiSlotDesc multi_slot_desc =
data_feed_desc.multi_slot_desc();
SetBatchSize(data_feed_desc.batch_size());
@@ -898,13 +910,14 @@ bool MultiSlotInMemoryDataFeed::ParseOneInstanceFromPipe(Record* instance) {
for (size_t i = 0; i < use_slots_index_.size(); ++i) {
int idx = use_slots_index_[i];
int num = strtol(&str[pos], &endptr, 10);
- PADDLE_ENFORCE(
- num,
- "The number of ids can not be zero, you need padding "
- "it in data generator; or if there is something wrong with "
- "the data, please check if the data contains unresolvable "
- "characters.\nplease check this error line: %s",
- str);
+ PADDLE_ENFORCE_NE(
+ num, 0,
+ platform::errors::InvalidArgument(
+ "The number of ids can not be zero, you need padding "
+ "it in data generator; or if there is something wrong with "
+ "the data, please check if the data contains unresolvable "
+ "characters.\nplease check this error line: %s.",
+ str));
if (idx != -1) {
if (all_slots_type_[i][0] == 'f') { // float
for (int j = 0; j < num; ++j) {
@@ -963,13 +976,14 @@ bool MultiSlotInMemoryDataFeed::ParseOneInstance(Record* instance) {
for (size_t i = 0; i < use_slots_index_.size(); ++i) {
int idx = use_slots_index_[i];
int num = strtol(&str[pos], &endptr, 10);
- PADDLE_ENFORCE(
- num,
- "The number of ids can not be zero, you need padding "
- "it in data generator; or if there is something wrong with "
- "the data, please check if the data contains unresolvable "
- "characters.\nplease check this error line: %s",
- str);
+ PADDLE_ENFORCE_NE(
+ num, 0,
+ platform::errors::InvalidArgument(
+ "The number of ids can not be zero, you need padding "
+ "it in data generator; or if there is something wrong with "
+ "the data, please check if the data contains unresolvable "
+ "characters.\nplease check this error line: %s.",
+ str));
if (idx != -1) {
if (all_slots_type_[i][0] == 'f') { // float
@@ -1085,7 +1099,7 @@ void MultiSlotInMemoryDataFeed::PutToFeedVec(
PADDLE_ENFORCE_EQ(slot_offset.size(), 2,
platform::errors::InvalidArgument(
"In batch reader, the sparse tensor lod size "
- "must be 2, but received %d",
+ "must be 2, but received %d.",
slot_offset.size()));
const auto& max_size = slot_offset[1];
tmp_offset.reserve(max_size + 1);
@@ -1137,10 +1151,13 @@ void PrivateInstantDataFeed::PutToFeedVec() {
for (const auto e : use_slots_shape_[i]) {
total_dims *= e;
}
- PADDLE_ENFORCE(
- total_dims == total_instance,
- "The actual data size of slot[%s] doesn't match its declaration",
- use_slots_[i].c_str());
+ PADDLE_ENFORCE_EQ(
+ total_dims, total_instance,
+ platform::errors::InvalidArgument(
+ "The actual data size of slot[%s] doesn't match its declaration. "
+ "The actual data size of slot is %lld"
+ ", and its declaration is %lld.",
+ use_slots_[i].c_str(), total_dims, total_instance));
feed_vec_[i]->Resize(framework::make_ddim(use_slots_shape_[i]));
}
}
@@ -1162,7 +1179,9 @@ int PrivateInstantDataFeed::Next() {
return -1;
}
- PADDLE_ENFORCE(true == ParseOneMiniBatch(), "Fail to parse mini-batch data");
+ PADDLE_ENFORCE_EQ(
+ true, ParseOneMiniBatch(),
+ platform::errors::InvalidArgument("Fail to parse mini-batch data."));
PutToFeedVec();
return ins_vec_[0].GetBatchSize();
}
@@ -1173,8 +1192,10 @@ void PrivateInstantDataFeed::Init(const DataFeedDesc& data_feed_desc) {
finish_set_filelist_ = false;
finish_start_ = false;
- PADDLE_ENFORCE(data_feed_desc.has_multi_slot_desc(),
- "Multi_slot_desc has not been set.");
+ PADDLE_ENFORCE_EQ(
+ data_feed_desc.has_multi_slot_desc(), true,
+ platform::errors::PreconditionNotMet(
+ "Multi_slot_desc has not been set in PrivateInstantDataFeed."));
paddle::framework::MultiSlotDesc multi_slot_desc =
data_feed_desc.multi_slot_desc();
SetBatchSize(data_feed_desc.batch_size());
@@ -1217,7 +1238,10 @@ template class PrivateInstantDataFeed>;
bool MultiSlotFileInstantDataFeed::Preprocess(const std::string& filename) {
fd_ = open(filename.c_str(), O_RDONLY);
- PADDLE_ENFORCE(fd_ != -1, "Fail to open file: %s", filename.c_str());
+ PADDLE_ENFORCE_NE(
+ fd_, -1, platform::errors::Unavailable(
+ "Fail to open file: %s in MultiSlotFileInstantDataFeed.",
+ filename.c_str()));
struct stat sb;
fstat(fd_, &sb);
@@ -1225,7 +1249,11 @@ bool MultiSlotFileInstantDataFeed::Preprocess(const std::string& filename) {
buffer_ =
reinterpret_cast(mmap(NULL, end_, PROT_READ, MAP_PRIVATE, fd_, 0));
- PADDLE_ENFORCE(buffer_ != MAP_FAILED, strerror(errno));
+ PADDLE_ENFORCE_NE(
+ buffer_, MAP_FAILED,
+ platform::errors::Unavailable(
+ "Memory map failed when create shared memory, error number is %s.",
+ strerror(errno)));
offset_ = 0;
return true;
@@ -1257,12 +1285,13 @@ bool MultiSlotFileInstantDataFeed::ParseOneMiniBatch() {
char type = all_slots_type_[i][0];
uint16_t num = *reinterpret_cast(buffer_ + offset_);
- PADDLE_ENFORCE(
- num,
- "The number of ids can not be zero, you need padding "
- "it in data generator; or if there is something wrong with "
- "the data, please check if the data contains unresolvable "
- "characters.");
+ PADDLE_ENFORCE_NE(
+ num, 0,
+ platform::errors::InvalidArgument(
+ "The number of ids can not be zero, you need padding "
+ "it in data generator; or if there is something wrong with "
+ "the data, please check if the data contains unresolvable "
+ "characters."));
offset_ += sizeof(uint16_t);
if (idx != -1) {
@@ -1304,7 +1333,12 @@ bool MultiSlotFileInstantDataFeed::ParseOneMiniBatch() {
}
PADDLE_ENFORCE(batch_size_ == default_batch_size_ || offset_ == end_,
- "offset_ != end_");
+ platform::errors::InvalidArgument(
+ "The batch size id not equal to default batch size, or "
+ "the offset is not equal to end index."
+ "The batch size is %d, default batcch size is %d, offset "
+ "is %d, end index is %d.",
+ batch_size_, default_batch_size_, offset_, end_));
return true;
}
#endif
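MultiSlotFileInstantDataFeed::Preprocess above open()s the input file and mmap()s it read-only, and the patch upgrades both failure paths to typed errors that surface strerror(errno). A self-contained POSIX sketch of that flow, with plain stderr standing in for PADDLE_ENFORCE and a hypothetical default path:

```cpp
// Sketch: open + mmap with the same diagnostics the patch adds.
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include <cerrno>
#include <cstdio>
#include <cstring>

int main(int argc, char** argv) {
  const char* path = argc > 1 ? argv[1] : "/etc/hostname";  // assumed default
  int fd = open(path, O_RDONLY);
  if (fd == -1) {
    std::fprintf(stderr, "Failed to open file %s.\n", path);
    return 1;
  }
  struct stat sb;
  fstat(fd, &sb);
  char* buffer = reinterpret_cast<char*>(
      mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0));
  if (buffer == MAP_FAILED) {
    // Surface strerror(errno), as the new error message does.
    std::fprintf(stderr, "Memory map failed: %s.\n", std::strerror(errno));
    close(fd);
    return 1;
  }
  std::printf("Mapped %lld bytes; first byte: %c\n",
              static_cast<long long>(sb.st_size), buffer[0]);
  munmap(buffer, sb.st_size);
  close(fd);
  return 0;
}
```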
diff --git a/paddle/fluid/framework/data_feed.h b/paddle/fluid/framework/data_feed.h
index ef49b28cdbc81..b48d152fe3582 100644
--- a/paddle/fluid/framework/data_feed.h
+++ b/paddle/fluid/framework/data_feed.h
@@ -116,7 +116,8 @@ class DataFeed {
virtual ~DataFeed() {}
virtual void Init(const DataFeedDesc& data_feed_desc) = 0;
virtual bool CheckFile(const char* filename) {
- PADDLE_THROW("This function(CheckFile) is not implemented.");
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "This function(CheckFile) is not implemented."));
}
// Set filelist for DataFeed.
// Pay attention that it must init all readers before call this function.
@@ -179,7 +180,8 @@ class DataFeed {
}
virtual int GetCurBatchSize() { return batch_size_; }
virtual void LoadIntoMemory() {
- PADDLE_THROW("This function(LoadIntoMemory) is not implemented.");
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "This function(LoadIntoMemory) is not implemented."));
}
virtual void SetPlace(const paddle::platform::Place& place) {
place_ = place;
@@ -438,14 +440,23 @@ class MultiSlotType {
private:
void CheckType(const std::string& type) const {
- PADDLE_ENFORCE((type == "uint64") || (type == "float"),
- "There is no this type<%s>.", type);
+ PADDLE_ENFORCE_EQ((type == "uint64" || type == "float"), true,
+ platform::errors::InvalidArgument(
+ "MultiSlotType error, expect type is uint64 or "
+ "float, but received type is %s.",
+ type));
}
void CheckFloat() const {
- PADDLE_ENFORCE(type_[0] == 'f', "Add %s value to float slot.", type_);
+ PADDLE_ENFORCE_EQ(
+ type_[0], 'f',
+ platform::errors::InvalidArgument(
+ "MultiSlotType error, add %s value to float slot.", type_));
}
void CheckUint64() const {
- PADDLE_ENFORCE(type_[0] == 'u', "Add %s value to uint64 slot.", type_);
+ PADDLE_ENFORCE_EQ(
+ type_[0], 'u',
+ platform::errors::InvalidArgument(
+ "MultiSlotType error, add %s value to uint64 slot.", type_));
}
std::vector float_feasign_;
std::vector uint64_feasign_;
diff --git a/paddle/fluid/framework/data_feed_test.cc b/paddle/fluid/framework/data_feed_test.cc
index 9a055765b8c91..2cc441bbd34cb 100644
--- a/paddle/fluid/framework/data_feed_test.cc
+++ b/paddle/fluid/framework/data_feed_test.cc
@@ -34,8 +34,10 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const char* filename) {
paddle::framework::DataFeedDesc data_feed_desc;
int file_descriptor = open(filename, O_RDONLY);
- PADDLE_ENFORCE_NE(file_descriptor, -1, platform::errors::Unavailable(
- "Cannot open file %s.", filename));
+ PADDLE_ENFORCE_NE(
+ file_descriptor, -1,
+ platform::errors::Unavailable(
+ "Cannot open file %s c load datafeed param from file.", filename));
google::protobuf::io::FileInputStream fileInput(file_descriptor);
google::protobuf::TextFormat::Parse(&fileInput, &data_feed_desc);
close(file_descriptor);
@@ -45,8 +47,10 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const std::vector load_filelist_from_file(const char* filename) {
std::vector filelist;
std::ifstream fin(filename);
- PADDLE_ENFORCE_EQ(fin.good(), true, platform::errors::Unavailable(
- "Cannot open file %s.", filename));
+ PADDLE_ENFORCE_EQ(
+ fin.good(), true,
+ platform::errors::Unavailable(
+ "Cannot open file %s when load filelist from file.", filename));
std::string line;
while (getline(fin, line)) {
filelist.push_back(line);
@@ -196,7 +200,8 @@ void GetElemSetFromReader(std::vector* reader_elem_set,
}
}
} else {
- PADDLE_THROW("Error type in proto file.");
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Error type in proto file."));
}
} else { // sparse branch
if (slot.type() == "uint64") {
@@ -218,7 +223,8 @@ void GetElemSetFromReader(std::vector* reader_elem_set,
}
}
} else {
- PADDLE_THROW("Error type in proto file.");
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Error type in proto file."));
}
} // end sparse branch
++index;
@@ -272,7 +278,10 @@ void GetElemSetFromFile(std::vector* file_elem_set,
file_elem_set->resize(used_slot_num);
for (const auto& file : filelist) {
std::ifstream fin(file.c_str());
- PADDLE_ENFORCE(fin.good(), "Can not open %s.", file.c_str());
+ PADDLE_ENFORCE_EQ(
+ fin.good(), true,
+ platform::errors::Unavailable(
+ "Can not open %s when get element set from file.", file.c_str()));
while (1) {
bool end_flag = false;
int index = 0;
@@ -298,7 +307,8 @@ void GetElemSetFromFile(std::vector* file_elem_set,
}
}
} else {
- PADDLE_THROW("Error type in proto file.");
+ PADDLE_THROW(
+ platform::errors::InvalidArgument("Error type in proto file."));
}
if (slot.is_used()) {
++index;
diff --git a/paddle/fluid/framework/data_layout.h b/paddle/fluid/framework/data_layout.h
index b611bb77b4e1e..947f06408d028 100644
--- a/paddle/fluid/framework/data_layout.h
+++ b/paddle/fluid/framework/data_layout.h
@@ -45,7 +45,8 @@ inline DataLayout StringToDataLayout(const std::string& str) {
} else if (s == "MKLDNNLAYOUT") {
return DataLayout::kMKLDNN;
} else {
- PADDLE_THROW("Unknown storage order string: %s", s);
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Unknown data layout type string: %s.", s));
}
}
@@ -60,7 +61,8 @@ inline std::string DataLayoutToString(const DataLayout& data_layout) {
case DataLayout::kMKLDNN:
return "MKLDNNLAYOUT";
default:
- PADDLE_THROW("unknown DataLayout %d", data_layout);
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Unknown Data Layout type %d.", data_layout));
}
}
diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc
index 59a76ce103c0e..3cea7a66d0105 100644
--- a/paddle/fluid/framework/data_layout_transform.cc
+++ b/paddle/fluid/framework/data_layout_transform.cc
@@ -25,14 +25,17 @@ namespace paddle {
namespace framework {
std::vector GetAxis(const DataLayout& from, const DataLayout& to) {
- PADDLE_ENFORCE_NE(from, to,
- "layout transform should transform different layout");
+ PADDLE_ENFORCE_NE(
+ from, to,
+ platform::errors::InvalidArgument(
+ "Layout transform should transform between different layout."));
if (from == DataLayout::kNCHW && to == DataLayout::kNHWC) {
return {0, 2, 3, 1};
} else if (from == DataLayout::kNHWC && to == DataLayout::kNCHW) {
return {0, 3, 1, 2};
} else {
- PADDLE_THROW("unsupported transform");
+ PADDLE_THROW(
+ platform::errors::InvalidArgument("Unsupported layout transform."));
}
}
@@ -55,7 +58,8 @@ struct CastDataLayout {
auto* context = static_cast(ctx_);
trans4(*context, in_, out_, axis_);
} else {
- PADDLE_THROW("Unsupport CPU <-> GPU!");
+ PADDLE_THROW(platform::errors::PreconditionNotMet(
+ "Unsupported data layout cast from CPU to GPU."));
}
}
};
@@ -66,9 +70,14 @@ void TransDataLayout(const OpKernelType& kernel_type_for_var,
PADDLE_ENFORCE(
platform::places_are_same_class(kernel_type_for_var.place_,
expected_kernel_type.place_),
- "TransDataLayout only support DataLayout transform on same place!");
+ platform::errors::PreconditionNotMet(
+ "TransDataLayout only support DataLayout transform on same place."));
- PADDLE_ENFORCE(arity(in.dims()) == 4, "Input Arity only support 4!");
+ PADDLE_ENFORCE_EQ(
+ arity(in.dims()), 4,
+ platform::errors::InvalidArgument(
+ "Input dimension arity only can be 4, the input dimension is %s.",
+ in.dims()));
auto& pool = platform::DeviceContextPool::Instance();
@@ -108,7 +117,8 @@ void* GetDataFromTensor(const Tensor& tensor, mkldnn::memory::data_type type) {
case mkldnn::memory::data_type::s32:
return platform::to_void_cast(tensor.data());
default:
- PADDLE_THROW("wrong mkldnn type provided");
+ PADDLE_THROW(
+ platform::errors::InvalidArgument("Wrong mkldnn type provided."));
}
}
@@ -121,8 +131,9 @@ void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var,
PADDLE_ENFORCE(
in_layout == DataLayout::kMKLDNN && out_layout != DataLayout::kMKLDNN,
- "TransDataLayoutFromMKLDNN only supports transform from MKLDNN to "
- "non-MKLDNN");
+ platform::errors::InvalidArgument(
+ "TransDataLayoutFromMKLDNN only supports transform from MKLDNN to "
+ "non-MKLDNN"));
innerTransDataLayoutFromMKLDNN(
in_layout,
@@ -155,7 +166,9 @@ void innerTransDataLayoutFromMKLDNN(DataLayout in_layout, DataLayout out_layout,
memory::data_type in_type = ToMKLDNNDataType(in.type());
PADDLE_ENFORCE_NE(in_type, memory::data_type::undef,
- "Input tensor type is not supported: %s", in.type());
+ platform::errors::InvalidArgument(
+ "Input tensor type (%s) is not supported.",
+ DataTypeToString(in.type())));
auto in_format = platform::MKLDNNFormatForSize(in_tz.size(), in.format());
auto out_format =
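GetAxis above returns the permutation {0, 2, 3, 1} for NCHW to NHWC and {0, 3, 1, 2} for the reverse. A minimal sketch of applying such a permutation to a shape; the standalone helper names below are illustrative, not Paddle's:

```cpp
// Sketch: permute a tensor shape with a layout-transform axis vector.
#include <cstdio>
#include <vector>

std::vector<int> GetAxis(bool nchw_to_nhwc) {
  return nchw_to_nhwc ? std::vector<int>{0, 2, 3, 1}
                      : std::vector<int>{0, 3, 1, 2};
}

std::vector<int> Permute(const std::vector<int>& dims,
                         const std::vector<int>& axis) {
  std::vector<int> out(dims.size());
  for (size_t i = 0; i < axis.size(); ++i) out[i] = dims[axis[i]];
  return out;
}

int main() {
  std::vector<int> nchw = {8, 3, 32, 32};    // N, C, H, W
  auto nhwc = Permute(nchw, GetAxis(true));  // -> {8, 32, 32, 3}
  std::printf("NHWC: %d %d %d %d\n", nhwc[0], nhwc[1], nhwc[2], nhwc[3]);
  return 0;
}
```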
diff --git a/paddle/fluid/framework/data_layout_transform.h b/paddle/fluid/framework/data_layout_transform.h
index 711146efd267b..6eb84ef9d7c01 100644
--- a/paddle/fluid/framework/data_layout_transform.h
+++ b/paddle/fluid/framework/data_layout_transform.h
@@ -38,8 +38,9 @@ inline MKLDNNMemoryFormat ToMKLDNNFormat(const DataLayout& layout) {
case DataLayout::kNCHW:
return MKLDNNMemoryFormat::nchw;
default:
- PADDLE_THROW("Fail to convert layout %s to MKLDNN format",
- DataLayoutToString(layout));
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Fail to convert layout %s to MKLDNN format.",
+ DataLayoutToString(layout)));
}
}
@@ -50,7 +51,8 @@ inline DataLayout ToPaddleLayout(const MKLDNNMemoryFormat& format) {
case MKLDNNMemoryFormat::nchw:
return DataLayout::kNCHW;
default:
- PADDLE_THROW("Fail to convert MKLDNN format to paddle layout");
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Fail to convert MKLDNN format to paddle layout."));
}
}
diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc
index 76c53e8231577..f54311eebfade 100644
--- a/paddle/fluid/framework/data_transform.cc
+++ b/paddle/fluid/framework/data_transform.cc
@@ -45,9 +45,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
if (NeedTransformLayout(lout, lin)) {
#ifdef PADDLE_WITH_MKLDNN
if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) {
- PADDLE_ENFORCE(
- !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN),
- "No layout transform needed between two MKLDNN OPKernels");
+ PADDLE_ENFORCE_EQ(
+ !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN), true,
+ platform::errors::PreconditionNotMet(
+ "No layout transform needed between two MKLDNN OPKernels."));
if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) {
// Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel
@@ -96,7 +97,10 @@ void TransformData(const OpKernelType &expected_kernel_type,
PassTensorData(&out, &in);
}
- PADDLE_ENFORCE(transformed, "No transform is applied, please check!");
+ PADDLE_ENFORCE_EQ(
+ transformed, true,
+ platform::errors::PreconditionNotMet(
+ "No transform is applied for the data needs to be transformed."));
// get output data
output_tensor->ShareDataWith(in);
}
@@ -116,7 +120,10 @@ void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
trans_selected_rows->set_rows(in_selected_rows.rows());
trans_selected_rows->mutable_value()->ShareDataWith(tensor);
} else {
- PADDLE_THROW("unknown var type");
+ PADDLE_THROW(platform::errors::Unavailable(
+ "Unsupported variable type, only supports LoDTensor or SelectedRows, "
+ "but the input variable type is %s.",
+ ToTypeName(in_var.Type())));
}
}
diff --git a/paddle/fluid/framework/data_type.cc b/paddle/fluid/framework/data_type.cc
index a0248cf3c7569..f479d92483c1c 100644
--- a/paddle/fluid/framework/data_type.cc
+++ b/paddle/fluid/framework/data_type.cc
@@ -65,7 +65,8 @@ proto::VarType::Type ToDataType(std::type_index type) {
if (it != gDataTypeMap().cpp_to_proto_.end()) {
return it->second;
}
- PADDLE_THROW("Not support %s as tensor type", type.name());
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Not support %s as tensor data type.", platform::demangle(type.name())));
}
std::type_index ToTypeIndex(proto::VarType::Type type) {
@@ -73,8 +74,9 @@ std::type_index ToTypeIndex(proto::VarType::Type type) {
if (it != gDataTypeMap().proto_to_cpp_.end()) {
return it->second;
}
- PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
- static_cast(type));
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Not support proto::VarType::Type(%d) as tensor type.",
+ static_cast(type)));
}
std::string DataTypeToString(const proto::VarType::Type type) {
@@ -82,8 +84,9 @@ std::string DataTypeToString(const proto::VarType::Type type) {
if (it != gDataTypeMap().proto_to_str_.end()) {
return it->second;
}
- PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type",
- static_cast(type));
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Not support proto::VarType::Type(%d) as tensor type.",
+ static_cast(type)));
}
size_t SizeOfType(proto::VarType::Type type) {
@@ -91,7 +94,8 @@ size_t SizeOfType(proto::VarType::Type type) {
if (it != gDataTypeMap().proto_to_size_.end()) {
return it->second;
}
- PADDLE_THROW("Not support %s as tensor type", DataTypeToString(type));
+ PADDLE_THROW(platform::errors::Unimplemented("Not support %s as tensor type.",
+ DataTypeToString(type)));
}
} // namespace framework
diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h
index e3b45d05d85e9..2c4a7b4d02727 100644
--- a/paddle/fluid/framework/data_type.h
+++ b/paddle/fluid/framework/data_type.h
@@ -78,7 +78,9 @@ inline void VisitDataType(proto::VarType::Type type, Visitor visitor) {
_ForEachDataType_(VisitDataTypeCallback);
#undef VisitDataTypeCallback
- PADDLE_THROW("Not supported %d", type);
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Not supported proto::VarType::Type(%d) as data type.",
+ static_cast(type)));
}
template
diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc
index d79f8cacb5f47..44542f05d9d5c 100644
--- a/paddle/fluid/framework/data_type_transform.cc
+++ b/paddle/fluid/framework/data_type_transform.cc
@@ -56,7 +56,8 @@ struct CastDataType {
context->Wait();
#endif
} else {
- PADDLE_THROW("Unsupported place!");
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Place type is not supported when casting data type."));
}
}
};
@@ -98,7 +99,9 @@ void TransDataType(const OpKernelType& kernel_type_for_var,
framework::VisitDataType(dst_type, CastDataType(in, out, ctx));
break;
default:
- PADDLE_THROW("Not support type %d", src_type);
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Data type (%s) is not supported when casting data type.",
+ DataTypeToString(src_type)));
}
}
diff --git a/paddle/fluid/framework/ddim.cc b/paddle/fluid/framework/ddim.cc
index 799deec1b6955..fe7d243066237 100644
--- a/paddle/fluid/framework/ddim.cc
+++ b/paddle/fluid/framework/ddim.cc
@@ -81,9 +81,11 @@ bool contain_unknown_dim(const DDim& ddim) {
}
DDim slice_ddim(const DDim& dim, int begin, int end) {
- PADDLE_ENFORCE(begin >= 0 && end <= dim.size(),
- "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.",
- begin, end, dim.size());
+ PADDLE_ENFORCE_EQ(
+ (begin >= 0 && end <= dim.size()), true,
+ platform::errors::InvalidArgument(
+ "[begin(%d), end(%d)) must be inside [0, %d) in ddim slice.", begin,
+ end, dim.size()));
// Constructor of DDim would check whether end - begin is valid
return DDim(dim.Get() + begin, end - begin);
}
diff --git a/paddle/fluid/framework/ddim.h b/paddle/fluid/framework/ddim.h
index cbc8b0fb7cc78..29c4732f99118 100644
--- a/paddle/fluid/framework/ddim.h
+++ b/paddle/fluid/framework/ddim.h
@@ -29,20 +29,23 @@ namespace framework {
return (callback); \
}
-#define PADDLE_VISIT_DDIM(rank, callback) \
- switch (rank) { \
- PADDLE_VISIT_DDIM_BASE(0, callback); \
- PADDLE_VISIT_DDIM_BASE(1, callback); \
- PADDLE_VISIT_DDIM_BASE(2, callback); \
- PADDLE_VISIT_DDIM_BASE(3, callback); \
- PADDLE_VISIT_DDIM_BASE(4, callback); \
- PADDLE_VISIT_DDIM_BASE(5, callback); \
- PADDLE_VISIT_DDIM_BASE(6, callback); \
- PADDLE_VISIT_DDIM_BASE(7, callback); \
- PADDLE_VISIT_DDIM_BASE(8, callback); \
- PADDLE_VISIT_DDIM_BASE(9, callback); \
- default: \
- PADDLE_THROW("Invalid rank %d", rank); \
+#define PADDLE_VISIT_DDIM(rank, callback) \
+ switch (rank) { \
+ PADDLE_VISIT_DDIM_BASE(0, callback); \
+ PADDLE_VISIT_DDIM_BASE(1, callback); \
+ PADDLE_VISIT_DDIM_BASE(2, callback); \
+ PADDLE_VISIT_DDIM_BASE(3, callback); \
+ PADDLE_VISIT_DDIM_BASE(4, callback); \
+ PADDLE_VISIT_DDIM_BASE(5, callback); \
+ PADDLE_VISIT_DDIM_BASE(6, callback); \
+ PADDLE_VISIT_DDIM_BASE(7, callback); \
+ PADDLE_VISIT_DDIM_BASE(8, callback); \
+ PADDLE_VISIT_DDIM_BASE(9, callback); \
+ default: \
+ PADDLE_THROW(platform::errors::Unimplemented( \
+ "Invalid dimension to be accessed. Now only supports access to " \
+ "dimension 0 to 9, but received dimension is %d.", \
+ rank)); \
}
template
@@ -92,13 +95,31 @@ class DDim {
inline int64_t operator[](int idx) const { return dim_[idx]; }
- inline int64_t& at(int idx) {
- PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
+ int64_t& at(int idx) {
+ PADDLE_ENFORCE_GE(idx, 0,
+ platform::errors::InvalidArgument(
+ "Invalid DDim index to be accessed. The valid index "
+ "is between 0 and %d, but received index is %d.",
+ rank_, idx));
+ PADDLE_ENFORCE_LT(idx, rank_,
+ platform::errors::InvalidArgument(
+ "Invalid DDim index to be accessed. The valid index "
+ "is between 0 and %d, but received index is %d.",
+ rank_, idx));
return dim_[idx];
}
- inline int64_t at(int idx) const {
- PADDLE_ENFORCE(idx >= 0 && idx < rank_, "Invalid idx %d", idx);
+ int64_t at(int idx) const {
+ PADDLE_ENFORCE_GE(idx, 0,
+ platform::errors::InvalidArgument(
+ "Invalid DDim index to be accessed. The valid index "
+ "is between 0 and %d, but received index is %d.",
+ rank_, idx));
+ PADDLE_ENFORCE_LT(idx, rank_,
+ platform::errors::InvalidArgument(
+ "Invalid DDim index to be accessed. The valid index "
+ "is between 0 and %d, but received index is %d.",
+ rank_, idx));
return dim_[idx];
}
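PADDLE_VISIT_DDIM above turns a runtime rank into a compile-time Dim&lt;D&gt; specialization through a switch, and the patch makes the out-of-range default a typed Unimplemented error. A simplified sketch of the same dispatch idea, limited to ranks 0-3 and using a plain exception with hypothetical names:

```cpp
// Sketch: runtime rank selects a compile-time specialization via a switch.
#include <cstdio>
#include <stdexcept>

template <int D>
struct Dim {
  static constexpr int kRank = D;
};

template <typename Visitor>
auto VisitRank(int rank, Visitor visitor) {
  switch (rank) {
    case 0: return visitor(Dim<0>{});
    case 1: return visitor(Dim<1>{});
    case 2: return visitor(Dim<2>{});
    case 3: return visitor(Dim<3>{});
    default:
      throw std::invalid_argument(
          "Invalid dimension: only ranks 0 to 3 are handled in this sketch.");
  }
}

int main() {
  int rank = 2;  // known only at runtime
  int r = VisitRank(rank, [](auto dim) { return decltype(dim)::kRank * 10; });
  std::printf("visited rank * 10 = %d\n", r);  // prints 20
  return 0;
}
```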
diff --git a/paddle/fluid/framework/distributed_strategy.proto b/paddle/fluid/framework/distributed_strategy.proto
index 9bcd79cd34f07..cc7d60b148def 100644
--- a/paddle/fluid/framework/distributed_strategy.proto
+++ b/paddle/fluid/framework/distributed_strategy.proto
@@ -33,22 +33,27 @@ message DistributedStrategy {
optional int32 localsgd_k_step = 7 [ default = 4 ];
optional bool dgc = 8 [ default = false ];
optional bool hierachical_allreduce = 9 [ default = false ];
- optional int32 nccl_comm_num = 10 [ default = 1 ];
- optional bool gradient_merge = 11 [ default = false ];
- optional int32 gradient_merge_k_step = 12 [ default = 1 ];
- optional bool sequential_execution = 13 [ default = false ];
- optional bool enable_backward_optimizer_op_deps = 14 [ default = true ];
- optional bool lars = 15 [ default = false ];
- optional bool lamb = 16 [ default = false ];
- optional bool fuse_elewise_add_act_ops = 17 [ default = false ];
- optional bool fuse_bn_act_ops = 18 [ default = false ];
- optional bool enable_auto_fusion = 19 [ default = false ];
- optional bool fuse_relu_depthwise_conv = 20 [ default = false ];
- optional bool enable_inplace = 21 [ default = false ];
- optional bool fuse_all_reduce_ops = 22 [ default = false ];
- optional int32 num_iteration_per_drop_scope = 23 [ default = 1 ];
- optional bool sync_batch_norm = 24 [ default = false ];
- optional bool fuse_all_optimizer_ops = 25 [ default = false ];
+ optional int32 hierachical_allreduce_inter_ranks = 10 [ default = 1 ];
+ optional int32 nccl_comm_num = 11 [ default = 1 ];
+ optional bool gradient_merge = 12 [ default = false ];
+ optional int32 gradient_merge_k_step = 13 [ default = 1 ];
+ optional bool sequential_execution = 14 [ default = false ];
+ optional bool enable_backward_optimizer_op_deps = 15 [ default = true ];
+ optional bool lars = 16 [ default = false ];
+ optional bool lamb = 17 [ default = false ];
+ optional bool fuse_elewise_add_act_ops = 18 [ default = false ];
+ optional bool fuse_bn_act_ops = 19 [ default = false ];
+ optional bool enable_auto_fusion = 20 [ default = false ];
+ optional bool fuse_relu_depthwise_conv = 21 [ default = false ];
+ optional bool enable_inplace = 22 [ default = false ];
+ optional bool fuse_all_reduce_ops = 23 [ default = false ];
+ optional int32 num_iteration_per_drop_scope = 24 [ default = 1 ];
+ optional bool sync_batch_norm = 25 [ default = false ];
+ optional bool fuse_all_optimizer_ops = 26 [ default = false ];
+ optional bool sync_nccl_allreduce = 27 [ default = true ];
+ optional bool fuse_broadcast_ops = 28 [ default = true ];
+ optional int32 num_threads = 29 [ default = 1 ];
+ optional int32 num_iteration_per_run = 30 [ default = 1 ];
// pipeline training
optional bool pipeline = 101 [ default = false ];
diff --git a/paddle/fluid/framework/dlpack_tensor.cc b/paddle/fluid/framework/dlpack_tensor.cc
index 74e344cfebe36..f2421248e33f2 100644
--- a/paddle/fluid/framework/dlpack_tensor.cc
+++ b/paddle/fluid/framework/dlpack_tensor.cc
@@ -30,7 +30,10 @@ static ::DLDataType GetDLDataTypeCode() {
} else if (std::is_integral::value) {
dtype.code = kDLInt;
} else {
- PADDLE_THROW("Unsupported data type %s", typeid(T).name());
+ PADDLE_THROW(platform::errors::Unavailable(
+ "Unsupported data type (%s), only supports float16, float, unsigned "
+ "int and int.",
+ platform::demangle(typeid(T).name())));
}
dtype.bits = 8 * sizeof(T);
dtype.lanes = 1;
@@ -52,8 +55,9 @@ static DLDataType GetDLDataTypeFromTypeIndex(proto::VarType::Type type) {
static auto type_to_dtype_map = CreateDLDataTypeMap();
static auto type_to_dtype_map_end_it = type_to_dtype_map.end();
auto it = type_to_dtype_map.find(static_cast(type));
- PADDLE_ENFORCE(it != type_to_dtype_map_end_it, "Unsupported data type %d",
- type);
+ PADDLE_ENFORCE_NE(it, type_to_dtype_map_end_it,
+ platform::errors::InvalidArgument(
+ "Unsupported data type (%s).", DataTypeToString(type)));
return it->second;
#undef REG_DL_DATA_TYPE
}
@@ -73,7 +77,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
ctx.device_id = place.device;
return ctx;
#else
- PADDLE_THROW("platform::CUDAPlace is not supported in CPU only version");
+ PADDLE_THROW(platform::errors::Unavailable(
+ "platform::CUDAPlace is not supported in CPU only version."));
#endif
}
@@ -84,8 +89,8 @@ struct DLContextVisitor : public boost::static_visitor<::DLContext> {
ctx.device_id = 0;
return ctx;
#else
- PADDLE_THROW(
- "platform::CUDAPinnedPlace is not supported in CPU only version");
+ PADDLE_THROW(platform::errors::Unavailable(
+ "platform::CUDAPinnedPlace is not supported in CPU only version."));
#endif
}
};
@@ -136,7 +141,10 @@ ::DLManagedTensor *DLPackTensor::ToCudfCompatibleDLManagedTensor() {
// refer to cupy and cudf, the compact tensor first dim's strides need to be 1
// and second dim's strides need to be length of rows of cudf
// cudf now only support dim=2
- PADDLE_ENFORCE_LE(t_.ndim, 2, "cudf now only support dim=2.");
+ PADDLE_ENFORCE_LE(t_.ndim, 2, platform::errors::InvalidArgument(
+ "cudf now only supports dimension is 2, "
+ "but received dimension is %d.",
+ t_.ndim));
if (t_.ndim > 1)
t_.strides = new int64_t[2]{1, t_.shape[1]};
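GetDLDataTypeCode above classifies the template type into a DLPack type code via type traits, and the patch turns the fallback branch into a typed Unavailable error. A sketch with a local stand-in enum follows; the real code uses the DLPack header's kDL* constants, so the enum here is an assumption for self-containment:

```cpp
// Sketch: classify a C++ scalar type into a DLPack-style type code.
#include <cstdio>
#include <type_traits>

enum DLCode { kDLInt, kDLUInt, kDLFloat, kDLInvalid };

template <typename T>
constexpr DLCode GetCode() {
  if (std::is_floating_point<T>::value) return kDLFloat;
  if (std::is_integral<T>::value)
    return std::is_unsigned<T>::value ? kDLUInt : kDLInt;
  return kDLInvalid;  // the real code throws errors::Unavailable here
}

int main() {
  std::printf("float: code %d, bits %zu\n", GetCode<float>(),
              8 * sizeof(float));
  std::printf("unsigned int: code %d\n", GetCode<unsigned int>());
  std::printf("long long: code %d\n", GetCode<long long>());
  return 0;
}
```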
diff --git a/paddle/fluid/framework/downpour_worker.cc b/paddle/fluid/framework/downpour_worker.cc
index cbdfa00652abd..3f70835c9d312 100644
--- a/paddle/fluid/framework/downpour_worker.cc
+++ b/paddle/fluid/framework/downpour_worker.cc
@@ -556,9 +556,11 @@ void DownpourWorker::TrainFilesWithProfiler() {
continue;
}
PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
- "Tensor %s contains Inf", var_name);
+ platform::errors::InvalidArgument(
+ "Tensor %s contains Inf.", var_name));
PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
- "Tensor %s contains NAN", var_name);
+ platform::errors::InvalidArgument(
+ "Tensor %s contains NAN.", var_name));
}
if (need_to_push_sparse_) {
@@ -829,9 +831,11 @@ void DownpourWorker::TrainFiles() {
continue;
}
PADDLE_ENFORCE_EQ(framework::TensorContainsInf(*tensor), false,
- "Tensor %s contains Inf", var_name);
+ platform::errors::InvalidArgument(
+ "Tensor %s contains Inf.", var_name));
PADDLE_ENFORCE_EQ(framework::TensorContainsNAN(*tensor), false,
- "Tensor %s contains NAN", var_name);
+ platform::errors::InvalidArgument(
+ "Tensor %s contains NAN.", var_name));
}
if (need_to_push_sparse_) {
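The checks above assert that fetched gradient tensors contain no Inf or NaN before pushing. A plain-buffer sketch of what TensorContainsInf/TensorContainsNAN test (assumed semantics; the real helpers walk LoDTensor storage and support multiple dtypes):

```cpp
// Sketch: scan a float buffer for Inf/NaN, as the fetch validation does.
#include <cmath>
#include <cstdio>
#include <vector>

bool ContainsInf(const std::vector<float>& t) {
  for (float v : t)
    if (std::isinf(v)) return true;
  return false;
}

bool ContainsNAN(const std::vector<float>& t) {
  for (float v : t)
    if (std::isnan(v)) return true;
  return false;
}

int main() {
  std::vector<float> grad = {0.5f, -1.0f, std::nanf("")};
  std::printf("inf: %d, nan: %d\n", ContainsInf(grad), ContainsNAN(grad));
  return 0;
}
```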
diff --git a/paddle/fluid/framework/eigen.h b/paddle/fluid/framework/eigen.h
index 21adcb9948b20..0e3edfb95cb9b 100644
--- a/paddle/fluid/framework/eigen.h
+++ b/paddle/fluid/framework/eigen.h
@@ -26,7 +26,11 @@ struct EigenDim {
using Type = Eigen::DSizes;
static Type From(const DDim& dims) {
- PADDLE_ENFORCE(arity(dims) == D, "D must match arity(DDim)");
+ PADDLE_ENFORCE_EQ(arity(dims), D,
+ platform::errors::InvalidArgument(
+ "Input dimension size should be equal to %d, but "
+ "received dimension size is %d.",
+ arity(dims), D));
Type ret;
for (int64_t d = 0; d < arity(dims); d++) {
ret[d] = dims[d];
@@ -69,8 +73,11 @@ struct EigenMatrix : public EigenTensor {
static typename EigenMatrix::Type Reshape(Tensor& tensor, // NOLINT
int num_col_dims) {
int rank = tensor.dims_.size();
- PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
- "`num_col_dims` must be between (0, rank_of_tensor).");
+ PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
+ platform::errors::InvalidArgument(
+ "Input dimension number(num_col_dims) must be "
+ "between 0 and %d, but received number is %d.",
+ rank, num_col_dims));
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
@@ -78,8 +85,11 @@ struct EigenMatrix : public EigenTensor {
static typename EigenMatrix::ConstType Reshape(const Tensor& tensor,
int num_col_dims) {
int rank = tensor.dims_.size();
- PADDLE_ENFORCE(num_col_dims > 0 && num_col_dims < rank,
- "`num_col_dims` must be between (0, rank_of_tensor).");
+ PADDLE_ENFORCE_EQ((num_col_dims > 0 && num_col_dims < rank), true,
+ platform::errors::InvalidArgument(
+ "Input dimension number(num_col_dims) must be "
+ "between 0 and %d, but received number is %d.",
+ rank, num_col_dims));
return EigenMatrix::From(tensor,
flatten_to_2d(tensor.dims(), num_col_dims));
}
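EigenMatrix::Reshape above flattens a rank-N tensor to 2-D around num_col_dims, which is why the new check pins num_col_dims to the open interval (0, rank). A sketch of that flatten_to_2d arithmetic, as inferred from the surrounding code (the FlattenTo2D helper below is hypothetical):

```cpp
// Sketch: collapse dims [0, num_col_dims) into rows, the rest into columns.
#include <cstdio>
#include <utility>
#include <vector>

std::pair<long long, long long> FlattenTo2D(const std::vector<int>& dims,
                                            int num_col_dims) {
  long long rows = 1, cols = 1;
  for (int i = 0; i < num_col_dims; ++i) rows *= dims[i];
  for (int i = num_col_dims; i < static_cast<int>(dims.size()); ++i)
    cols *= dims[i];
  return {rows, cols};
}

int main() {
  auto rc = FlattenTo2D({2, 3, 4, 5}, 2);
  std::printf("[2, 3, 4, 5] with num_col_dims=2 -> [%lld, %lld]\n",
              rc.first, rc.second);  // -> [6, 20]
  return 0;
}
```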
diff --git a/paddle/fluid/framework/executor_gc_helper.cc b/paddle/fluid/framework/executor_gc_helper.cc
index 1712d66cf4c99..706248229bc27 100644
--- a/paddle/fluid/framework/executor_gc_helper.cc
+++ b/paddle/fluid/framework/executor_gc_helper.cc
@@ -175,8 +175,9 @@ void DeleteUnusedTensors(
garbages.emplace_back(t.MoveMemoryHolder());
}
} else {
- PADDLE_THROW("Type %s of %s is not supported eager deletion",
- framework::ToTypeName(var->Type()), var_name);
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Type %s of variable %s is not supported eager deletion.",
+ framework::ToTypeName(var->Type()), var_name));
}
}
diff --git a/paddle/fluid/framework/garbage_collector.cc b/paddle/fluid/framework/garbage_collector.cc
index 08c3e6d7f592d..ac892443de36c 100644
--- a/paddle/fluid/framework/garbage_collector.cc
+++ b/paddle/fluid/framework/garbage_collector.cc
@@ -79,15 +79,15 @@ StreamGarbageCollector::StreamGarbageCollector(const platform::CUDAPlace &place,
size_t max_memory_size)
: GarbageCollector(place, max_memory_size) {
platform::CUDADeviceGuard guard(place.device);
- PADDLE_ENFORCE(cudaStreamCreate(&stream_));
+ PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamCreate(&stream_));
callback_manager_.reset(new platform::StreamCallbackManager(stream_));
}
StreamGarbageCollector::~StreamGarbageCollector() {
auto place = BOOST_GET_CONST(platform::CUDAPlace, this->dev_ctx_->GetPlace());
platform::CUDADeviceGuard guard(place.device);
- PADDLE_ENFORCE(cudaStreamSynchronize(stream_));
- PADDLE_ENFORCE(cudaStreamDestroy(stream_));
+ PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream_));
+ PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamDestroy(stream_));
}
cudaStream_t StreamGarbageCollector::stream() const { return stream_; }
diff --git a/paddle/fluid/framework/io/shell.h b/paddle/fluid/framework/io/shell.h
index 5b3e9a4df1d11..dc486275d6f58 100644
--- a/paddle/fluid/framework/io/shell.h
+++ b/paddle/fluid/framework/io/shell.h
@@ -17,6 +17,9 @@
#include
#include
#ifdef _WIN32
+#ifndef NOMINMAX
+#define NOMINMAX // msvc max/min macro conflict with std::min/max
+#endif
#include
#else
#include
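The NOMINMAX guard added above exists because &lt;windows.h&gt; otherwise defines max and min as function-style macros, which break std::max/std::min calls at preprocessing time on MSVC. A minimal sketch (compiles on any platform; the Windows include is conditional):

```cpp
// Sketch: with NOMINMAX defined before <windows.h>, std::max resolves
// normally; without it, the max macro would mangle the call on MSVC.
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX  // keep windows.h from defining max/min macros
#endif
#include <windows.h>
#endif

#include <algorithm>
#include <cstdio>

int main() {
  int hi = std::max(3, 7);  // fails to compile on MSVC without NOMINMAX
  std::printf("max = %d\n", hi);
  return 0;
}
```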
diff --git a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
index a56fcd1a52339..a4b43086785b3 100644
--- a/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/attention_lstm_fuse_pass.cc
@@ -135,7 +135,9 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
void PrepareParameters(Graph* graph, const Param& param, ir::Node* lstm_op) {
// Check parameters
- PADDLE_ENFORCE(graph->Has(kParamScopeAttr));
+ PADDLE_ENFORCE_EQ(graph->Has(kParamScopeAttr), true,
+ platform::errors::InvalidArgument(
+ "Graph have no attribute: kParamScopeAttr."));
auto& scope = graph->Get(kParamScopeAttr);
// Create new parameters.
@@ -193,7 +195,10 @@ void PrepareParameters(Graph* graph, const Param& param, ir::Node* lstm_op) {
// reshape attention_bias
auto* attention_bias_t =
scope.FindVar(param.AttentionBias)->GetMutable();
- PADDLE_ENFORCE_EQ(attention_bias_t->dims().size(), 1);
+ PADDLE_ENFORCE_EQ(attention_bias_t->dims().size(), 1,
+ platform::errors::InvalidArgument(
+ "Tensor attention bias dimension size(%d) must be 1.",
+ attention_bias_t->dims().size()));
attention_bias_t->Resize(make_ddim({1, attention_bias_t->dims()[0]}));
auto* attention_scalar_bias_t =
@@ -252,7 +257,10 @@ void PrepareLSTMBias(const LoDTensor& B_forget, const LoDTensor& B_input,
B_forget.data(), B_input.data(), B_output.data(),
B_cell.data()};
- PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1);
+ PADDLE_ENFORCE_EQ(B_forget.dims().size(), 1,
+ platform::errors::InvalidArgument(
+ "Tensor B forget dimension size(%d) must be 1.",
+ B_forget.dims().size()));
int D = B_forget.dims()[0];
out->Resize(make_ddim({1, 4 * D}));
auto* out_data = out->mutable_data(platform::CPUPlace());
diff --git a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
index d7faf2ee64833..f3634f90e6c69 100644
--- a/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
+++ b/paddle/fluid/framework/ir/coalesce_grad_tensor_pass.cc
@@ -119,9 +119,11 @@ class CoalesceGradTensorPass : public ir::Pass {
p_g_dense_grad.insert(p_g_dense_grad.end(), group_p_g.begin(),
group_p_g.end());
}
- PADDLE_ENFORCE_EQ(
- p_g_dense_grad.size(), num_of_p_g_dense_grad,
- "The number of p_g_dense_grad is not consistent with before.");
+ PADDLE_ENFORCE_EQ(p_g_dense_grad.size(), num_of_p_g_dense_grad,
+ platform::errors::InvalidArgument(
+ "The number of dense grads is not consistent with "
+ "previous. Previous(%d), now(%d).",
+ p_g_dense_grad.size(), num_of_p_g_dense_grad));
auto &pinned_var_set =
graph->GetOrInit(details::kPinnedVars);
@@ -131,8 +133,11 @@ class CoalesceGradTensorPass : public ir::Pass {
} else {
for (auto &sub_param_grad : group_params_grads) {
RecordGradients(p_g_dense_grad, vars_info, &pinned_var_set);
- PADDLE_ENFORCE_EQ(IsUnifiedDtype(sub_param_grad, vars_info), true,
- "The data type of the same group is not consistent.");
+ PADDLE_ENFORCE_EQ(
+ IsUnifiedDtype(sub_param_grad, vars_info), true,
+ platform::errors::InvalidArgument("All gradient variable in "
+ "kGroupParamsAndDenseGrads, must "
+ "have same type."));
CoalesceTensors(vars_info, sub_param_grad, &result);
}
}
@@ -145,15 +150,25 @@ class CoalesceGradTensorPass : public ir::Pass {
// The Gradients should not be reused during memory optimization.
for (auto &p_g : sub_param_grad) {
auto iter = vars_info.find(p_g.second);
- PADDLE_ENFORCE_EQ(iter != vars_info.end(), true, "%s is not found.",
- p_g.second);
- PADDLE_ENFORCE_EQ(!iter->second.empty(), true);
+ PADDLE_ENFORCE_EQ(iter != vars_info.end(), true,
+ platform::errors::NotFound(
+ "Parameter@Grad %s is not found.", p_g.second));
+ PADDLE_ENFORCE_EQ(
+ !iter->second.empty(), true,
+ platform::errors::InvalidArgument(
+ "Parameter@Grad %s's var node is empty.", p_g.second));
for (auto it : iter->second) {
- PADDLE_ENFORCE_NOT_NULL(it->Var());
+ PADDLE_ENFORCE_NOT_NULL(
+ it->Var(),
+ platform::errors::InvalidArgument(
+ "A node of Parameter@Grad %s does not hold variable.",
+ p_g.second));
pinned_var_set->insert(it->Var()->Name());
}
PADDLE_ENFORCE_EQ(IsLoDTensorType(GetTypeOfVar(vars_info, p_g.second)),
- true);
+ true,
+ platform::errors::InvalidArgument(
+ "Parameter@Grad %s is not LoDTensor.", p_g.second));
}
}
@@ -192,8 +207,10 @@ class CoalesceGradTensorPass : public ir::Pass {
auto fused_grad_var_name = std::string(details::kFusedVarNamePrefix) +
"@GRAD@" + params_grads.begin()->second;
auto &fused_var_set = result->Get(details::kFusedVars);
- PADDLE_ENFORCE_EQ(fused_var_set.count(fused_grad_var_name), 0,
- "%s is duplicate in FusedVars.", fused_grad_var_name);
+ PADDLE_ENFORCE_EQ(
+ fused_var_set.count(fused_grad_var_name), 0,
+        platform::errors::AlreadyExists("Var(%s) is duplicated in FusedVars.",
+ fused_grad_var_name));
fused_var_set.insert(fused_grad_var_name);
result->Get(details::kFusedGrads)
.emplace_back(fused_grad_var_name);
@@ -420,11 +437,16 @@ class CoalesceGradTensorPass : public ir::Pass {
const std::unordered_map> &vars_info,
const std::string &var_name) const {
auto grad_iter = vars_info.find(var_name);
- PADDLE_ENFORCE_EQ(grad_iter != vars_info.end(), true, "%s is not found.",
- var_name);
- PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true, "%s is not found.",
- var_name);
- PADDLE_ENFORCE_NOT_NULL(grad_iter->second.front()->Var());
+ PADDLE_ENFORCE_EQ(
+ grad_iter != vars_info.end(), true,
+ platform::errors::NotFound("Variable %s is not found.", var_name));
+ PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true,
+ platform::errors::InvalidArgument(
+ "Variable %s's node is empty.", var_name));
+ PADDLE_ENFORCE_NOT_NULL(
+ grad_iter->second.front()->Var(),
+ platform::errors::InvalidArgument(
+ "A node of %s does not hold variable.", var_name));
return grad_iter->second.front()->Var();
}
@@ -464,7 +486,12 @@ class CoalesceGradTensorPass : public ir::Pass {
params_name.emplace_back(p_g.first);
grads_name.emplace_back(p_g.second);
auto next_dtype = GetDtypeOfVar(vars_info, p_g.second);
- PADDLE_ENFORCE_EQ(next_dtype, dtype);
+ PADDLE_ENFORCE_EQ(
+ next_dtype, dtype,
+ platform::errors::InvalidArgument(
+ "All Parameter@Grad should have same dtype, but "
+ "there are two different type: %s, %s.",
+ DataTypeToString(next_dtype), DataTypeToString(dtype)));
}
result->Get(details::kProgramDescs).emplace_back();
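
This file also starts distinguishing error categories: NotFound for a missing Parameter@Grad, InvalidArgument for a malformed one, AlreadyExists for a duplicate fused var. A sketch of the idea with a plain enum; the names mirror, but do not reproduce, platform::errors.

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

enum class ErrorKind { kNotFound, kInvalidArgument, kAlreadyExists };

// Prefix the message with its category so a caller can tell a missing
// variable from a malformed or duplicated one.
[[noreturn]] void Fail(ErrorKind kind, const std::string& msg) {
  static const std::map<ErrorKind, std::string> kPrefix = {
      {ErrorKind::kNotFound, "NotFound: "},
      {ErrorKind::kInvalidArgument, "InvalidArgument: "},
      {ErrorKind::kAlreadyExists, "AlreadyExists: "}};
  throw std::runtime_error(kPrefix.at(kind) + msg);
}

int main() {
  std::map<std::string, int> vars_info{{"w@GRAD", 1}};
  try {
    if (vars_info.count("b@GRAD") == 0)
      Fail(ErrorKind::kNotFound, "Parameter@Grad b@GRAD is not found.");
  } catch (const std::runtime_error& e) {
    std::cerr << e.what() << "\n";  // prints the categorized message
  }
  return 0;
}
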
diff --git a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc
index fecc159adef19..079fb1479861c 100644
--- a/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_affine_channel_fuse_pass.cc
@@ -50,7 +50,12 @@ void recompute_bias_and_weights(const Scope* scope, ir::Node* conv_weight,
Eigen::Array>;
// Re-compute bias of conv2d from AffineChannel
- PADDLE_ENFORCE_EQ(eltwise_y_in_tensor->dims(), ac_bias_tensor.dims());
+ PADDLE_ENFORCE_EQ(
+ eltwise_y_in_tensor->dims(), ac_bias_tensor.dims(),
+ platform::errors::InvalidArgument(
+ "Tensor elementwise y(%d) and activation bias(%d) must have same "
+ "dimension.",
+ eltwise_y_in_tensor->dims().size(), ac_bias_tensor.dims().size()));
auto* scale_tensor = scope->FindVar(ac_scale.Name())->GetMutable();
@@ -78,11 +83,13 @@ void recompute_bias_and_weights(const Scope* scope, ir::Node* conv_weight,
}
void ConvAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
auto* scope = param_scope();
- PADDLE_ENFORCE(scope);
+ PADDLE_ENFORCE_NOT_NULL(
+ scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
GraphPatternDetector gpd;
auto* conv_input =
@@ -152,11 +159,13 @@ void ConvAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
}
void ConvEltwiseAddAffineChannelFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
auto* scope = param_scope();
- PADDLE_ENFORCE(scope);
+ PADDLE_ENFORCE_NOT_NULL(
+ scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
GraphPatternDetector gpd;
auto* conv_input =
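
Both ApplyImpl bodies above apply the same null-check upgrade: PADDLE_ENFORCE(ptr) becomes PADDLE_ENFORCE_NOT_NULL(ptr, message). The shape of such a check, reduced to a minimal hypothetical helper:

#include <iostream>
#include <stdexcept>
#include <string>

// Return the pointer unchanged when non-null; otherwise throw with a
// message naming exactly what was missing.
template <typename T>
T* EnforceNotNull(T* ptr, const std::string& what) {
  if (ptr == nullptr)
    throw std::invalid_argument(what + " cannot be nullptr.");
  return ptr;
}

int main() {
  int dummy = 0;  // stands in for a real Scope object
  int* scope = EnforceNotNull(&dummy, "Scope");
  std::cout << "scope is valid: " << (scope != nullptr) << "\n";
  return 0;
}
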
diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
index 7313ef2cc35dd..60e4ac8cbcfd8 100644
--- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc
@@ -61,7 +61,12 @@ void recompute_bias_and_weights(const Scope* scope,
Eigen::Array>;
// Re-compute bias of conv2d from BN
- PADDLE_ENFORCE_EQ(eltwise_y_in_tensor->dims(), bn_bias_tensor.dims());
+ PADDLE_ENFORCE_EQ(
+ eltwise_y_in_tensor->dims(), bn_bias_tensor.dims(),
+ platform::errors::InvalidArgument("Tensor elementwise y(%d) and batch "
+ "norm bias(%d) must have same dims.",
+ eltwise_y_in_tensor->dims().size(),
+ bn_bias_tensor.dims().size()));
auto* scale_tensor = scope->FindVar(bn_scale.Name())->GetMutable();
auto* variance_tensor =
@@ -116,11 +121,13 @@ void recompute_bias_and_weights(const Scope* scope,
}
void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
auto* scope = param_scope();
- PADDLE_ENFORCE(scope);
+ PADDLE_ENFORCE_NOT_NULL(
+ scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
GraphPatternDetector gpd;
auto* conv_input =
@@ -186,11 +193,18 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
if (has_bias && conv->Op()->Input("Bias").size() > 0) {
// reuse existing conv bias node
auto conv_bias_names = conv->Op()->Input("Bias");
- PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1UL);
+ PADDLE_ENFORCE_EQ(
+ conv_bias_names.size(), 1UL,
+ platform::errors::InvalidArgument("Find input var Bais error."));
auto* conv_bias_var = scope->FindVar(conv_bias_names[0]);
auto* conv_bias_tensor = conv_bias_var->GetMutable();
- PADDLE_ENFORCE_EQ(conv_bias_tensor->dims(),
- eltwise_y_in_tensor->dims());
+ PADDLE_ENFORCE_EQ(
+ conv_bias_tensor->dims(), eltwise_y_in_tensor->dims(),
+ platform::errors::InvalidArgument(
+ "Tensor convolution bias(%d) and elementwise y(%d) "
+ "must have same dims.",
+ conv_bias_tensor->dims().size(),
+ eltwise_y_in_tensor->dims().size()));
auto eigen_conv_bias = EigenVector::From(*conv_bias_tensor);
eigen_conv_bias += EigenVector::From(*eltwise_y_in_tensor);
@@ -236,11 +250,13 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const {
}
void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
auto* scope = param_scope();
- PADDLE_ENFORCE(scope);
+ PADDLE_ENFORCE_NOT_NULL(
+ scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
GraphPatternDetector gpd;
auto* conv_input =
diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc
index 168d0afb26d98..74dd6a7cdc5a6 100644
--- a/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass_tester.cc
@@ -71,8 +71,16 @@ void TestMain(const std::string& conv_type) {
int num_bn_nodes_after = GetNumOpNodes(graph, "batch_norm");
VLOG(3) << DebugString(graph);
- PADDLE_ENFORCE_EQ(num_bn_nodes_before, 1);
- PADDLE_ENFORCE_EQ(num_bn_nodes_after, 0);
+ PADDLE_ENFORCE_EQ(
+ num_bn_nodes_before, 1,
+ platform::errors::InvalidArgument(
+ "Before conv_bn_fuse_pass, number of batch norm op(%d) must be 1.",
+ num_bn_nodes_before));
+ PADDLE_ENFORCE_EQ(
+ num_bn_nodes_after, 0,
+ platform::errors::InvalidArgument(
+ "After conv_bn_fuse_pass, number of batch norm op(%d) must be 0.",
+ num_bn_nodes_after));
}
TEST(ConvBNFusePass, conv2d) { TestMain("conv"); }
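
The testers in this patch assert on op-node counts before and after a pass runs. A compact stand-in for that bookkeeping, with a fake pass that deletes the batch_norm entries; all names here are illustrative.

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

int CountOps(const std::vector<std::string>& ops, const std::string& type) {
  return static_cast<int>(std::count(ops.begin(), ops.end(), type));
}

int main() {
  std::vector<std::string> graph{"conv2d", "batch_norm", "relu"};
  int before = CountOps(graph, "batch_norm");
  // Stand-in for the fuse pass: fold batch_norm away entirely.
  graph.erase(std::remove(graph.begin(), graph.end(),
                          std::string("batch_norm")),
              graph.end());
  int after = CountOps(graph, "batch_norm");
  assert(before == 1 && after == 0);  // mirrors the enforce pair above
  return 0;
}
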
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc
index b00be79a2a7da..2627da7dc40f1 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add2_act_fuse_pass.cc
@@ -91,7 +91,9 @@ void ConvElementwiseAdd2ActFusePass::ApplyImpl(ir::Graph* graph) const {
auto* new_conv_op = graph->CreateOpNode(&new_op_desc);
// Link inputs and outputs.
- PADDLE_ENFORCE(subgraph.count(x));
+ PADDLE_ENFORCE_NE(
+ subgraph.count(x), 0,
+ platform::errors::NotFound("Detector did not find input x of conv2d."));
auto* conv_in_node = subgraph.at(x);
IR_NODE_LINK_TO(conv_in_node, new_conv_op); // Input
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc
index b15871ef03fbb..0b454a0407e48 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_act_fuse_pass.cc
@@ -78,7 +78,9 @@ void ConvElementwiseAddActFusePass::ApplyImpl(ir::Graph* graph) const {
auto* new_conv_op = graph->CreateOpNode(&new_op_desc);
// Link inputs and outputs.
- PADDLE_ENFORCE(subgraph.count(x));
+ PADDLE_ENFORCE_NE(
+ subgraph.count(x), 0,
+ platform::errors::NotFound("Detector did not find input x of conv2d."));
auto* conv_in_node = subgraph.at(x);
IR_NODE_LINK_TO(conv_in_node, new_conv_op); // Input
diff --git a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
index 8c491d4f58b4d..007770cf57d27 100644
--- a/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/conv_elementwise_add_fuse_pass.cc
@@ -66,7 +66,9 @@ void ConvElementwiseAddFusePass::ApplyImpl(ir::Graph* graph) const {
auto* new_conv_op = graph->CreateOpNode(&new_op_desc);
// Link inputs and outputs.
- PADDLE_ENFORCE(subgraph.count(x));
+ PADDLE_ENFORCE_NE(
+ subgraph.count(x), 0,
+ platform::errors::NotFound("Detector did not find input x of conv2d."));
auto* conv_in_node = subgraph.at(x);
IR_NODE_LINK_TO(conv_in_node, new_conv_op); // Input
diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
index 85e2f2bad323f..c50b7476c6a96 100644
--- a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
@@ -64,17 +64,23 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
#undef SET_IN
// Multiply embeddings with Weights
- PADDLE_ENFORCE(scope);
+ PADDLE_ENFORCE_NOT_NULL(
+ scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
const std::string& embeddings = patterns::UniqueKey("Embeddings");
auto* embeddings_var = scope->Var(embeddings);
- PADDLE_ENFORCE(embeddings_var);
+ PADDLE_ENFORCE_NOT_NULL(
+ embeddings_var,
+ platform::errors::InvalidArgument(
+ "Embeddings variable's pointer cannot be nullptr."));
auto* embeddings_tensor =
embeddings_var->GetMutable();
// Get WeightX size: [single_embedding, fc_size]
// and embedding size: [dict_size, single_embedding]
// and create new size of embeddings eg. [dict_size , hidden_size]
auto* embedding_var = scope->FindVar(W->Name());
- PADDLE_ENFORCE(embedding_var);
+ PADDLE_ENFORCE_NOT_NULL(
+ embedding_var, platform::errors::InvalidArgument(
+ "Embedding variable's pointer cannot be nullptr."));
const auto& embedding_tensor = embedding_var->Get();
const auto& weightx_tensor =
@@ -90,7 +96,9 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
// Adding biases to GEMM result to be
auto* lstm_bias_var = scope->FindVar(bias->Name());
- PADDLE_ENFORCE(lstm_bias_var);
+ PADDLE_ENFORCE_NOT_NULL(lstm_bias_var,
+ platform::errors::InvalidArgument(
+ "Lstm bias var ptr cannot be nullptr."));
const auto& lstm_bias_tensor = lstm_bias_var->Get();
auto alpha = 1.0f;
diff --git a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
index c1f822d7ca5cd..51e9545bf92e8 100644
--- a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass_tester.cc
@@ -56,8 +56,17 @@ TEST(FCElementwiseLayerNormFusePass, basic) {
GetNumOpNodes(graph, "fused_fc_elementwise_layernorm");
VLOG(3) << DebugString(graph);
- PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6);
- PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1);
+ PADDLE_ENFORCE_EQ(
+ num_nodes_before, num_nodes_after + 6,
+ platform::errors::InvalidArgument(
+ "After pass, the number of nodes should be reduced by 6, but the "
+ "number before pass is %d, after pass is %d.",
+ num_nodes_before, num_nodes_after));
+ PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1,
+ platform::errors::InvalidArgument(
+ "After pass, the number of nodes of type "
+ "'fused_fc_elementwise_layernorm' should be 1, not %d.",
+ num_fused_nodes_after));
}
} // namespace ir
diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc
index 6a9c64e3a7f24..066a8fb975740 100644
--- a/paddle/fluid/framework/ir/fc_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc
@@ -25,7 +25,8 @@ namespace framework {
namespace ir {
void FCFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE_NOT_NULL(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init("fc_fuse", graph);
int found_fc_count = 0;
diff --git a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
index dfae572d4634e..cf35c1ac772da 100644
--- a/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/fc_fuse_pass_tester.cc
@@ -79,9 +79,17 @@ TEST(FCFusePass, basic) {
int num_fc_nodes_after = GetNumOpNodes(graph, "fc");
VLOG(3) << DebugString(graph);
- PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6);
- PADDLE_ENFORCE_EQ(num_fc_nodes_after, 2);
- PADDLE_ENFORCE_EQ(num_mul_nodes_before, num_fc_nodes_after);
+  PADDLE_ENFORCE_EQ(num_nodes_before, num_nodes_after + 6,
+                    platform::errors::InvalidArgument(
+                        "The pass should remove 6 nodes: num_nodes_before=%d, "
+                        "num_nodes_after=%d.",
+                        num_nodes_before, num_nodes_after));
+  PADDLE_ENFORCE_EQ(num_fc_nodes_after, 2,
+                    platform::errors::InvalidArgument(
+                        "Expected 2 fc nodes after the pass, but "
+                        "num_fc_nodes_after=%d.",
+                        num_fc_nodes_after));
+  PADDLE_ENFORCE_EQ(num_mul_nodes_before, num_fc_nodes_after,
+                    platform::errors::InvalidArgument(
+                        "Each mul should be fused into one fc: "
+                        "num_mul_nodes_before=%d, num_fc_nodes_after=%d.",
+                        num_mul_nodes_before, num_fc_nodes_after));
}
} // namespace ir
diff --git a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc
index d26998e6fc99d..a2185cdc5593c 100644
--- a/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_gru_fuse_pass.cc
@@ -26,15 +26,15 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
GraphPatternDetector gpd;
auto* pattern = gpd.mutable_pattern();
- // Create pattern.
- patterns::FC fc_pattern(pattern, name_scope);
- patterns::GRU gru_pattern(pattern, name_scope);
-
PDNode* x =
pattern->NewNode(patterns::UniqueKey("x"))->assert_var_not_persistable();
+ // Create pattern.
+ patterns::FC fc_pattern(pattern, name_scope);
auto* fc_out = fc_pattern(x, with_fc_bias, /* with_relu */ false);
fc_out->AsIntermediate(); // fc_out is a tmp var, will be removed after fuse.
+
+ patterns::GRU gru_pattern(pattern, name_scope);
gru_pattern(fc_out);
// Create New OpDesc
@@ -48,17 +48,18 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
SET_IN(X, x);
SET_IN(WeightX, weight_x);
SET_IN(WeightH, weight_h);
- if (with_fc_bias) {
- op_desc.SetInput("Bias", {NEW_NAME(bias) + bias->Name()});
- } else {
- SET_IN(Bias, bias);
- }
+ SET_IN(Bias, bias);
#undef SET_IN
+ // TODO(grygielski): Add H0 to the pass
op_desc.SetInput("H0", {});
op_desc.SetOutput("Hidden", {hidden->Name()});
op_desc.SetAttr("is_reverse", gru->Op()->GetAttr("is_reverse"));
+ op_desc.SetAttr("origin_mode",
+ gru->Op()->GetAttrIfExists("origin_mode"));
// TODO(TJ): This should be a option for infer
op_desc.SetAttr("use_seq", true);
+ op_desc.SetAttr("activation", gru->Op()->GetAttr("activation"));
+ op_desc.SetAttr("gate_activation", gru->Op()->GetAttr("gate_activation"));
#define SET_IMTERMEDIATE_OUT(key) op_desc.SetOutput(#key, {NEW_NAME(key)})
SET_IMTERMEDIATE_OUT(ReorderedH0);
@@ -68,26 +69,30 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
#undef SET_IMTERMEDIATE_OUT
auto* op = graph->CreateOpNode(&op_desc);
- PADDLE_ENFORCE(graph->Has(kParamScopeAttr));
- auto& scope = graph->Get(kParamScopeAttr);
if (with_fc_bias) {
- // Fusion GRU bias = fcbias + grubias
- auto* fusion_bias_var = scope.Var(NEW_NAME(bias) + bias->Name());
- auto* out_bias_tensor =
- fusion_bias_var->GetMutable();
- PADDLE_ENFORCE(fusion_bias_var);
- auto* gru_bias_var = scope.FindVar(bias->Name());
- auto* fc_bias_var = scope.FindVar(fc_bias->Name());
- PADDLE_ENFORCE(gru_bias_var);
- PADDLE_ENFORCE(fc_bias_var);
- const auto& gru_bias_tenosr = gru_bias_var->Get();
- const auto& fc_bias_tensor = fc_bias_var->Get();
- // new bias = fc bias + gru bias
- out_bias_tensor->Resize(gru_bias_tenosr.dims());
- auto* data = out_bias_tensor->mutable_data(platform::CPUPlace());
- for (int i = 0; i < out_bias_tensor->numel(); i++) {
- data[i] =
- fc_bias_tensor.data()[i] + gru_bias_tenosr.data()[i];
+ auto* gru_bias_var = scope->FindVar(bias->Name());
+ auto* fc_bias_var = scope->FindVar(fc_bias->Name());
+ PADDLE_ENFORCE_NE(
+ gru_bias_var, nullptr,
+ platform::errors::NotFound("GRU bias var has not been found."));
+ PADDLE_ENFORCE_NE(
+ fc_bias_var, nullptr,
+ platform::errors::NotFound("FC bias var has not been found."));
+
+ auto* gru_bias_tensor = gru_bias_var->GetMutable();
+ auto* fc_bias_tensor = fc_bias_var->GetMutable();
+ PADDLE_ENFORCE_EQ(
+ gru_bias_tensor->numel(), fc_bias_tensor->numel(),
+ platform::errors::PreconditionNotMet(
+ "GRU and FC biases have to have equal number of elements."));
+
+ auto gru_bias_data =
+ gru_bias_tensor->mutable_data(platform::CPUPlace());
+ auto* fc_bias_data = fc_bias_tensor->data();
+
+ // Recompute GRU bias
+ for (int i = 0; i < gru_bias_tensor->numel(); ++i) {
+ gru_bias_data[i] += fc_bias_data[i];
}
}
#undef GET_NODE
@@ -108,7 +113,7 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
IR_NODE_LINK_TO(x, op);
IR_NODE_LINK_TO(weight_x, op);
IR_NODE_LINK_TO(weight_h, op);
- IR_NODE_LINK_TO(bias, op); // actually should link to new bias if have
+ IR_NODE_LINK_TO(bias, op);
IR_NODE_LINK_TO(op, hidden);
// h0?
return op;
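
Besides the enforce upgrades, this hunk reworks the fusion itself: instead of materializing a separate fused bias variable, the FC bias is accumulated into the GRU bias in place after an element-count check. The arithmetic, reduced to plain vectors:

#include <cassert>
#include <cstddef>
#include <vector>

// Accumulate the FC bias into the GRU bias element-wise, mirroring the
// numel() precondition with an assert.
void FoldBias(std::vector<float>* gru_bias,
              const std::vector<float>& fc_bias) {
  assert(gru_bias->size() == fc_bias.size());
  for (std::size_t i = 0; i < fc_bias.size(); ++i)
    (*gru_bias)[i] += fc_bias[i];
}

int main() {
  std::vector<float> gru_bias{1.0f, 2.0f, 3.0f};
  std::vector<float> fc_bias{0.5f, 0.5f, 0.5f};
  FoldBias(&gru_bias, fc_bias);
  assert(gru_bias[0] == 1.5f && gru_bias[2] == 3.5f);
  return 0;
}
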
diff --git a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc
index 44306a729544d..12c7fc051e23a 100644
--- a/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/fc_lstm_fuse_pass.cc
@@ -52,13 +52,17 @@ int BuildFusion(Graph* graph, const std::string& name_scope, Scope* scope,
#undef SET_IN
if (with_fc_bias) {
// Add FC-bias with LSTM-bias and create a new weight
- PADDLE_ENFORCE(scope);
+ PADDLE_ENFORCE_NOT_NULL(
+ scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
const std::string& new_bias_var = patterns::UniqueKey("NewBias");
auto* bias_var = scope->Var(new_bias_var);
- PADDLE_ENFORCE(bias_var);
+ PADDLE_ENFORCE_NOT_NULL(bias_var, platform::errors::InvalidArgument(
+ "Bias var ptr cannot be nullptr."));
auto* bias_tensor = bias_var->GetMutable();
auto* lstm_bias_var = scope->FindVar(bias->Name());
- PADDLE_ENFORCE(lstm_bias_var);
+ PADDLE_ENFORCE_NOT_NULL(lstm_bias_var,
+ platform::errors::InvalidArgument(
+ "Lstm bias var ptr cannot be nullptr."));
const auto& lstm_bias_tensor = lstm_bias_var->Get();
bias_tensor->Resize(lstm_bias_tensor.dims());
diff --git a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
index 7d6ef5b9023b0..54c05046a2c2f 100644
--- a/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_bn_act_pass.cc
@@ -320,7 +320,7 @@ std::vector FuseBatchNormActPass::ReplaceNode(
return node;
});
PADDLE_ENFORCE_EQ(has_replaced, true,
- platform::errors::NotFound("Not find %s in the node list.",
+ platform::errors::NotFound("Not found %s in the node list.",
cur_node->Name()));
return new_list;
}
diff --git a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc
index 5c2c574fd681a..b559d66fe7456 100644
--- a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc
@@ -42,7 +42,8 @@ void FuseElewiseAddActPass::ApplyImpl(ir::Graph *graph) const {
// ele_add(x, act(y))
ir::Graph *FuseElewiseAddActPass::FuseElewiseAddAct(
ir::Graph *graph, const std::unordered_set &act_types) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init("elewise_add_act", graph);
GraphPatternDetector gpd;
@@ -93,7 +94,8 @@ ir::Graph *FuseElewiseAddActPass::FuseElewiseAddAct(
// act(ele_add(x,y))
ir::Graph *FuseElewiseAddActPass::FuseActElewiseAdd(
ir::Graph *graph, const std::unordered_set &act_types) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init("act_elewise_add", graph);
GraphPatternDetector gpd;
@@ -145,7 +147,8 @@ ir::Graph *FuseElewiseAddActPass::FuseActElewiseAdd(
// ele_add_grad: in["Y", "Out@GRAD"], out["X@GRAD", "Y@GRAD"]
ir::Graph *FuseElewiseAddActPass::FuseElewiseAddActInplaceGrad(
ir::Graph *graph, const std::unordered_set &act_types) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init("elewise_add_act_grad", graph);
GraphPatternDetector gpd;
@@ -252,10 +255,11 @@ void FuseElewiseAddActPass::RemoveIntermediateOut(Graph *graph) const {
bool save_intermediate_out = BOOST_GET_CONST(
bool, cur_node->Op()->GetAttr("save_intermediate_out"));
auto intermediate_out_args = cur_node->Op()->Output("IntermediateOut");
- PADDLE_ENFORCE(
- save_intermediate_out && !intermediate_out_args.empty(),
- "The %s should save the intermediate_out in the fusing stage.",
- cur_node->Name());
+ PADDLE_ENFORCE_EQ(
+ (save_intermediate_out && !intermediate_out_args.empty()), true,
+ platform::errors::InvalidArgument(
+ "The %s should save the intermediate out in the fusing stage.",
+ cur_node->Name()));
// If the intermediate_out's output is empty, it should be removed.
auto cur_node_outputs = cur_node->outputs;
@@ -271,10 +275,11 @@ void FuseElewiseAddActPass::RemoveIntermediateOut(Graph *graph) const {
} else if (cur_node->Name() == "fused_elemwise_activation_grad") {
auto intermediate_out_grad_args =
cur_node->Op()->Output(GradVarName("IntermediateOut"));
- PADDLE_ENFORCE(
- !intermediate_out_grad_args.empty(),
- "The %s should save the intermediate_out in the fusing stage.",
- cur_node->Name());
+ PADDLE_ENFORCE_EQ(
+ intermediate_out_grad_args.empty(), false,
+ platform::errors::InvalidArgument(
+ "The %s should save the intermediate out in the fusing stage.",
+ cur_node->Name()));
auto cur_node_outputs = cur_node->outputs;
// If the intermediate_out_g's output is empty, it should be removed.
for (auto &out : cur_node_outputs) {
@@ -312,7 +317,11 @@ void FuseElewiseAddActPass::ReLinkNodes(Graph *graph,
nodes2delete.emplace(out);
}
} else {
- PADDLE_ENFORCE(out == intermediate_out);
+ PADDLE_ENFORCE_EQ(
+ out, intermediate_out,
+ platform::errors::InvalidArgument(
+ "Output of op(%s) must be %s, but not %s.", op_1->Name(),
+ intermediate_out->Name(), out->Name()));
IR_OP_VAR_LINK(fused_op, out);
}
}
@@ -347,8 +356,9 @@ std::vector FuseElewiseAddActPass::ReplaceNode(
}
return node;
});
- PADDLE_ENFORCE(has_replaced, "Not find %s in the node list.",
- cur_node->Name());
+ PADDLE_ENFORCE_EQ(has_replaced, true,
+ platform::errors::NotFound("Not found %s in the node list.",
+ cur_node->Name()));
return new_list;
}
diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc
index 482d8cf3d2f19..c284c1f4587cd 100644
--- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc
@@ -50,18 +50,25 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
fused_scale2->inputs.end());
for (auto &out_node : fused_scale1->outputs) {
if (fused_scale2_in_nodes.count(out_node)) {
- PADDLE_ENFORCE(out_node->IsCtrlVar(),
- "The dependency var only should be ctrl var.");
+ PADDLE_ENFORCE_EQ(out_node->IsCtrlVar(), true,
+ platform::errors::PreconditionNotMet(
+ "In adam op pass, the dependency var(%s) only "
+ "should be ctrl var.",
+ out_node->Name()));
not_need_ctrl_var_nodes.insert(out_node);
}
}
for (auto &node : not_need_ctrl_var_nodes) {
// remove this node from the input op node.
- PADDLE_ENFORCE(!node->inputs.empty(),
- "The input should not be empty here.");
+ PADDLE_ENFORCE_EQ(
+ node->inputs.empty(), false,
+ platform::errors::PreconditionNotMet(
+ "Node(%s)'s input should not be empty here.", node->Name()));
auto op_node = node->inputs.front();
- PADDLE_ENFORCE(op_node->IsOp());
+ PADDLE_ENFORCE_EQ(op_node->IsOp(), true,
+ platform::errors::PreconditionNotMet(
+ "Node(%s) should be an OP node.", op_node->Name()));
op_node->outputs.erase(
remove_if(
op_node->outputs.begin(), op_node->outputs.end(),
@@ -85,7 +92,9 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
const std::unordered_map> &vars_set,
const std::unordered_map &fused_vars_name,
const std::vector &adam_ops, ir::Graph *graph) const {
- PADDLE_ENFORCE_GT(adam_ops.size(), static_cast(0));
+ PADDLE_ENFORCE_GT(
+ adam_ops.size(), static_cast(0),
+ platform::errors::InvalidArgument("No adam op in the graph."));
// Check attributions
// NOTE: If new attribution is added, the following code maybe need change.
@@ -102,22 +111,58 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
int64_t, adam_ops[0]->Op()->GetAttr("min_row_size_to_use_multithread"));
for (auto &adam_op : adam_ops) {
PADDLE_ENFORCE_EQ(
- beta1, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta1")));
+ beta1, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta1")),
+ platform::errors::PreconditionNotMet(
+ "All adam Op's attr(beta1) must be same, but there are two "
+ "different "
+ "value: %f, %f.",
+ beta1, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta1"))));
PADDLE_ENFORCE_EQ(
- beta2, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta2")));
+ beta2, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta2")),
+ platform::errors::PreconditionNotMet(
+ "All adam Op's attr(beta2) must be same, but there are two "
+ "different "
+ "value: %f, %f.",
+ beta2, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta2"))));
PADDLE_ENFORCE_EQ(
- epsilon, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")));
+ epsilon, BOOST_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")),
+ platform::errors::PreconditionNotMet(
+ "All adam Op's attr(epsilon) must be same, but there are two "
+ "different "
+ "value: %f, %f.",
+ epsilon,
+ BOOST_GET_CONST(float, adam_op->Op()->GetAttr("epsilon"))));
PADDLE_ENFORCE_EQ(
- lazy_mode,
- BOOST_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")));
+ lazy_mode, BOOST_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")),
+ platform::errors::PreconditionNotMet(
+ "All adam Op's attr(lazy_mode) must be same, but there are two "
+ "different "
+ "value: %d, %d.",
+ lazy_mode,
+ BOOST_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode"))));
PADDLE_ENFORCE_EQ(
min_row_size_to_use_multithread,
BOOST_GET_CONST(int64_t, adam_op->Op()->GetAttr(
- "min_row_size_to_use_multithread")));
+ "min_row_size_to_use_multithread")),
+ platform::errors::PreconditionNotMet(
+ "All adam Op's attr(min_row_size_to_use_multithread) must be "
+ "same, but there are two different value: %I64, %I64.",
+ min_row_size_to_use_multithread,
+ BOOST_GET_CONST(
+ int64_t,
+ adam_op->Op()->GetAttr("min_row_size_to_use_multithread"))));
PADDLE_ENFORCE_EQ(
op_role,
BOOST_GET_CONST(int, adam_op->Op()->GetAttr(
- OpProtoAndCheckerMaker::OpRoleAttrName())));
+ OpProtoAndCheckerMaker::OpRoleAttrName())),
+ platform::errors::PreconditionNotMet(
+ "All adam Op's attr(op_role) must be same, but there are two "
+ "different "
+ "value: %d, %d.",
+ op_role,
+ BOOST_GET_CONST(int,
+ adam_op->Op()->GetAttr(
+ OpProtoAndCheckerMaker::OpRoleAttrName()))));
}
// NOTE: fused_var is only exist in scope, so the graph doesn't have
@@ -154,7 +199,10 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
const std::string &fused_var_name,
const std::vector &adam_ops,
ir::Graph *graph) const {
- PADDLE_ENFORCE_EQ(beta_name.size(), adam_ops.size());
+ PADDLE_ENFORCE_EQ(beta_name.size(), adam_ops.size(),
+ platform::errors::InvalidArgument(
+ "Beta name size(%d) must equal to adam op size(%d).",
+ beta_name.size(), adam_ops.size()));
const std::string scale_op_name = "scale";
// Get the scale_ops of dealing the adam's beta var.
@@ -168,7 +216,9 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
return var_node->Var() &&
var_node->Var()->Name() == beta_1_pow_name;
});
- PADDLE_ENFORCE(beta_pow_iter != adam_ops[i]->inputs.end());
+ PADDLE_ENFORCE_NE(beta_pow_iter, adam_ops[i]->inputs.end(),
+ platform::errors::NotFound(
+ "Can not find %s in adam ops.", beta_1_pow_name));
auto beta_pow_node = *beta_pow_iter;
auto scale_op_iter = std::find_if(
@@ -176,11 +226,18 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
[&scale_op_name](ir::Node *op_node) -> bool {
return op_node->Op() && op_node->Op()->Type() == scale_op_name;
});
- PADDLE_ENFORCE(scale_op_iter != beta_pow_node->outputs.end());
+ PADDLE_ENFORCE_NE(
+ scale_op_iter, beta_pow_node->outputs.end(),
+ platform::errors::NotFound("Can not find %s in beta pow node.",
+ scale_op_name));
scale_ops.emplace_back(*scale_op_iter);
}
- PADDLE_ENFORCE_EQ(scale_ops.size(), beta_name.size());
+ PADDLE_ENFORCE_EQ(
+ scale_ops.size(), beta_name.size(),
+ platform::errors::PreconditionNotMet(
+ "Beta name size(%d) must equal to scale ops size(%d).",
+ beta_name.size(), scale_ops.size()));
VLOG(6) << "The number of scale op is " << scale_ops.size() << ".";
// Check attributions
// NOTE: If new attribution is added, the following code maybe need change.
@@ -193,16 +250,40 @@ class FuseAdamOpPass : public FuseOptimizerOpPass {
BOOST_GET_CONST(bool, scale_ops[0]->Op()->GetAttr("bias_after_scale"));
for (auto &scale_op : scale_ops) {
PADDLE_ENFORCE_EQ(
- scale, BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")));
+ scale, BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")),
+ platform::errors::PreconditionNotMet(
+ "All scale Op's attr(scale) must be same, but there are two "
+ "different "
+ "value: %f, %f.",
+ scale, BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale"))));
PADDLE_ENFORCE_EQ(
- bias, BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")));
+ bias, BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")),
+ platform::errors::PreconditionNotMet(
+ "All scale Op's attr(bias) must be same, but there are two "
+ "different "
+ "value: %f, %f.",
+ bias, BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias"))));
PADDLE_ENFORCE_EQ(
bias_after_scale,
- BOOST_GET_CONST(bool, scale_op->Op()->GetAttr("bias_after_scale")));
+ BOOST_GET_CONST(bool, scale_op->Op()->GetAttr("bias_after_scale")),
+ platform::errors::PreconditionNotMet(
+ "All scale Op's attr(bias_after_scale) must be same, but there "
+ "are two different value: %d, %d.",
+ bias_after_scale,
+ BOOST_GET_CONST(bool,
+ scale_op->Op()->GetAttr("bias_after_scale"))));
PADDLE_ENFORCE_EQ(
op_role,
BOOST_GET_CONST(int, scale_op->Op()->GetAttr(
- OpProtoAndCheckerMaker::OpRoleAttrName())));
+ OpProtoAndCheckerMaker::OpRoleAttrName())),
+ platform::errors::PreconditionNotMet(
+ "All scale Op's attr(op_role) must be same, but there are two "
+ "different "
+ "value: %d, %d.",
+ op_role,
+ BOOST_GET_CONST(int,
+ scale_op->Op()->GetAttr(
+ OpProtoAndCheckerMaker::OpRoleAttrName()))));
}
// NOTE: fused_var is only exist in scope, so the graph doesn't have
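
The long run of checks above encodes one rule: adam ops, and the scale ops feeding their beta-pow inputs, may only be fused when every hyper-parameter agrees, and a mismatch must report both conflicting values. A sketch of that consistency sweep; CheckAllEqual is hypothetical.

#include <stdexcept>
#include <string>
#include <vector>

// Verify that one attribute holds the same value across all ops slated
// for fusion; report the first disagreement with both values.
void CheckAllEqual(const std::vector<float>& values, const std::string& attr) {
  for (float v : values) {
    if (v != values.front())
      throw std::runtime_error("All adam ops' attr(" + attr +
                               ") must be the same, but got " +
                               std::to_string(values.front()) + " and " +
                               std::to_string(v) + ".");
  }
}

int main() {
  CheckAllEqual({0.9f, 0.9f, 0.9f}, "beta1");  // passes: all ops agree
  return 0;
}
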
diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc
index f70745be1bd60..43ec8bff5edc1 100644
--- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc
@@ -37,7 +37,9 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass {
const std::unordered_map> &vars_set,
const std::unordered_map &fused_vars_name,
const std::vector &momentum_ops, ir::Graph *graph) const {
- PADDLE_ENFORCE_GT(momentum_ops.size(), static_cast(0));
+ PADDLE_ENFORCE_GT(
+ momentum_ops.size(), static_cast(0),
+ platform::errors::InvalidArgument("Momentum ops must not be empyt."));
// Check attributions
// NOTE: If new attribution is added, the following code maybe need change.
@@ -50,14 +52,32 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass {
for (auto &momentum_op : momentum_ops) {
PADDLE_ENFORCE_EQ(
- mu, BOOST_GET_CONST(float, momentum_op->Op()->GetAttr("mu")));
+ mu, BOOST_GET_CONST(float, momentum_op->Op()->GetAttr("mu")),
+ platform::errors::InvalidArgument(
+ "All momentum Op's attr(mu) must be same, but there are two "
+ "different "
+ "value: %f, %f.",
+ mu, BOOST_GET_CONST(float, momentum_op->Op()->GetAttr("mu"))));
PADDLE_ENFORCE_EQ(
use_nesterov,
- BOOST_GET_CONST(bool, momentum_op->Op()->GetAttr("use_nesterov")));
+ BOOST_GET_CONST(bool, momentum_op->Op()->GetAttr("use_nesterov")),
+ platform::errors::InvalidArgument(
+ "All momentum Op's attr(use_nesterov) must be same, but there "
+ "are two different value: %d, %d.",
+ use_nesterov, BOOST_GET_CONST(bool, momentum_op->Op()->GetAttr(
+ "use_nesterov"))));
PADDLE_ENFORCE_EQ(
op_role,
BOOST_GET_CONST(int, momentum_op->Op()->GetAttr(
- OpProtoAndCheckerMaker::OpRoleAttrName())));
+ OpProtoAndCheckerMaker::OpRoleAttrName())),
+ platform::errors::InvalidArgument(
+ "All momentum Op's attr(op_role) must be same, but there are two "
+ "different "
+ "value: %d, %d.",
+ op_role,
+ BOOST_GET_CONST(int,
+ momentum_op->Op()->GetAttr(
+ OpProtoAndCheckerMaker::OpRoleAttrName()))));
}
// NOTE: fused_var is only exist in scope, so the graph doesn't have
diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc
index 35bdfde96bc3c..fa86db891f881 100644
--- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_optimizer_op_pass.cc
@@ -41,10 +41,12 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const {
for (auto &node : topo_nodes) {
if (node->Op()->Type() == fuse_op_type) {
auto grad_name = node->Op()->Input(kGrad);
- PADDLE_ENFORCE_EQ(grad_name.size(), static_cast(1),
- "The %s operator has multiple gradient input. Expected "
- "it to only have one gradient input.",
- fuse_op_type);
+ PADDLE_ENFORCE_EQ(
+ grad_name.size(), static_cast(1),
+ platform::errors::InvalidArgument(
+ "The %s operator has multiple gradient input. Expected "
+ "it to only have one gradient input.",
+ fuse_op_type));
if (IsLoDTensorType(GetTypeOfVar(vars_info, grad_name[0]))) {
opt_nodes.emplace_back(node);
}
@@ -96,7 +98,8 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const {
VLOG(6) << var_name << ": " << fused_var_name;
PADDLE_ENFORCE_EQ(
fused_var_set.count(fused_var_name), 0,
- platform::errors::AlreadyExists("The fused variable already exists."));
+ platform::errors::AlreadyExists(
+ "The fused variable(%s) already exists.", fused_var_name));
fused_var_set.insert(fused_var_name);
fused_vars_name.emplace(var_name, fused_var_name);
}
@@ -110,7 +113,10 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const {
result.Get(details::kParamsAndDenseGrads);
PADDLE_ENFORCE_LE(
params_and_dense_grads.size(), aux_var_map.at(kGrad).size(),
- "The number of dense gradients should be little than optimizer ops.");
+ platform::errors::InvalidArgument(
+ "The number of dense gradients(%d) should be "
+ "little than optimizer ops(%d).",
+ params_and_dense_grads.size(), aux_var_map.at(kGrad).size()));
std::unordered_set opt_grad_set(aux_var_map.at(kGrad).size());
for (auto &p_g : params_and_dense_grads) {
@@ -130,13 +136,14 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const {
// some gradient's name maybe changed.
if (new_grad_idx.size() == 0) {
if (!result.Has(details::kFusedGrads)) {
- PADDLE_THROW(
+ PADDLE_THROW(platform::errors::PreconditionNotMet(
"The coalesce_grad_tensor_pass should "
- "be called before this pass.");
+ "be called before this pass."));
}
auto &fused_grad = result.Get(details::kFusedGrads);
PADDLE_ENFORCE_NE(fused_grad.size(), 0,
- "The fused gradient should not be empty.");
+ platform::errors::NotFound(
+ "The fused gradient should not be empty."));
if (fused_grad.size() > 1) {
// Note(chenweihang): Because the dtype of those gradients is not
// unified,so the number of fused gradients is more than one,
@@ -146,8 +153,9 @@ void FuseOptimizerOpPass::ApplyImpl(ir::Graph *graph) const {
auto &fused_vars = result.Get(details::kFusedVars);
auto iter =
std::find(fused_vars.begin(), fused_vars.end(), fused_grad.front());
- PADDLE_ENFORCE_EQ(iter != fused_vars.end(), true,
- "Not found the fused gradient variable.");
+ PADDLE_ENFORCE_EQ(
+ iter != fused_vars.end(), true,
+ platform::errors::NotFound("Not found the fused gradient variable."));
fused_vars_name[kGrad] = fused_grad.front();
// Sort the parameters and auxiliary variables according
@@ -334,16 +342,24 @@ void FuseOptimizerOpPass::FuseGradientsToContinuousSpace(
// The Gradients should not be reused during memory optimization.
for (auto &grad_var_name : grads) {
auto iter = vars_info.find(grad_var_name);
- PADDLE_ENFORCE_EQ(iter != vars_info.end(), true,
- "The gradient variable %s is not found.", grad_var_name);
- PADDLE_ENFORCE_EQ(!iter->second.empty(), true,
- "The gradient var node %s is not found.", grad_var_name);
- PADDLE_ENFORCE_NOT_NULL(iter->second.front()->Var(),
- "The gradient var node is null.");
+ PADDLE_ENFORCE_EQ(
+ iter != vars_info.end(), true,
+ platform::errors::NotFound("The gradient variable %s is not found.",
+ grad_var_name));
+ PADDLE_ENFORCE_EQ(
+ !iter->second.empty(), true,
+ platform::errors::NotFound("The gradient var node %s is not found.",
+ grad_var_name));
+ PADDLE_ENFORCE_NOT_NULL(
+ iter->second.front()->Var(),
+ platform::errors::InvalidArgument("The gradient var(%s) node is null.",
+ grad_var_name));
PADDLE_ENFORCE_EQ(
IsLoDTensorType(iter->second.front()->Var()->GetType()), true,
- "Currently the gradient type only should be LoDTensor when "
- "fusing optimizer ops.");
+ platform::errors::InvalidArgument(
+ "Currently the gradient(%s) type only should be LoDTensor when "
+ "fusing optimizer ops.",
+ grad_var_name));
for (auto var : iter->second) {
pinned_var_set.insert(var->Var()->Name());
}
@@ -382,11 +398,14 @@ const VarDesc *FuseOptimizerOpPass::GetVarDescFromVarsInfo(
const std::string &var_name) const {
auto grad_iter = vars_info.find(var_name);
PADDLE_ENFORCE_EQ(grad_iter != vars_info.end(), true,
- "The gradient variable %s is not found.", var_name);
+ platform::errors::NotFound(
+ "The gradient variable %s is not found.", var_name));
PADDLE_ENFORCE_EQ(!grad_iter->second.empty(), true,
- "The gradient var node %s is not found.", var_name);
+ platform::errors::NotFound(
+ "The gradient var node %s is not found.", var_name));
PADDLE_ENFORCE_NOT_NULL(grad_iter->second.front()->Var(),
- "The gradient var node is null.");
+ platform::errors::InvalidArgument(
+ "The gradient var(%s) node is null.", var_name));
return grad_iter->second.front()->Var();
}
@@ -428,8 +447,9 @@ void FuseOptimizerOpPass::SortParametersAndAuxVars(
const std::vector> ¶ms_grads,
std::unordered_map> *aux_var_map,
std::vector *ops) const {
- PADDLE_ENFORCE_NE(aux_var_map->count(kGrad), static_cast(0),
- "The gradient variable doesn‘t exist.");
+ PADDLE_ENFORCE_NE(
+ aux_var_map->count(kGrad), static_cast(0),
+ platform::errors::NotFound("The gradient variable doesn‘t exist."));
auto &grad_vec = aux_var_map->at(kGrad);
std::vector grad_sort_idx;
@@ -437,8 +457,10 @@ void FuseOptimizerOpPass::SortParametersAndAuxVars(
for (auto &p_g : params_grads) {
auto iter = std::find(grad_vec.begin(), grad_vec.end(), p_g.second);
- PADDLE_ENFORCE_EQ(iter != grad_vec.end(), true,
- "%s is not found in gradient vector", p_g.second);
+ PADDLE_ENFORCE_EQ(
+ iter != grad_vec.end(), true,
+ platform::errors::NotFound(
+ "Parameter@Grad(%s) is not found in gradient vector.", p_g.second));
auto idx = std::distance(grad_vec.begin(), iter);
grad_sort_idx.emplace_back(idx);
}
@@ -477,9 +499,10 @@ void FuseOptimizerOpPass::GetFusingVarNamesMap(
for (auto &var_n : aux_vars_name) {
auto arg_names = node->Op()->Input(var_n);
PADDLE_ENFORCE_EQ(arg_names.size(), static_cast(1),
- "The input variable of optimizer to be fused is "
- "invalid. Excepted %s only has one %s input.",
- node->Op()->Type(), var_n);
+ platform::errors::InvalidArgument(
+ "The input variable of optimizer to be fused is "
+ "invalid. Excepted %s only has one %s input.",
+ node->Op()->Type(), var_n));
(*aux_args_name)[var_n].emplace_back(arg_names[0]);
}
}
@@ -525,10 +548,14 @@ void FuseOptimizerOpPass::InsertInputAndOutputForFusedOpNode(
auto deal_with_ctrl_vars = [&out_dep_vars, ¬_useful_vars,
&fused_opt_node](ir::Node *ctr_var_node) {
PADDLE_ENFORCE_EQ(ctr_var_node->inputs.size(), 1,
- "The control var node has nultiple inputs.");
+ platform::errors::InvalidArgument(
+ "The control var(%s) node has multiple inputs.",
+ ctr_var_node->Name()));
if (ctr_var_node->inputs.front() == fused_opt_node) {
- PADDLE_ENFORCE_GT(ctr_var_node->outputs.size(), 0,
- "The control var node has no output.");
+ PADDLE_ENFORCE_GT(
+ ctr_var_node->outputs.size(), 0,
+ platform::errors::InvalidArgument(
+ "The control var(%s) node has no output.", ctr_var_node->Name()));
auto output_ops = ctr_var_node->outputs;
output_ops.erase(std::remove_if(output_ops.begin(), output_ops.end(),
[&fused_opt_node](const ir::Node *node) {
diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc
index 1504f00b27cd6..70d4d2b865230 100644
--- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc
@@ -35,7 +35,9 @@ class FuseSgdOpPass : public FuseOptimizerOpPass {
const std::unordered_map> &vars_set,
const std::unordered_map &fused_vars_name,
const std::vector &sgd_ops, ir::Graph *graph) const {
- PADDLE_ENFORCE_GT(sgd_ops.size(), static_cast(0));
+ PADDLE_ENFORCE_GT(
+ sgd_ops.size(), static_cast(0),
+ platform::errors::InvalidArgument("SGD ops must not be empyt."));
// NOTE: fused_var is only exist in scope, so the graph doesn't have
// fused_var node.
diff --git a/paddle/fluid/framework/ir/fuse_pass_base.cc b/paddle/fluid/framework/ir/fuse_pass_base.cc
index c7bf53f3d6119..e6fb1302e275f 100644
--- a/paddle/fluid/framework/ir/fuse_pass_base.cc
+++ b/paddle/fluid/framework/ir/fuse_pass_base.cc
@@ -25,14 +25,19 @@ void FusePassBase::Init(const std::string& repr, Graph* graph) const {
}
Scope* FusePassBase::param_scope() const {
- PADDLE_ENFORCE(graph_->Has(kParamScopeAttr));
+ PADDLE_ENFORCE_EQ(graph_->Has(kParamScopeAttr), true,
+ platform::errors::InvalidArgument(
+ "Graph must have kParamScopeAttr attribute."));
auto& scope = graph_->Get(kParamScopeAttr);
return &scope;
}
void FusePassBase::AddStatis(int count_of_fused) const {
- PADDLE_ENFORCE(graph_);
- PADDLE_ENFORCE(!repr_.empty());
+ PADDLE_ENFORCE_NOT_NULL(
+ graph_, platform::errors::InvalidArgument("Graph cannot be nullptr."));
+ PADDLE_ENFORCE_EQ(repr_.empty(), false,
+ platform::errors::InvalidArgument(
+ "Fuse pass must be initialized with a name."));
if (!graph_->Has(kFuseStatisAttr)) {
graph_->Set(kFuseStatisAttr, new std::unordered_map);
}
diff --git a/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc b/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc
index c4e6b6e6a52ec..56ca98b566070 100644
--- a/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc
+++ b/paddle/fluid/framework/ir/fuse_relu_depthwise_conv_pass.cc
@@ -31,7 +31,8 @@ void FuseReluDepthwiseConvPass::ApplyImpl(ir::Graph *graph) const {
ir::Graph *FuseReluDepthwiseConvPass::FuseReluDepthwiseConv(
ir::Graph *graph, bool only_forward) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
if (only_forward)
FusePassBase::Init("relu_depthwise_conv_only_forward", graph);
else
@@ -110,23 +111,45 @@ ir::Graph *FuseReluDepthwiseConvPass::FuseReluDepthwiseConv(
xg_var = subgraph.at(xg)->Var();
}
- PADDLE_ENFORCE_EQ(layer_op->Input("Input").size(), 1UL);
- PADDLE_ENFORCE_EQ(layer_op->Input("Input")[0], y_var->Name());
+ PADDLE_ENFORCE_EQ(layer_op->Input("Input").size(), 1UL,
+ platform::errors::InvalidArgument(
+ "Op(%s)'s input size(%d) must be 1.",
+ layer_op->Type(), layer_op->Input("Input").size()));
+ PADDLE_ENFORCE_EQ(
+ layer_op->Input("Input")[0], y_var->Name(),
+ platform::errors::InvalidArgument(
+ "Op(%s)'s input name(%s) must be %s.", layer_op->Type(),
+ layer_op->Input("Input")[0], y_var->Name()));
layer_op->SetInput("Input", {x_var->Name()});
subgraph.at(layer)->inputs.push_back(subgraph.at(x));
subgraph.at(x)->outputs.push_back(subgraph.at(layer));
VLOG(4) << "replace " << y_var->Name() << " -> " << x_var->Name();
if (!only_forward) {
- PADDLE_ENFORCE_EQ(layer_g_op->Input("Input").size(), 1UL);
- PADDLE_ENFORCE_EQ(layer_g_op->Input("Input")[0], y_var->Name());
+ PADDLE_ENFORCE_EQ(
+ layer_g_op->Input("Input").size(), 1UL,
+ platform::errors::InvalidArgument(
+ "Op(%s)'s input size(%d) must be 1.", layer_g_op->Type(),
+ layer_g_op->Input("Input").size()));
+ PADDLE_ENFORCE_EQ(
+ layer_g_op->Input("Input")[0], y_var->Name(),
+ platform::errors::InvalidArgument(
+ "Op(%s)'s input name(%s) must be %s.", layer_g_op->Type(),
+ layer_g_op->Input("Input")[0], y_var->Name()));
layer_g_op->SetInput("Input", {x_var->Name()});
subgraph.at(layer_g)->inputs.push_back(subgraph.at(x));
subgraph.at(x)->outputs.push_back(subgraph.at(layer_g));
- PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input")).size(), 1UL);
- PADDLE_ENFORCE_EQ(layer_g_op->Output(GradVarName("Input"))[0],
- yg_var->Name());
+ PADDLE_ENFORCE_EQ(
+ layer_g_op->Output(GradVarName("Input")).size(), 1UL,
+ platform::errors::InvalidArgument(
+ "Op(%s)'s input size(%d) must be 1.", layer_g_op->Type(),
+ layer_g_op->Output(GradVarName("Input")).size()));
+ PADDLE_ENFORCE_EQ(
+ layer_g_op->Output(GradVarName("Input"))[0], yg_var->Name(),
+ platform::errors::InvalidArgument(
+ "Op(%s)'s input name(%s) must be %s.", layer_g_op->Type(),
+ layer_g_op->Output(GradVarName("Input"))[0], yg_var->Name()));
layer_g_op->SetOutput(GradVarName("Input"), {xg_var->Name()});
subgraph.at(layer_g)->outputs.push_back(subgraph.at(xg));
subgraph.at(xg)->inputs.push_back(subgraph.at(layer_g));
diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc
index b397216f0b4d1..ff0e0e65a297f 100644
--- a/paddle/fluid/framework/ir/graph_helper.cc
+++ b/paddle/fluid/framework/ir/graph_helper.cc
@@ -136,7 +136,9 @@ bool FindCircleSubGraph(const Graph &graph,
std::vector TopologySortOperations(const Graph &graph) {
std::map, ir::NodeComp>
adj_list = BuildOperationAdjList(graph);
- PADDLE_ENFORCE(!HasCircleInternal(adj_list, nullptr));
+ PADDLE_ENFORCE_EQ(HasCircleInternal(adj_list, nullptr), false,
+ platform::errors::InvalidArgument(
+ "Generated graph shouldn't contain cycle."));
std::unordered_set visited;
std::vector ret;
for (auto adj : adj_list) {
@@ -161,7 +163,11 @@ BuildOperationAdjList(const Graph &graph) {
}
for (auto &var : n->inputs) {
for (auto &adj_n : var->inputs) {
- PADDLE_ENFORCE(adj_n->NodeType() == ir::Node::Type::kOperation);
+ PADDLE_ENFORCE_EQ(
+ adj_n->NodeType(), ir::Node::Type::kOperation,
+ platform::errors::InvalidArgument(
+ "Node(%s)'s type(%d) must be kOperation type.", adj_n->Name(),
+ static_cast(adj_n->NodeType())));
VLOG(4) << "adj " << adj_n->Name() << reinterpret_cast(adj_n)
<< " -> " << n->Name() << reinterpret_cast(n)
<< " via " << var->Name() << reinterpret_cast(var);
@@ -184,7 +190,11 @@ std::map> BuildOperationOutAdjList(
}
for (auto &var : n->outputs) {
for (auto &adj_n : var->outputs) {
- PADDLE_ENFORCE(adj_n->NodeType() == ir::Node::Type::kOperation);
+ PADDLE_ENFORCE_EQ(
+ adj_n->NodeType(), ir::Node::Type::kOperation,
+ platform::errors::InvalidArgument(
+ "Node(%s)'s type(%d) must be kOperation type.", adj_n->Name(),
+ static_cast(adj_n->NodeType())));
VLOG(40) << "adj " << adj_n->Name() << reinterpret_cast(adj_n)
<< " -> " << n->Name() << reinterpret_cast(n)
<< " via " << var->Name() << reinterpret_cast(var);
@@ -359,7 +369,10 @@ size_t GraphNum(const Graph &graph) {
}
std::unique_ptr fout(
new std::ofstream(FLAGS_print_sub_graph_dir));
- PADDLE_ENFORCE(fout->good());
+ PADDLE_ENFORCE_EQ(fout->good(), true,
+ platform::errors::Unavailable(
+ "Can not open file %s for printing the graph.",
+ FLAGS_print_sub_graph_dir));
*fout << out.str();
}
}
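
TopologySortOperations now refuses cyclic graphs with an explicit message. The invariant it protects, in a compact DFS-based topological sort that detects back edges via gray/black coloring; integer ids stand in for ir::Node*.

#include <stdexcept>
#include <vector>

// Returns false as soon as a back edge (an edge into a gray node on the
// current DFS path) proves the graph cyclic.
bool Visit(int u, const std::vector<std::vector<int>>& adj,
           std::vector<int>* color, std::vector<int>* order) {
  (*color)[u] = 1;  // gray: on the current DFS path
  for (int v : adj[u]) {
    if ((*color)[v] == 1) return false;
    if ((*color)[v] == 0 && !Visit(v, adj, color, order)) return false;
  }
  (*color)[u] = 2;  // black: finished
  order->push_back(u);
  return true;
}

std::vector<int> TopoSort(const std::vector<std::vector<int>>& adj) {
  std::vector<int> color(adj.size(), 0), order;
  for (int u = 0; u < static_cast<int>(adj.size()); ++u)
    if (color[u] == 0 && !Visit(u, adj, &color, &order))
      throw std::invalid_argument("Generated graph shouldn't contain a cycle.");
  return {order.rbegin(), order.rend()};
}

int main() {
  std::vector<std::vector<int>> g{{1}, {2}, {}};  // 0 -> 1 -> 2, acyclic
  return TopoSort(g).front() == 0 ? 0 : 1;
}
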
diff --git a/paddle/fluid/framework/ir/graph_traits.cc b/paddle/fluid/framework/ir/graph_traits.cc
index abcba32a6492b..4b403c46260c6 100644
--- a/paddle/fluid/framework/ir/graph_traits.cc
+++ b/paddle/fluid/framework/ir/graph_traits.cc
@@ -37,12 +37,14 @@ NodesDFSIterator::NodesDFSIterator(const NodesDFSIterator &other)
: stack_(other.stack_), visited_(other.visited_) {}
Node &NodesDFSIterator::operator*() {
- PADDLE_ENFORCE(!stack_.empty());
+ PADDLE_ENFORCE_EQ(stack_.empty(), false, platform::errors::OutOfRange(
+ "The iterator exceeds range."));
return *stack_.top();
}
NodesDFSIterator &NodesDFSIterator::operator++() {
- PADDLE_ENFORCE(!stack_.empty(), "the iterator exceeds range");
+ PADDLE_ENFORCE_EQ(stack_.empty(), false, platform::errors::OutOfRange(
+ "The iterator exceeds range."));
visited_.insert(stack_.top());
auto *cur = stack_.top();
stack_.pop();
@@ -73,11 +75,18 @@ inline bool CheckNodeIndegreeEquals(const Node &node, size_t n) {
}
NodesTSIterator::NodesTSIterator(const std::vector &source) {
- PADDLE_ENFORCE(!source.empty(),
- "Start points of topological sorting should not be empty!");
+ PADDLE_ENFORCE_EQ(
+ source.empty(), false,
+ platform::errors::InvalidArgument(
+ "Start points of topological sorting should not be empty!"));
// CHECK all the inputs' in-degree is 0
for (auto *node : source) {
- PADDLE_ENFORCE(CheckNodeIndegreeEquals(*node, 0));
+ PADDLE_ENFORCE_EQ(
+ CheckNodeIndegreeEquals(*node, 0), true,
+ platform::errors::InvalidArgument(
+ "In start points of topological sorting, the indegree of each "
+ "point should be 0. Node(%s)'s indegree is not 0.",
+ node->Name()));
}
std::set to_visit{source.begin(), source.end()};
@@ -106,7 +115,11 @@ NodesTSIterator::NodesTSIterator(const NodesTSIterator &other)
: sorted_(other.sorted_), cursor_(other.cursor_) {}
Node &NodesTSIterator::operator*() {
- PADDLE_ENFORCE_LT(cursor_, sorted_.size());
+ PADDLE_ENFORCE_LT(
+ cursor_, sorted_.size(),
+ platform::errors::OutOfRange(
+ "The iterator exceeds range. Container size is %d, but index is %d.",
+ sorted_.size(), cursor_));
return *sorted_[cursor_];
}
@@ -128,7 +141,11 @@ bool NodesTSIterator::operator==(const NodesTSIterator &other) {
}
Node *NodesTSIterator::operator->() {
- PADDLE_ENFORCE_LT(cursor_, sorted_.size());
+ PADDLE_ENFORCE_LT(
+ cursor_, sorted_.size(),
+ platform::errors::OutOfRange(
+ "The iterator exceeds range. Container size is %d, but index is %d.",
+ sorted_.size(), cursor_));
return sorted_[cursor_];
}
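
The iterator hunks convert silent out-of-range dereferences into explicit OutOfRange errors that report both the container size and the cursor position. The same guard on a toy cursor type:

#include <cstddef>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

class CheckedCursor {
 public:
  explicit CheckedCursor(std::vector<int> v) : data_(std::move(v)) {}

  // Refuse to dereference past the end; say how far past we are.
  int& operator*() {
    if (cursor_ >= data_.size())
      throw std::out_of_range(
          "The iterator exceeds range. Container size is " +
          std::to_string(data_.size()) + ", but index is " +
          std::to_string(cursor_) + ".");
    return data_[cursor_];
  }
  CheckedCursor& operator++() {
    ++cursor_;
    return *this;
  }

 private:
  std::vector<int> data_;
  std::size_t cursor_ = 0;
};

int main() {
  CheckedCursor it(std::vector<int>{7});
  int v = *it;  // fine: cursor 0, size 1
  ++it;         // past the end now; dereferencing would throw
  return v == 7 ? 0 : 1;
}
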
diff --git a/paddle/fluid/framework/ir/graph_traits.h b/paddle/fluid/framework/ir/graph_traits.h
index f6772f9a37567..bb4212bcd33d7 100644
--- a/paddle/fluid/framework/ir/graph_traits.h
+++ b/paddle/fluid/framework/ir/graph_traits.h
@@ -15,6 +15,8 @@
#pragma once
#include
+#include
+#include
#include
#include "paddle/fluid/framework/ir/graph.h"
@@ -66,7 +68,7 @@ struct NodesDFSIterator
struct NodesTSIterator
: public std::iterator {
NodesTSIterator() = default;
- NodesTSIterator(const std::vector &source);
+ explicit NodesTSIterator(const std::vector &source);
NodesTSIterator(NodesTSIterator &&other)
: sorted_(std::move(other.sorted_)), cursor_(other.cursor_) {
other.cursor_ = 0;
@@ -104,7 +106,10 @@ struct GraphTraits {
static iterator_range TS(const Graph &g) {
auto start_points = ExtractStartPoints(g);
- PADDLE_ENFORCE(!start_points.empty());
+ PADDLE_ENFORCE_EQ(
+ start_points.empty(), false,
+ platform::errors::InvalidArgument(
+ "Start points of topological sorting should not be empty!"));
NodesTSIterator x(start_points);
return iterator_range(NodesTSIterator(start_points),
NodesTSIterator());
diff --git a/paddle/fluid/framework/ir/graph_viz_pass.cc b/paddle/fluid/framework/ir/graph_viz_pass.cc
index 7f4519ad9919d..64f5376a784c2 100644
--- a/paddle/fluid/framework/ir/graph_viz_pass.cc
+++ b/paddle/fluid/framework/ir/graph_viz_pass.cc
@@ -42,7 +42,10 @@ void GraphVizPass::ApplyImpl(ir::Graph* graph) const {
const std::string& graph_viz_path = Get(kGraphvizPath);
VLOG(3) << "draw IR graph viz to " << graph_viz_path;
std::unique_ptr fout(new std::ofstream(graph_viz_path));
- PADDLE_ENFORCE(fout->good());
+ PADDLE_ENFORCE_EQ(
+ fout->good(), true,
+ platform::errors::Unavailable(
+ "Can not open file %s for printing the graph.", graph_viz_path));
std::ostream& sout = *fout;
std::unordered_map node2dot;
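
GraphVizPass now reports a readable Unavailable-style error when the dot file cannot be opened, instead of a bare boolean enforce. The same pattern with a plain std::ofstream; the path below is a placeholder.

#include <fstream>
#include <stdexcept>
#include <string>

// Open the file for writing or throw with the offending path.
std::ofstream OpenOrThrow(const std::string& path) {
  std::ofstream fout(path);
  if (!fout.good())
    throw std::runtime_error("Can not open file " + path +
                             " for printing the graph.");
  return fout;
}

int main() {
  std::ofstream fout = OpenOrThrow("graph.dot");  // assumes writable cwd
  fout << "digraph G {}\n";
  return 0;
}
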
diff --git a/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc b/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc
index a39901e63bf65..c8dfa02f469a3 100644
--- a/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc
+++ b/paddle/fluid/framework/ir/identity_scale_op_clean_pass.cc
@@ -64,7 +64,11 @@ void IdentityScaleOpCleanPass::ApplyImpl(ir::Graph* graph) const {
for (auto& parameter : *pre_op_desc->Proto()->mutable_outputs()) {
auto* arguments = parameter.mutable_arguments();
auto it = std::find(arguments->begin(), arguments->end(), scale_in_name);
- PADDLE_ENFORCE(it != arguments->end());
+ PADDLE_ENFORCE_NE(
+ it, arguments->end(),
+ platform::errors::NotFound(
+ "Can not find input variable(%s) from scale op(%s).",
+ scale_in_name, pre_op_desc->Type()));
*it = scale_out_name;
}
diff --git a/paddle/fluid/framework/ir/lock_free_optimize_pass.cc b/paddle/fluid/framework/ir/lock_free_optimize_pass.cc
index a0cb7e93306d2..864a0379988fa 100644
--- a/paddle/fluid/framework/ir/lock_free_optimize_pass.cc
+++ b/paddle/fluid/framework/ir/lock_free_optimize_pass.cc
@@ -33,7 +33,8 @@ const char kSumGradOpName[] = "sum";
const char kOptimizerType[] = "sgd";
void LockFreeOptimizePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
// We could collect all weights' name from SGD, where
// W1 <- SGD(W0, Grad0)
@@ -41,7 +42,10 @@ void LockFreeOptimizePass::ApplyImpl(ir::Graph* graph) const {
for (auto* node : graph->Nodes()) {
if (IsOpNamed(node, kOptimizerType)) {
auto& param_out_vars = node->Op()->Output("ParamOut");
- PADDLE_ENFORCE(param_out_vars.size() == 1u);
+ PADDLE_ENFORCE_EQ(
+ param_out_vars.size(), 1u,
+ platform::errors::InvalidArgument(
+ "In op(%s), find output(ParamOut) failed.", node->Name()));
weight_var_set.insert(param_out_vars[0]);
}
}
@@ -95,12 +99,19 @@ void LockFreeOptimizePass::ApplyImpl(ir::Graph* graph) const {
VLOG(3) << "Found forward_op " << forward_op->Name();
- PADDLE_ENFORCE(forward_op);
+ PADDLE_ENFORCE_NOT_NULL(
+ forward_op, platform::errors::NotFound(
+ "Can not find forward op for backword op(%s).",
+ backward_op->Name()));
Node* new_optimizer_node = CreateNewSGDNode(
graph, forward_op, backward_op, node, opt_node);
- PADDLE_ENFORCE(new_optimizer_node);
+ PADDLE_ENFORCE_NOT_NULL(
+ new_optimizer_node,
+ platform::errors::InvalidArgument(
+ "Create new SGD node failed, backward op is %s.",
+ backward_op->Name()));
}
}
}
@@ -144,11 +155,21 @@ void LockFreeOptimizePass::ApplyImpl(ir::Graph* graph) const {
ir::Node* LockFreeOptimizePass::CreateNewSGDNode(
ir::Graph* graph, ir::Node* forward_node, ir::Node* backward_node,
ir::Node* grad_sum_node, ir::Node* optimize_node) const {
- PADDLE_ENFORCE(graph);
- PADDLE_ENFORCE(forward_node);
- PADDLE_ENFORCE(backward_node);
- PADDLE_ENFORCE(grad_sum_node);
- PADDLE_ENFORCE(optimize_node);
+ PADDLE_ENFORCE_NOT_NULL(graph,
+ platform::errors::InvalidArgument(
+ "Input argument graph cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ forward_node, platform::errors::InvalidArgument(
+ "Input argument forward_node cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ backward_node, platform::errors::InvalidArgument(
+ "Input argument backward_node cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ grad_sum_node, platform::errors::InvalidArgument(
+ "Input argument grad_sum_node cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ optimize_node, platform::errors::InvalidArgument(
+ "Input argument optimize_node cannot be nullptr."));
// find the grad var node between the grad sum node and backward_node
std::vector<ir::Node*> grad_vars =
@@ -159,7 +180,8 @@ ir::Node* LockFreeOptimizePass::CreateNewSGDNode(
grad_node = node;
}
}
- PADDLE_ENFORCE(grad_node);
+ PADDLE_ENFORCE_NOT_NULL(grad_node, platform::errors::NotFound(
+ "Can not find control dep variable."));
// create a new SGD node
OpDesc* old_desc = optimize_node->Op();
@@ -212,8 +234,14 @@ ir::Node* LockFreeOptimizePass::CreateNewSGDNode(
}
// SGD must have only one param and LR in
- PADDLE_ENFORCE(old_desc->Input("LearningRate").size() == 1u);
- PADDLE_ENFORCE(old_desc->Input("Param").size() == 1u);
+ PADDLE_ENFORCE_EQ(
+ old_desc->Input("LearningRate").size(), 1u,
+ platform::errors::InvalidArgument(
+ "In op(%s), find input(LearningRate) failed.", old_desc->Type()));
+ PADDLE_ENFORCE_EQ(
+ old_desc->Input("Param").size(), 1u,
+ platform::errors::InvalidArgument("In op(%s), find input(Param) failed.",
+ old_desc->Type()));
// LR and weight nodes should be copied
for (Node* upstream_node : optimize_node->inputs) {
@@ -245,9 +273,17 @@ std::vector<ir::Node*> LockFreeOptimizePass::FindConnectedNode(
void LockFreeOptimizePass::ReplaceUpstreamNode(
ir::Node* upstream_node, ir::Node* old_optimizer_node,
ir::Node* new_optimizer_node) const {
- PADDLE_ENFORCE(upstream_node);
- PADDLE_ENFORCE(old_optimizer_node);
- PADDLE_ENFORCE(new_optimizer_node);
+ PADDLE_ENFORCE_NOT_NULL(
+ upstream_node, platform::errors::InvalidArgument(
+ "Input argument upstream_node cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ old_optimizer_node,
+ platform::errors::InvalidArgument(
+ "Input argument old_optimizer_node cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ new_optimizer_node,
+ platform::errors::InvalidArgument(
+ "Input argument new_optimizer_node cannot be nullptr."));
// Remove the old_optimizer_node from upstream_node's outputs vector
auto& output_node_vec = upstream_node->outputs;
@@ -268,8 +304,14 @@ void LockFreeOptimizePass::ReplaceUpstreamNode(
void LockFreeOptimizePass::ReplaceAllDownstreamNode(
ir::Node* old_optimizer_node, ir::Node* new_optimizer_node) const {
- PADDLE_ENFORCE(old_optimizer_node);
- PADDLE_ENFORCE(new_optimizer_node);
+ PADDLE_ENFORCE_NOT_NULL(
+ old_optimizer_node,
+ platform::errors::InvalidArgument(
+ "Input argument old_optimizer_node cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ new_optimizer_node,
+ platform::errors::InvalidArgument(
+ "Input argument new_optimizer_node cannot be nullptr."));
for (ir::Node* downstream_node : old_optimizer_node->outputs) {
// Remove the old_optimizer_node from downstream_node's inputs vector
@@ -292,8 +334,12 @@ void LockFreeOptimizePass::ReplaceAllDownstreamNode(
ir::Node* LockFreeOptimizePass::FindForwardOpViaBackwardOp(
ir::Graph* graph, ir::Node* backward_node) const {
- PADDLE_ENFORCE(graph);
- PADDLE_ENFORCE(backward_node);
+ PADDLE_ENFORCE_NOT_NULL(graph,
+ platform::errors::InvalidArgument(
+ "Input argument graph cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(
+ backward_node, platform::errors::InvalidArgument(
+ "Input argument backward_node cannot be nullptr."));
// strip the suffix _grad of backward_node's name
std::string forward_op_name = backward_node->Name();
diff --git a/paddle/fluid/framework/ir/lock_free_optimize_pass.h b/paddle/fluid/framework/ir/lock_free_optimize_pass.h
index 9c923480bac26..f38f48fcd92a6 100644
--- a/paddle/fluid/framework/ir/lock_free_optimize_pass.h
+++ b/paddle/fluid/framework/ir/lock_free_optimize_pass.h
@@ -87,34 +87,46 @@ class LockFreeOptimizePass : public Pass {
ir::Node* downstream_node) const;
inline bool IsOpNamed(ir::Node* node, const std::string& name) const {
- PADDLE_ENFORCE(node);
+ PADDLE_ENFORCE_NOT_NULL(node,
+ platform::errors::InvalidArgument(
+ "Input argument node cannot be nullptr."));
return node->NodeType() == Node::Type::kOperation && node->Name() == name;
}
inline bool IsVarNamed(ir::Node* node, const std::string& name) const {
- PADDLE_ENFORCE(node);
+ PADDLE_ENFORCE_NOT_NULL(node,
+ platform::errors::InvalidArgument(
+ "Input argument node cannot be nullptr."));
return node->NodeType() == Node::Type::kVariable && node->Name() == name;
}
inline bool IsVarNameEndsWith(ir::Node* node, const std::string& name) const {
- PADDLE_ENFORCE(node);
+ PADDLE_ENFORCE_NOT_NULL(node,
+ platform::errors::InvalidArgument(
+ "Input argument node cannot be nullptr."));
return node->NodeType() == Node::Type::kVariable &&
boost::algorithm::ends_with(node->Name(), name);
}
inline bool IsVarNameContains(ir::Node* node, const std::string& name) const {
- PADDLE_ENFORCE(node);
+ PADDLE_ENFORCE_NOT_NULL(node,
+ platform::errors::InvalidArgument(
+ "Input argument node cannot be nullptr."));
return node->NodeType() == Node::Type::kVariable &&
node->Name().find(name) != std::string::npos;
}
inline bool IsControlDepFrom(ir::Node* ctrl_dep_node, ir::Node* node) const {
- PADDLE_ENFORCE(ctrl_dep_node);
- PADDLE_ENFORCE(node);
+ PADDLE_ENFORCE_NOT_NULL(
+ ctrl_dep_node, platform::errors::InvalidArgument(
+ "Input argument ctrl_dep_node cannot be nullptr."));
+ PADDLE_ENFORCE_NOT_NULL(node,
+ platform::errors::InvalidArgument(
+ "Input argument node cannot be nullptr."));
return IsControlDepVar(*ctrl_dep_node) &&
ctrl_dep_node->inputs.size() >= 1u &&
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc
index 6ce14203629e0..b1afa47910fad 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_cross_op_memory_reuse_pass.cc
@@ -116,7 +116,10 @@ std::vector<OpHandleBase *> BufferSharedCrossOpMemoryReusePass::SortOp(
graph_view.BreadthFirstVisit(
[&](OpHandleBase *cur_op) { sorted_ops.emplace_back(cur_op); });
PADDLE_ENFORCE_EQ(sorted_ops.size(), graph_view.OpNumber(),
- "There are unvisited ops");
+ platform::errors::InvalidArgument(
+ "Sorted ops size(%d) not equal to graph op size(%d). "
+ "There are unvisited ops.",
+ sorted_ops.size(), graph_view.OpNumber()));
return sorted_ops;
}
@@ -181,7 +184,9 @@ void BufferSharedCrossOpMemoryReusePass::RunOnScopeIdx(size_t idx) const {
auto *out_node = *(out_nodes.begin());
auto *out_var =
dynamic_cast(&(out_node->Wrapper()));
- PADDLE_ENFORCE_NOT_NULL(out_var);
+ PADDLE_ENFORCE_NOT_NULL(
+ out_var, platform::errors::NotFound(
+ "Can not find a valid Var Node for Var %s.", out_arg));
// If out_arg is not reusable, skip it
if (!IsOutVarReusable(*out_var)) {
@@ -269,7 +274,8 @@ size_t BufferSharedCrossOpMemoryReusePass::ResolveDependencyBetween(
auto op_dep = GetOpDep(prev_op, op);
if (op_dep == NodeDependency::kBefore) continue;
PADDLE_ENFORCE_EQ(op_dep, NodeDependency::kNoDep,
- "The graph has circle, this may be a bug");
+ platform::errors::InvalidArgument(
+ "The graph has circle, this may be a bug."));
auto iter =
std::find_if(prev_op->Outputs().begin(), prev_op->Outputs().end(),
@@ -316,9 +322,13 @@ size_t BufferSharedCrossOpMemoryReusePass::ResolveDependencyBetween(
}
void BufferSharedCrossOpMemoryReusePass::BuildOpDependencyMap() const {
- PADDLE_ENFORCE(ops_.empty(), "ops_ must be initialized here");
- PADDLE_ENFORCE(op_to_idx_.empty(), "op_to_idx_ must be initialized here");
- PADDLE_ENFORCE(deps_.empty(), "deps_ must be initialized here");
+ PADDLE_ENFORCE_EQ(ops_.empty(), true, platform::errors::InvalidArgument(
+ "Ops must be initialized here."));
+ PADDLE_ENFORCE_EQ(
+ op_to_idx_.empty(), true,
+ platform::errors::InvalidArgument("Op to idx must be initialized here."));
+ PADDLE_ENFORCE_EQ(deps_.empty(), true, platform::errors::InvalidArgument(
+ "Deps must be initialized here."));
// Toposort ops
OpGraphView graph_view(ir::FilterByNodeWrapper<OpHandleBase>(*graph_));
@@ -344,7 +354,10 @@ void BufferSharedCrossOpMemoryReusePass::BuildOpDependencyMap() const {
prev_preceding_ops.end());
}
});
- PADDLE_ENFORCE_EQ(preceding_ops.size(), op_num);
+ PADDLE_ENFORCE_EQ(preceding_ops.size(), op_num,
+ platform::errors::InvalidArgument(
+ "Preceding ops size(%d) must equal to op num(%d).",
+ preceding_ops.size(), op_num));
// Find out ComputationOpHandles only
ops_.resize(scope_num);
@@ -384,28 +397,43 @@ void BufferSharedCrossOpMemoryReusePass::BuildOpDependencyMap() const {
size_t BufferSharedCrossOpMemoryReusePass::OpIndex(
const ComputationOpHandle *op) const {
auto iter = op_to_idx_[op->GetScopeIdx()].find(op);
- PADDLE_ENFORCE(iter != op_to_idx_[op->GetScopeIdx()].end());
+ PADDLE_ENFORCE_NE(iter, op_to_idx_[op->GetScopeIdx()].end(),
+ platform::errors::NotFound(
+ "Can not find op(%s) in op_to_idx_.", op->Name()));
return iter->second;
}
NodeDependency BufferSharedCrossOpMemoryReusePass::GetOpDep(
const ComputationOpHandle *op1, const ComputationOpHandle *op2) const {
- PADDLE_ENFORCE_EQ(op1->GetScopeIdx(), op2->GetScopeIdx());
+ PADDLE_ENFORCE_EQ(op1->GetScopeIdx(), op2->GetScopeIdx(),
+ platform::errors::InvalidArgument(
+ "Op(%s) and op(%s) must in the same scope.",
+ op1->Name(), op2->Name()));
return deps_[op1->GetScopeIdx()][OpIndex(op1)][OpIndex(op2)];
}
void BufferSharedCrossOpMemoryReusePass::SetOpDep(
const ComputationOpHandle *op1, const ComputationOpHandle *op2,
NodeDependency dep) const {
- PADDLE_ENFORCE_EQ(op1->GetScopeIdx(), op2->GetScopeIdx());
+ PADDLE_ENFORCE_EQ(op1->GetScopeIdx(), op2->GetScopeIdx(),
+ platform::errors::InvalidArgument(
+ "Op(%s) and op(%s) must in the same scope.",
+ op1->Name(), op2->Name()));
if (op1 == op2) {
- PADDLE_ENFORCE(dep == NodeDependency::kSame);
+ PADDLE_ENFORCE_EQ(
+ dep, NodeDependency::kSame,
+ platform::errors::InvalidArgument(
+ "Set Same Op(%s) Dep, dep must be kSame type.", op1->Name()));
auto idx = OpIndex(op1);
deps_[op1->GetScopeIdx()][idx][idx] = NodeDependency::kSame;
} else {
auto idx1 = OpIndex(op1);
auto idx2 = OpIndex(op2);
- PADDLE_ENFORCE(dep != NodeDependency::kSame && idx1 != idx2);
+ PADDLE_ENFORCE_EQ((dep != NodeDependency::kSame && idx1 != idx2), true,
+ platform::errors::InvalidArgument(
+ "Op(%s) and Op(%s) should not have same "
+ "index(%d), and dep should not kSame type.",
+ op1->Name(), op2->Name(), idx1));
deps_[op1->GetScopeIdx()][idx1][idx2] = dep;
deps_[op1->GetScopeIdx()][idx2][idx1] = ReverseNodeDependency(dep);
}
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc
index 338a608b4ae3d..0b42f2ebd5555 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/buffer_shared_inplace_op_pass.cc
@@ -57,7 +57,9 @@ void BufferSharedInplaceOpPass::Run(Graph *graph) const {
auto *op = *(pair.second.ops().begin());
const std::string &op_type = op->GetOp()->Type();
const framework::OpDesc *op_desc = op->Node()->Op();
- PADDLE_ENFORCE_NOT_NULL(op_desc);
+ PADDLE_ENFORCE_NOT_NULL(
+ op_desc, platform::errors::NotFound("Op(%s) can not find opdesc.",
+ op->Name()));
auto &infer_inplace = OpInfoMap::Instance().Get(op_type).infer_inplace_;
if (!infer_inplace) {
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/eager_deletion_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/eager_deletion_pass.cc
index 9a322bdc1dce1..7b9b5aa623074 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/eager_deletion_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/eager_deletion_pass.cc
@@ -58,8 +58,12 @@ static int64_t GetMemorySize(
&vars,
const std::string &var_name) {
auto *var_desc = TryGetLatestVarDesc(vars.at(var_name));
- PADDLE_ENFORCE_NOT_NULL(var_desc);
- PADDLE_ENFORCE(IsLoDTensor(var_desc));
+ PADDLE_ENFORCE_NOT_NULL(
+ var_desc,
+ platform::errors::NotFound("Var(%s) can not find VarDesc.", var_name));
+ PADDLE_ENFORCE_EQ(IsLoDTensor(var_desc), true,
+ platform::errors::InvalidArgument(
+ "Var(%s) must be LoDTensor.", var_name));
auto dims = var_desc->GetShape();
return SizeOfType(var_desc->GetDataType()) *
std::accumulate(dims.begin(), dims.end(), static_cast<int64_t>(1),
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h
index 4f6bacecab4aa..94842485440bd 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_optimization_var_info.h
@@ -42,8 +42,10 @@ class MemOptVarInfo {
}
void SetRefCnt(size_t ref_cnt) {
- PADDLE_ENFORCE_GE(ref_cnt, 1,
- "Reference count must be larger than or equal to 1");
+ PADDLE_ENFORCE_GE(
+ ref_cnt, 1,
+ platform::errors::InvalidArgument(
+ "Reference count(%d) must be larger than or equal to 1.", ref_cnt));
ref_cnt_ = ref_cnt;
runtime_ref_cnt_ = ref_cnt;
}
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/memory_reuse_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/memory_reuse_pass.cc
index 20c7968d6ac56..221b0a76e7ef5 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/memory_reuse_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/memory_reuse_pass.cc
@@ -66,7 +66,11 @@ bool MemoryReusePass::TryReuseVar(details::VarHandle *in_var,
details::VarHandle *out_var) const {
auto *op =
dynamic_cast<details::ComputationOpHandle *>(out_var->GeneratedOp());
- PADDLE_ENFORCE_NOT_NULL(op);
+ PADDLE_ENFORCE_NOT_NULL(
+ op,
+ platform::errors::InvalidArgument(
+ "Var(%s) have no GeneratedOp, or it's op is not ComputationOpHandle.",
+ out_var->Name()));
if (IsVarPairReusable(*in_var, *out_var)) {
AddReuseVar(op, in_var, out_var);
return true;
@@ -91,10 +95,13 @@ VarDesc *MemoryReusePass::GetVarDesc(const details::VarHandle &var) const {
size_t scope_idx = var.scope_idx();
auto iter = var_descs_[scope_idx].find(var_name);
if (iter == var_descs_[scope_idx].end()) {
- PADDLE_ENFORCE((*all_vars_)[scope_idx].count(var_name),
- "Variable %s not found", var_name);
+ PADDLE_ENFORCE_NE(
+ (*all_vars_)[scope_idx].count(var_name), 0,
+ platform::errors::NotFound("Variable %s not found.", var_name));
auto *desc = TryGetLatestVarDesc((*all_vars_)[scope_idx].at(var_name));
- PADDLE_ENFORCE_NOT_NULL(desc);
+ PADDLE_ENFORCE_NOT_NULL(
+ desc,
+ platform::errors::NotFound("Var(%s) can not find VarDesc.", var_name));
var_descs_[scope_idx].emplace(var_name, desc);
return desc;
} else {
@@ -119,7 +126,9 @@ void MemoryReusePass::CollectShareTensorBufferOpHandles() const {
if (share_buffer_op != nullptr) {
auto *compute_op =
details::GetUniquePendingComputationOpHandle(share_buffer_op);
- PADDLE_ENFORCE(ops_.count(compute_op) == 0);
+ PADDLE_ENFORCE_EQ(
+ ops_.count(compute_op), 0,
+ platform::errors::AlreadyExists("Compute op already exists."));
ops_.emplace(compute_op, share_buffer_op);
}
}
@@ -227,8 +236,11 @@ bool MemoryReusePass::IsInVarReusable(const details::VarHandle &in_var) const {
*/
bool MemoryReusePass::IsOutVarReusable(
const details::VarHandle &out_var) const {
- PADDLE_ENFORCE_NOT_NULL(dynamic_cast(
- out_var.GeneratedOp()));
+ PADDLE_ENFORCE_NOT_NULL(
+ dynamic_cast<details::ComputationOpHandle *>(out_var.GeneratedOp()),
+ platform::errors::InvalidArgument(
+ "Var(%s) has no GeneratedOp, or its op is not a ComputationOpHandle.",
+ out_var.Name()));
const auto out_name = out_var.Name();
if (out_name == kEmptyVarName) {
return false;
@@ -236,9 +248,10 @@ bool MemoryReusePass::IsOutVarReusable(
// out_var must be the first version!!!
auto out_var_iter = (*all_vars_)[out_var.scope_idx()].find(out_name);
- PADDLE_ENFORCE(out_var_iter != (*all_vars_)[out_var.scope_idx()].end() &&
- !out_var_iter->second.empty(),
- "Cannot find variable %s", out_name);
+ PADDLE_ENFORCE_EQ(
+ (out_var_iter != (*all_vars_)[out_var.scope_idx()].end() &&
+ !out_var_iter->second.empty()),
+ true, platform::errors::NotFound("Cannot find variable %s.", out_name));
if (out_var_iter->second[0] != &out_var) {
return false;
@@ -282,7 +295,11 @@ bool MemoryReusePass::IsVarPairReusable(
const details::VarHandle &in_var, const details::VarHandle &out_var) const {
auto *op =
dynamic_cast<details::ComputationOpHandle *>(out_var.GeneratedOp());
- PADDLE_ENFORCE_NOT_NULL(op);
+ PADDLE_ENFORCE_NOT_NULL(
+ op,
+ platform::errors::InvalidArgument(
+ "Var(%s) have no GeneratedOp, or it's op is not ComputationOpHandle.",
+ out_var.Name()));
const auto in_name = in_var.Name();
if (in_name == out_var.Name()) {
@@ -308,8 +325,10 @@ bool MemoryReusePass::IsVarPairReusable(
void MemoryReusePass::AddReuseVar(details::ComputationOpHandle *op,
details::VarHandle *in_var,
details::VarHandle *out_var) const {
- PADDLE_ENFORCE((*var_infos_)[op->GetScopeIdx()].count(in_var->Name()) > 0,
- "%s does not in mem-opt var infos", in_var->Name());
+ PADDLE_ENFORCE_GT(
+ (*var_infos_)[op->GetScopeIdx()].count(in_var->Name()), 0,
+ platform::errors::NotFound("Var(%s) does not in mem opt var infos.",
+ in_var->Name()));
if (ops_.count(op) == 0) {
InsertShareTensorBufferOpHandleToGraph(op);
@@ -349,7 +368,10 @@ void MemoryReusePass::UpdateLastLiveOpOfVar(details::ComputationOpHandle *op,
if (out_var_op_iter == (*last_live_ops_of_vars_)[scope_idx].end()) {
last_live_op_of_in_var = op;
} else {
- PADDLE_ENFORCE(!out_var_op_iter->second.ops().empty());
+ PADDLE_ENFORCE_EQ(
+ out_var_op_iter->second.ops().empty(), false,
+ platform::errors::InvalidArgument(
+ "Var(%s)'s last live op should not empty.", out_var->Name()));
last_live_op_of_in_var = *(out_var_op_iter->second.ops().begin());
}
@@ -359,8 +381,9 @@ void MemoryReusePass::UpdateLastLiveOpOfVar(details::ComputationOpHandle *op,
last_live_ops_of_in_var->insert(last_live_op_of_in_var);
auto in_var_info_iter = (*var_infos_)[scope_idx].find(in_var->Name());
- PADDLE_ENFORCE(in_var_info_iter != (*var_infos_)[scope_idx].end(),
- "Cannot find variable %s", in_var->Name());
+ PADDLE_ENFORCE_NE(
+ in_var_info_iter, (*var_infos_)[scope_idx].end(),
+ platform::errors::NotFound("Cannot find variable %s.", in_var->Name()));
in_var_info_iter->second->SetRefCnt(1);
}
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.cc b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.cc
index d2cc89a2b49d8..11c2508afb574 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.cc
@@ -39,7 +39,7 @@ void OpGraphView::Build(const std::vector<details::OpHandleBase *> &ops) {
}
PADDLE_ENFORCE(
preceding_ops_.size() == ops.size() && pending_ops_.size() == ops.size(),
- "There are duplicate ops in graph.");
+ platform::errors::InvalidArgument("There are duplicate ops in graph."));
}
std::unordered_set<details::OpHandleBase *> OpGraphView::AllOps() const {
@@ -56,8 +56,10 @@ bool OpGraphView::HasOp(details::OpHandleBase *op) const {
}
void OpGraphView::EnforceHasOp(details::OpHandleBase *op) const {
- PADDLE_ENFORCE(HasOp(op), "Cannot find op %s in OpGraphView",
- op == nullptr ? "nullptr" : op->DebugString());
+ PADDLE_ENFORCE_EQ(HasOp(op), true,
+ platform::errors::NotFound(
+ "Cannot find op %s in OpGraphView.",
+ op == nullptr ? "nullptr" : op->DebugString()));
}
const std::unordered_set<details::OpHandleBase *> &OpGraphView::PendingOps(
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h
index 86b25c13959a7..5fb2caedba85d 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/op_graph_view.h
@@ -127,9 +127,13 @@ void OpGraphView::BreadthFirstVisit(Callback &&callback) const {
}
}
- PADDLE_ENFORCE_EQ(num_calls, op_num, "There are unvisited ops");
- PADDLE_ENFORCE_EQ(visited_ops.size(), op_num, "There are unvisited ops");
- PADDLE_ENFORCE(op_deps.empty(), "There are unvisited ops");
+ PADDLE_ENFORCE_EQ(num_calls, op_num, platform::errors::InvalidArgument(
+ "There are unvisited ops."));
+ PADDLE_ENFORCE_EQ(
+ visited_ops.size(), op_num,
+ platform::errors::InvalidArgument("There are unvisited ops."));
+ PADDLE_ENFORCE_EQ(op_deps.empty(), true, platform::errors::InvalidArgument(
+ "There are unvisited ops."));
}
} // namespace ir
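The three "unvisited ops" enforcements above all guard one invariant of BreadthFirstVisit: every op whose dependencies are satisfied gets visited exactly once, so a shortfall in the visit count means a dependency cycle or a stale op count. A self-contained sketch of the same invariant using Kahn's algorithm (illustrative only, not Paddle's OpGraphView):

#include <cstddef>
#include <queue>
#include <stdexcept>
#include <vector>

// Kahn's algorithm: visit nodes whose indegree drops to zero; if the visit
// count falls short of the node count, a cycle blocked further progress.
std::vector<int> TopoSort(const std::vector<std::vector<int>>& adj) {
  std::vector<int> indegree(adj.size(), 0);
  for (const auto& outs : adj)
    for (int v : outs) ++indegree[v];
  std::queue<int> ready;
  for (std::size_t u = 0; u < adj.size(); ++u)
    if (indegree[u] == 0) ready.push(static_cast<int>(u));
  std::vector<int> order;
  while (!ready.empty()) {
    int u = ready.front();
    ready.pop();
    order.push_back(u);
    for (int v : adj[u])
      if (--indegree[v] == 0) ready.push(v);
  }
  if (order.size() != adj.size())
    throw std::logic_error("There are unvisited ops: the graph has a cycle.");
  return order;
}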
diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc
index 4584b3d4e0f07..88d1b2aa003ce 100644
--- a/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc
+++ b/paddle/fluid/framework/ir/memory_optimize_pass/reference_count_pass.cc
@@ -77,11 +77,15 @@ class ShrinkDepsOpFunctor {
const std::vector &ops) const {
std::unordered_map<details::OpHandleBase *, size_t> op_to_idx;
for (size_t i = 0; i < ops.size(); ++i) {
- PADDLE_ENFORCE(graph_.HasOp(ops[i]), "Op does not exist in graph");
+ PADDLE_ENFORCE_EQ(
+ graph_.HasOp(ops[i]), true,
+ platform::errors::InvalidArgument("Op does not exist in graph."));
op_to_idx[ops[i]] = i;
}
- PADDLE_ENFORCE(op_to_idx.size() == ops.size(), "Duplicate ops");
+ PADDLE_ENFORCE_EQ(
+ op_to_idx.size(), ops.size(),
+ platform::errors::InvalidArgument("Graph may have duplicate ops."));
std::vector> ret(ops.size());
for (auto &e : ret) {
@@ -247,9 +251,9 @@ ExtractComputationOpFromLastLivedVar(details::VarHandle *var, size_t scope_idx,
return {};
}
- PADDLE_ENFORCE_EQ(
- computation_ops.empty(), false,
- platform::errors::InvalidArgument("Computation ops should not be empty"));
+ PADDLE_ENFORCE_EQ(computation_ops.empty(), false,
+ platform::errors::InvalidArgument(
+ "Computation ops should not be empty."));
// stage four. Try to shrink computation op if they depend on each other.
// Get the smallest set of the most ops.
@@ -263,8 +267,9 @@ void ReferenceCountPass::ApplyImpl(ir::Graph *graph) const {
Get<std::vector<LastLiveOpsOfVars>>(kLastLiveOpsOfVars);
PADDLE_ENFORCE(last_live_ops_of_vars.empty() && var_infos.empty(),
- "Last Live Ops and Reference Counts of vars should be "
- "initialized at here.");
+ platform::errors::InvalidArgument(
+ "Last live ops and reference counts of vars should be "
+ "initialized at here."));
const auto &vars = graph->Get<details::GraphVars>(details::kGraphVars);
@@ -304,11 +309,15 @@ void ReferenceCountPass::ApplyImpl(ir::Graph *graph) const {
auto &var_name = name_var_pair.first;
auto &var_handles = name_var_pair.second;
- PADDLE_ENFORCE_EQ(var_desc->Name(), var_name);
-
PADDLE_ENFORCE_EQ(
- var_handles.empty(), false,
- platform::errors::InvalidArgument("Variable %s not found", var_name));
+ var_desc->Name(), var_name,
+ platform::errors::InvalidArgument(
+ "A Var, it's VarName(%s) and DescName(%s) not same.", var_name,
+ var_desc->Name()));
+
+ PADDLE_ENFORCE_EQ(var_handles.empty(), false,
+ platform::errors::InvalidArgument(
+ "Variable %s not found.", var_name));
auto last_ver_var = var_handles.back();
if (last_ver_var->Node()->IsCtrlVar()) {
@@ -327,12 +336,13 @@ void ReferenceCountPass::ApplyImpl(ir::Graph *graph) const {
continue;
}
+ PADDLE_ENFORCE_EQ(status, LastLiveOpSearchStatus::kSuccess,
+ platform::errors::InvalidArgument(
+ "Status(%d) must be success.", status));
PADDLE_ENFORCE_EQ(
- status, LastLiveOpSearchStatus::kSuccess,
- platform::errors::InvalidArgument("status must be success"));
- PADDLE_ENFORCE_EQ(result.empty(), false,
- platform::errors::NotFound(
- "Last living ops of %s cannot be empty", var_name));
+ result.empty(), false,
+ platform::errors::NotFound("Last living ops of %s cannot be empty.",
+ var_name));
std::string last_live_ops_log_str;
for (auto &each_ret : result) {
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
index 119917428997b..45ff275d53085 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc
@@ -22,7 +22,8 @@ namespace framework {
namespace ir {
void ConvActivationFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE_NOT_NULL(graph, "graph cannot be nullptr.");
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init("conv_activation_mkldnn_fuse", graph);
GraphPatternDetector gpd;
@@ -75,7 +76,8 @@ void ConvActivationFusePass::ApplyImpl(ir::Graph* graph) const {
GraphSafeRemoveNodes(graph, {activation, conv_out});
PADDLE_ENFORCE_GT(subgraph.count(conv_input), 0UL,
- "subgraph has to contain conv_input node.");
+ platform::errors::InvalidArgument(
+ "Subgraph has to contain conv input node."));
IR_NODE_LINK_TO(conv, activation_out);
found_conv_activation_count++;
};
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
index bbfc8c005580b..82e0af3c19875 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
@@ -26,7 +26,11 @@ namespace ir {
template <typename BinaryOperation>
LoDTensor tensor_apply_eltwise(const LoDTensor& vec_a, const LoDTensor& vec_b,
BinaryOperation f) {
- PADDLE_ENFORCE_EQ(vec_a.dims(), vec_b.dims());
+ PADDLE_ENFORCE_EQ(vec_a.dims(), vec_b.dims(),
+ platform::errors::InvalidArgument(
+ "Input two tensors must have same shape, but they are "
+ "different: %s, %s.",
+ vec_a.dims(), vec_b.dims()));
LoDTensor vec_y;
vec_y.Resize(vec_a.dims());
const float* a = vec_a.data<float>();
@@ -39,11 +43,13 @@ LoDTensor tensor_apply_eltwise(const LoDTensor& vec_a, const LoDTensor& vec_b,
}
void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
auto* scope = param_scope();
- PADDLE_ENFORCE(scope);
+ PADDLE_ENFORCE_NOT_NULL(
+ scope, platform::errors::InvalidArgument("Scope cannot be nullptr."));
GraphPatternDetector gpd;
auto* conv_input =
@@ -68,7 +74,9 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
// elementwise_add op
GET_IR_NODE_FROM_SUBGRAPH(eltwise, eltwise, conv_bias_pattern);
- PADDLE_ENFORCE(subgraph.count(conv_input));
+ PADDLE_ENFORCE_NE(
+ subgraph.count(conv_input), 0,
+ platform::errors::NotFound("Detector did not find conv input."));
// check if fuse can be done and if MKL-DNN should be used
FuseOptions fuse_option = FindFuseOption(*conv, *eltwise);
@@ -86,10 +94,16 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
if (has_bias && conv->Op()->Input("Bias").size() > 0) {
auto conv_bias_names = conv->Op()->Input("Bias");
// add eltwise bias to existing conv bias
- PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1);
+ PADDLE_ENFORCE_EQ(conv_bias_names.size(), 1,
+ platform::errors::NotFound("Can not find var Bias."));
auto* conv_bias_var = scope->FindVar(conv_bias_names[0]);
auto* conv_bias_tensor = conv_bias_var->GetMutable<LoDTensor>();
- PADDLE_ENFORCE_EQ(conv_bias_tensor->dims(), eltwise_bias_tensor->dims());
+ PADDLE_ENFORCE_EQ(
+ conv_bias_tensor->dims(), eltwise_bias_tensor->dims(),
+ platform::errors::InvalidArgument(
+ "Conv bias tensor and eltwise bias tensor "
+ "must have same shape, but they are different: %s, %s.",
+ conv_bias_tensor->dims(), eltwise_bias_tensor->dims()));
*conv_bias_tensor = tensor_apply_eltwise(
*conv_bias_tensor, *eltwise_bias_tensor, std::plus<float>());
diff --git a/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc
index 9e8f0f0c46cee..af64cb22054e9 100644
--- a/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass.cc
@@ -39,7 +39,10 @@ void ConvConcatReLUFusePass::FindConcatWithConvs(
for (auto node : concat_inputs) {
auto prev_op_node = node->inputs;
- PADDLE_ENFORCE_EQ(prev_op_node.size(), 1);
+ PADDLE_ENFORCE_EQ(prev_op_node.size(), 1,
+ platform::errors::InvalidArgument(
+ "Node(%s) input size(%d) must be 1.", node->Name(),
+ prev_op_node.size()));
auto* conv_op = prev_op_node[0];
if (conv_op->Op()->Type() != "conv2d") return;
@@ -103,7 +106,8 @@ void ConvConcatReLUFusePass::FuseConvConcatReLU(
}
void ConvConcatReLUFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
std::unordered_map<const Node*, int> concat_with_convs_counter;
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
index 9881f7f9e56fd..23419d5b9e0a2 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc
@@ -68,10 +68,10 @@ void CPUQuantizePass::QuantizeInput(Graph* g, Node* op, Node* input,
auto inputs = op->Op()->InputNames();
bool name_found =
std::find(inputs.begin(), inputs.end(), input_name) != inputs.end();
- PADDLE_ENFORCE_EQ(
- name_found, true,
- platform::errors::InvalidArgument("%s isn't the input of the %s operator",
- input_name, op->Op()->Type()));
+ PADDLE_ENFORCE_EQ(name_found, true,
+ platform::errors::InvalidArgument(
+ "Var(%s) isn't the input of the %s operator.",
+ input_name, op->Op()->Type()));
unsigned max = is_unsigned ? U8_MAX : S8_MAX;
float scale = scale_to_one * max;
@@ -110,8 +110,14 @@ void CPUQuantizePass::QuantizeInputs(Graph* g, Node* op, std::string input_name,
std::string scale_attr_name) const {
auto inputs = op->inputs;
auto output = op->outputs[0];
- PADDLE_ENFORCE_GE(inputs.size(), 1);
- PADDLE_ENFORCE_EQ(op->outputs.size(), 1);
+ PADDLE_ENFORCE_GE(inputs.size(), 1,
+ platform::errors::InvalidArgument(
+ "OP(%s)'s inputs(%d) must be equal or greater than 1.",
+ op->Name(), inputs.size()));
+ PADDLE_ENFORCE_EQ(op->outputs.size(), 1,
+ platform::errors::InvalidArgument(
+ "OP(%s)'s outputs(%d) must be equal to 1.", op->Name(),
+ op->outputs.size()));
// create a quantize op desc prototype
OpDesc q_desc;
@@ -159,8 +165,8 @@ void CPUQuantizePass::DequantizeOutput(Graph* g, Node* op, Node* output,
std::find(outputs.begin(), outputs.end(), output_name) != outputs.end();
PADDLE_ENFORCE_EQ(name_found, true,
platform::errors::InvalidArgument(
- "%s isn't the output of the %s operator", output_name,
- op->Op()->Type()));
+ "Var(%s) isn't the output of the %s operator.",
+ output_name, op->Op()->Type()));
unsigned max = is_unsigned ? U8_MAX : S8_MAX;
float scale = scale_to_one * max;
@@ -682,10 +688,12 @@ void CPUQuantizePass::QuantizeMatmul(Graph* graph) const {
bool is_x_unsigned{false}, is_y_unsigned{false};
auto input_x_scale = GetScaleValueForNode(matmul_in_x, &is_x_unsigned);
auto input_y_scale = GetScaleValueForNode(matmul_in_y, &is_y_unsigned);
- PADDLE_ENFORCE_EQ(
- is_x_unsigned, is_y_unsigned,
- platform::errors::InvalidArgument(
- "Matmul inputs should have the same value of is_unsigned"));
+ PADDLE_ENFORCE_EQ(is_x_unsigned, is_y_unsigned,
+ platform::errors::InvalidArgument(
+ "Matmul inputs should have the same "
+ "attribute of signed/unsigned, but they "
+ "are different: x(%d), y(%d).",
+ is_x_unsigned, is_y_unsigned));
QuantizeInput(g, matmul_op, matmul_in_x, "X", input_x_scale, is_x_unsigned,
"Scale_x");
QuantizeInput(g, matmul_op, matmul_in_y, "Y", input_y_scale, is_y_unsigned,
@@ -785,10 +793,12 @@ void CPUQuantizePass::QuantizeElementwiseAdd(Graph* graph) const {
void CPUQuantizePass::ApplyImpl(ir::Graph* graph) const {
VLOG(3) << "Quantizing the graph.";
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
- PADDLE_ENFORCE(param_scope());
+ PADDLE_ENFORCE_NOT_NULL(param_scope(), platform::errors::InvalidArgument(
+ "Scope cannot be nullptr."));
QuantizeConv(graph, false /* with_residual_data */);
QuantizeConv(graph, true /* with_residual_data */);
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc
index 130ba44ff64c7..bc24c10d9d0ae 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc
@@ -75,7 +75,7 @@ void CPUQuantizeSquashPass::DequantQuantSquash(
BOOST_GET_CONST(float, quant_op->Op()->GetAttr("Scale"));
PADDLE_ENFORCE_NE(
nodes_keep_counter->find(dequant_out), nodes_keep_counter->end(),
- platform::errors::NotFound("The dequant output node is not found"));
+ platform::errors::NotFound("The dequant output node is not found."));
// check if dequantize op should be kept or removed, decrease the counter
bool keep_dequant = (*nodes_keep_counter)[dequant_out]-- > 1;
@@ -153,8 +153,9 @@ void CPUQuantizeSquashPass::OpRequantSquash(Graph* graph) const {
PADDLE_ENFORCE_NE(
any_op_output_name.empty(), true,
- platform::errors::NotFound("Operator before requantize operator "
- "should have requantize input as output"));
+ platform::errors::NotFound("Operator before requantize operator(%s) "
+ "should have requantize input as output.",
+ requant_in->Name()));
float requant_scale_out =
BOOST_GET_CONST(float, requant_op->Op()->GetAttr("Scale_out"));
@@ -195,10 +196,11 @@ void CPUQuantizeSquashPass::RequantOpSquash(Graph* graph) const {
for (auto input_name : any_op->Op()->Input(name))
if (input_name == requant_out->Name()) any_op_input_name = name;
- PADDLE_ENFORCE_NE(
- any_op_input_name.empty(), true,
- platform::errors::NotFound("The operator after requantize operator "
- "should have requantize output as input"));
+ PADDLE_ENFORCE_NE(any_op_input_name.empty(), true,
+ platform::errors::NotFound(
+ "The operator after requantize operator(%s) "
+ "should have requantize output as input.",
+ requant_out->Name()));
float requant_scale_in =
boost::get<float>(requant_op->Op()->GetAttr("Scale_in"));
@@ -206,11 +208,14 @@ void CPUQuantizeSquashPass::RequantOpSquash(Graph* graph) const {
if (any_op->Op()->Type() == "matmul")
scale_name = any_op_input_name == "X" ? "Scale_x" : "Scale_y";
- PADDLE_ENFORCE_EQ(requant_op->Op()->GetAttrIfExists<float>("Scale_out"),
- any_op->Op()->GetAttrIfExists<float>(scale_name),
- platform::errors::InvalidArgument(
- "The operator after requantize should have input "
- "scale equal to requantize output scale"));
+ PADDLE_ENFORCE_EQ(
+ requant_op->Op()->GetAttrIfExists<float>("Scale_out"),
+ any_op->Op()->GetAttrIfExists<float>(scale_name),
+ platform::errors::InvalidArgument(
+ "The operator after requantize should have input "
+ "scale(%f) equal to requantize output scale(%f).",
+ any_op->Op()->GetAttrIfExists<float>(scale_name),
+ requant_op->Op()->GetAttrIfExists<float>("Scale_out")));
any_op->Op()->SetAttr(scale_name, requant_scale_in);
any_op->Op()->SetInput(any_op_input_name,
std::vector({requant_in->Name()}));
@@ -286,8 +291,9 @@ void CPUQuantizeSquashPass::MultipleQuantizeSquash(Graph* graph) const {
auto* first_quant_out = first_quant_op->outputs[0];
float scale = first_quant_op->Op()->GetAttrIfExists<float>("Scale");
- PADDLE_ENFORCE_NE(scale, 0, platform::errors::InvalidArgument(
- "Quantize scale should not be equal 0"));
+ PADDLE_ENFORCE_NE(scale, 0,
+ platform::errors::InvalidArgument(
+ "Quantize scale(%f) should not be equal 0.", scale));
for (int iter = prev_out->outputs.size() - 1; iter >= 0; iter--) {
auto quant_op = prev_out->outputs[iter];
@@ -304,8 +310,9 @@ void CPUQuantizeSquashPass::MultipleQuantizeSquash(Graph* graph) const {
PADDLE_ENFORCE_NE(
last_op_input_name.empty(), true,
- platform::errors::NotFound("Operator after quantize operator "
- "should has quantize output as input"));
+ platform::errors::NotFound("Operator after quantize operator(%s) "
+ "should has quantize output as input.",
+ quant_out->Name()));
last_op->Op()->SetInput(
last_op_input_name,
std::vector({first_quant_out->Name()}));
@@ -345,10 +352,12 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const {
PADDLE_ENFORCE_GT(dequant_scale, 0.0f,
platform::errors::InvalidArgument(
- "Dequantize scale should have positive value"));
+ "Dequantize scale(%f) should have positive value.",
+ dequant_scale));
PADDLE_ENFORCE_GT(scale_scale, 0.0f,
platform::errors::InvalidArgument(
- "Scale of scale op should have positive value"));
+ "Scale(%f) of scale op should have positive value.",
+ scale_scale));
dequant_op->Op()->SetAttr("Scale", dequant_scale / scale_scale);
dequant_op->Op()->SetOutput(
@@ -367,8 +376,8 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const {
void CPUQuantizeSquashPass::ApplyImpl(ir::Graph* graph) const {
PADDLE_ENFORCE_NOT_NULL(
graph,
- platform::errors::NotFound(
- "The graph in function CPUQuantizeSquashPass::ApplyImpl is null"));
+ platform::errors::InvalidArgument(
+ "The graph in function CPUQuantizeSquashPass::ApplyImpl is null."));
FusePassBase::Init("cpu_quantize_squash_pass", graph);
std::unordered_map nodes_keep_counter;
diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc
index 9b827fdf6fef1..37af0274ea8a2 100644
--- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc
+++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc
@@ -57,7 +57,7 @@ void SetOp(ProgramDesc* prog, const std::string& type, const std::string& name,
PADDLE_ENFORCE_EQ(inputs.size(), 2UL,
platform::errors::InvalidArgument(
"The fc inputs should contain input and weights, but "
- "now the size of inputs is %d",
+ "now the size of inputs is %d.",
inputs.size()));
op->SetInput("W", {inputs[1]});
op->SetOutput("Out", outputs);
diff --git a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
index e854559ae7a87..c5965701a53d4 100644
--- a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass.cc
@@ -19,14 +19,17 @@ namespace paddle {
namespace framework {
namespace ir {
-#define GET_NODE(id, pattern) \
- PADDLE_ENFORCE(subgraph.count(pattern.RetrieveNode(#id)), \
- "pattern has no Node called %s", #id); \
- auto* id = subgraph.at(pattern.RetrieveNode(#id)); \
- PADDLE_ENFORCE_NOT_NULL(id, "subgraph has no node %s", #id);
+#define GET_NODE(id, pattern) \
+ PADDLE_ENFORCE_NE(subgraph.count(pattern.RetrieveNode(#id)), 0, \
+ platform::errors::InvalidArgument( \
+ "Pattern has no Node called %s.", #id)); \
+ auto* id = subgraph.at(pattern.RetrieveNode(#id)); \
+ PADDLE_ENFORCE_NOT_NULL( \
+ id, platform::errors::InvalidArgument("Subgraph has no node %s.", #id));
void DepthwiseConvMKLDNNPass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init("depthwise_conv_mkldnn_pass", graph);
GraphPatternDetector gpd;
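The rewritten GET_NODE macro above performs two distinct checks: the pattern must contain the named node, and the subgraph entry bound to it must be non-null. A standalone analogue with standard exceptions (assumed std::map table, not Paddle's GraphPatternDetector):

#include <map>
#include <stdexcept>
#include <string>

// Two-step checked lookup, analogous to GET_NODE above: first verify the
// key exists, then verify the stored pointer is non-null.
#define GET_CHECKED(id, table)                                           \
  if ((table).count(#id) == 0)                                           \
    throw std::invalid_argument("Pattern has no Node called " #id ".");  \
  auto* id = (table).at(#id);                                            \
  if (id == nullptr)                                                     \
    throw std::invalid_argument("Subgraph has no node " #id ".");

int main() {
  std::map<std::string, int*> subgraph;
  int value = 7;
  subgraph["conv"] = &value;
  GET_CHECKED(conv, subgraph);  // declares 'conv' bound to the map entry
  return (*conv == 7) ? 0 : 1;
}

Like the original, the macro cannot be wrapped in do { } while (0), because the declared variable must stay in scope after expansion.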
diff --git a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc
index 0d720e828b6d0..6c87e437caa1b 100644
--- a/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/mkldnn/scale_matmul_fuse_pass.cc
@@ -46,12 +46,15 @@ void ScaleMatmulFusePass::ApplyImpl(ir::Graph* graph) const {
if (scale_op->Op()->GetAttrIfExists<float>("bias") == 0.0) {
auto matmul_alpha = matmul_op->Op()->GetAttrIfExists<float>("alpha");
auto scale_scale = scale_op->Op()->GetAttrIfExists<float>("scale");
- PADDLE_ENFORCE_GT(matmul_alpha, 0.0f,
- platform::errors::InvalidArgument(
- "Alpha of matmul op should have positive value"));
+ PADDLE_ENFORCE_GT(
+ matmul_alpha, 0.0f,
+ platform::errors::InvalidArgument(
+ "Alpha(%f) of matmul op should have positive value.",
+ matmul_alpha));
PADDLE_ENFORCE_GT(scale_scale, 0.0f,
platform::errors::InvalidArgument(
- "Scale of scale op should have positive value"));
+ "Scale(%f) of scale op should have positive value.",
+ scale_scale));
std::string matmul_op_input_name;
for (auto name : matmul_op->Op()->InputNames())
@@ -60,8 +63,9 @@ void ScaleMatmulFusePass::ApplyImpl(ir::Graph* graph) const {
PADDLE_ENFORCE_NE(
matmul_op_input_name.empty(), true,
- platform::errors::NotFound("Operator after scale operator "
- "should have scale output as input"));
+ platform::errors::NotFound("Operator after scale operator(%s) "
+ "should have scale output as input.",
+ scale_out->Name()));
matmul_op->Op()->SetAttr("alpha", matmul_alpha * scale_scale);
matmul_op->Op()->SetInput(matmul_op_input_name,
std::vector({scale_in->Name()}));
diff --git a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc
index d67f2274ebf1f..456e642ad86ab 100644
--- a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc
+++ b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc
@@ -85,7 +85,9 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const {
// 1. record op nodes of different roles
for (auto node : nodes) {
if (!node->IsOp()) continue;
- PADDLE_ENFORCE(node->Op(), "must find opdesc");
+ PADDLE_ENFORCE_NOT_NULL(
+ node->Op(), platform::errors::InvalidArgument(
+ "Node(%s) must hold op description.", node->Name()));
int op_role = BOOST_GET_CONST(
int, node->Op()->GetAttr(
framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
@@ -108,7 +110,9 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const {
} else if (op_role & static_cast(framework::OpRole::kLRSched)) {
lr_ops.push_back(node);
} else { // NOLINT
- PADDLE_THROW("Invalid op_role: %d", static_cast(op_role));
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Invalid op role(%d), in node(%s).", static_cast(op_role),
+ node->Name()));
}
}
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/all_reduce_deps_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/all_reduce_deps_pass.cc
index 8923dfc3232fb..6d5e4ac27bf8a 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/all_reduce_deps_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/all_reduce_deps_pass.cc
@@ -45,7 +45,9 @@ class AllReduceDepsPass : public ir::Pass {
for (size_t i = 0; i < all_reduce_op_handles.size(); ++i) {
auto op_handle =
dynamic_cast(all_reduce_op_handles[i]);
- PADDLE_ENFORCE(op_handle, "op_handle must be NCCLOpHandleBase");
+ PADDLE_ENFORCE_NOT_NULL(op_handle,
+ platform::errors::InvalidArgument(
+ "Op handle must be NCCLOpHandleBase."));
op_handle->SetRunEnv(i, use_hierarchical_allreduce);
}
#endif
@@ -95,7 +97,9 @@ class AllReduceDepsPass : public ir::Pass {
}
}
- PADDLE_ENFORCE_NE(next_ready_ops.size(), 0, "There maybe have a cycle.");
+ PADDLE_ENFORCE_NE(
+ next_ready_ops.size(), 0,
+ platform::errors::InvalidArgument("There may be a cycle."));
ready_ops.clear();
std::swap(ready_ops, next_ready_ops);
GetSortedAllReduceOps(ready_ops, &all_reduce_op_handles);
@@ -122,18 +126,25 @@ class AllReduceDepsPass : public ir::Pass {
// NOTE(zcd): For distributed training, it is important to keep the order of
// allReduce on each node consistent. Otherwise, hang may occur.
// Sort the current_all_reduce_op_handles according to the name of input.
- sort(current_all_reduce_op_handles.begin(),
- current_all_reduce_op_handles.end(),
- [](const details::OpHandleBase* left,
- const details::OpHandleBase* right) -> bool {
- auto left_in_vars =
- details::DynamicCast<details::VarHandle>(left->Inputs());
- auto right_in_vars =
- details::DynamicCast<details::VarHandle>(right->Inputs());
- PADDLE_ENFORCE_GT(left_in_vars.size(), 0);
- PADDLE_ENFORCE_GT(right_in_vars.size(), 0);
- return left_in_vars[0]->Name() > right_in_vars[0]->Name();
- });
+ sort(
+ current_all_reduce_op_handles.begin(),
+ current_all_reduce_op_handles.end(),
+ [](const details::OpHandleBase* left,
+ const details::OpHandleBase* right) -> bool {
+ auto left_in_vars =
+ details::DynamicCast<details::VarHandle>(left->Inputs());
+ auto right_in_vars =
+ details::DynamicCast<details::VarHandle>(right->Inputs());
+ PADDLE_ENFORCE_GT(left_in_vars.size(), 0,
+ platform::errors::InvalidArgument(
+ "OpHandle(%s) inputs size must greater than 0.",
+ left->Name()));
+ PADDLE_ENFORCE_GT(right_in_vars.size(), 0,
+ platform::errors::InvalidArgument(
+ "OpHandle(%s) inputs size must greater than 0.",
+ right->Name()));
+ return left_in_vars[0]->Name() > right_in_vars[0]->Name();
+ });
all_reduce_op_handles->insert(all_reduce_op_handles->end(),
current_all_reduce_op_handles.begin(),
@@ -170,7 +181,10 @@ class AllReduceDepsPass : public ir::Pass {
break;
}
}
- PADDLE_ENFORCE(find_valid_input, "Doesn't find valid input.");
+ PADDLE_ENFORCE_EQ(
+ find_valid_input, true,
+ platform::errors::NotFound(
+ "In OpHandle(%s) Doesn't find valid input.", op->Name()));
}
VLOG(10) << out2.str();
if (grads_of_stale_program != all_reduce_op_handles.size()) {
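The NOTE above explains why the sort matters: every trainer must launch its allReduce collectives in the same order, or ranks block on mismatched collectives and hang. The two size guards exist because the comparator keys on each handle's first input name. A stripped-down sketch of the same ordering rule (hypothetical Handle type, for illustration only):

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Hypothetical stand-in for an op handle: only the input names matter here.
struct Handle {
  std::vector<std::string> input_names;
};

// Sort handles by their first input name so every trainer runs collectives
// in the same order; an empty input list would make the sort key undefined,
// hence the guard, mirroring the PADDLE_ENFORCE_GT checks above.
void SortForDeterminism(std::vector<Handle*>* handles) {
  std::sort(handles->begin(), handles->end(),
            [](const Handle* l, const Handle* r) {
              assert(!l->input_names.empty() && !r->input_names.empty());
              return l->input_names[0] > r->input_names[0];
            });
}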
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/backward_optimizer_op_deps_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/backward_optimizer_op_deps_pass.cc
index 782c51a032c03..2aae14fa33391 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/backward_optimizer_op_deps_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/backward_optimizer_op_deps_pass.cc
@@ -179,9 +179,10 @@ class BackWardOpDepsPass : public ir::Pass {
// Currently, we assume that once gradient is generated, it can be
// broadcast, and each gradient is only broadcast once.
auto backward_vars = details::GetOpRoleVarsOrEmpty(op_desc);
- PADDLE_ENFORCE_EQ(node->IsWrappedBy<details::OpHandleBase>(), true,
- platform::errors::InvalidArgument(
- "Node must be wrapped by OpHandleBase"));
+ PADDLE_ENFORCE_EQ(
+ node->IsWrappedBy<details::OpHandleBase>(), true,
+ platform::errors::InvalidArgument(
+ "Node(%s) must be wrapped by OpHandleBase.", node->Name()));
backward_op_handles->emplace_back(&node->Wrapper<details::OpHandleBase>());
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
index 86fbbaf7720be..81c98ecf0c0b6 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
@@ -64,9 +64,10 @@ class FuseAllReduceOpPass : public ir::Pass {
PADDLE_ENFORCE_EQ(
all_reduce_ops.size(), grads.size(),
platform::errors::Unimplemented(
- "The number of all_reduce OpHandle is not equal to the "
- "number of grads. Maybe some gradients are sparse type, "
- "it is not supported currently."));
+ "The number of all_reduce OpHandle(%d) is not equal to the "
+ "number of grads(%d). Maybe some gradients are sparse type, "
+ "it is not supported currently.",
+ all_reduce_ops.size(), grads.size()));
auto &group_params_grads = graph->Get<details::GroupParamsAndGrads>(
details::kGroupParamsAndDenseGrads);
@@ -79,7 +80,10 @@ class FuseAllReduceOpPass : public ir::Pass {
for (auto &group_p_g : group_params_grads) {
size_t group_size = group_p_g.size();
- PADDLE_ENFORCE_GT(group_size, static_cast(0));
+ PADDLE_ENFORCE_GT(
+ group_size, static_cast<size_t>(0),
+ platform::errors::InvalidArgument(
+ "The group of Parameter and Parameter@Grad must not be empty."));
std::vector<ir::Node *> group_all_reduce_ops;
group_all_reduce_ops.reserve(group_size);
for (auto &p_g : group_p_g) {
@@ -103,26 +107,40 @@ class FuseAllReduceOpPass : public ir::Pass {
all_reduce_ops.reserve(grads.size());
for (auto &node : result.Nodes()) {
if (node->IsOp()) {
- PADDLE_ENFORCE(node->IsWrappedBy<details::OpHandleBase>());
+ PADDLE_ENFORCE_EQ(
+ node->IsWrappedBy<details::OpHandleBase>(), true,
+ platform::errors::InvalidArgument(
+ "Op node(%s) should be wrapped by OpHandleBase.", node->Name()));
auto *all_reduce_op_handle = dynamic_cast<details::AllReduceOpHandle *>(
&node->Wrapper<details::OpHandleBase>());
if (all_reduce_op_handle) {
#if defined(PADDLE_WITH_DGC)
PADDLE_ENFORCE_NE(
all_reduce_op_handle->Name(), "sparse_all_reduce",
- "DGC doesn't support fuse for now, if you want to use DGC "
- "you need set strategy.fuse_all_reduce_ops = False.");
+ platform::errors::InvalidArgument(
+ "DGC doesn't support fuse for now, if you want to use DGC "
+ "you need set strategy.fuse_all_reduce_ops = False."));
#endif
auto inputs = details::DynamicCast<details::VarHandle>(
all_reduce_op_handle->Inputs());
- PADDLE_ENFORCE_EQ(inputs.size(), num_place);
+ PADDLE_ENFORCE_EQ(inputs.size(), num_place,
+ platform::errors::InvalidArgument(
+ "The input size(%d) of all reduce op must "
+ "equal to place cnt(%d)!",
+ inputs.size(), num_place));
// The inputs' name should be the same.
auto &grad_name = inputs[0]->name();
for (size_t i = 1; i < inputs.size(); ++i) {
- PADDLE_ENFORCE_EQ(inputs[i]->name(), grad_name,
- "The input name should be the same.");
+ PADDLE_ENFORCE_EQ(
+ inputs[i]->name(), grad_name,
+ platform::errors::InvalidArgument(
+ "The input name should be the same.diff name: %s %s.",
+ inputs[i]->name(), grad_name));
}
- PADDLE_ENFORCE_NE(grads.count(grad_name), static_cast<size_t>(0));
+ PADDLE_ENFORCE_NE(
+ grads.count(grad_name), static_cast<size_t>(0),
+ platform::errors::InvalidArgument(
+ "Parameter@Grad(%s) must be in the grad set.", grad_name));
all_reduce_ops.emplace(grad_name, node);
}
}
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_check_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_check_pass.cc
index 8cc33a6ceb9f1..73f8cd67ee89e 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_check_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_check_pass.cc
@@ -24,7 +24,10 @@ namespace ir {
class SSAGraghBuilderWithChecker : public ir::Pass {
protected:
void ApplyImpl(ir::Graph *graph) const override {
- PADDLE_ENFORCE(IsValidGraph(graph));
+ PADDLE_ENFORCE_EQ(
+ IsValidGraph(graph), true,
+ platform::errors::InvalidArgument(
+ "In SSAGraghBuilderWithChecker, invalid Graph input."));
}
bool IsValidGraph(const ir::Graph *graph) const {
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
index 4fbd8a878a7cf..fd82d6b10e718 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc
@@ -163,7 +163,13 @@ void MultiDevSSAGraphBuilderBase::Init() const {
nccl_ctxs_ = multi_nccl_ctxs_->DefaultFlatCtx();
}
#endif
- PADDLE_ENFORCE_EQ(places_.size(), local_scopes_.size());
+ PADDLE_ENFORCE_EQ(
+ places_.size(), local_scopes_.size(),
+ platform::errors::InvalidArgument(
+ "Places size and LocalScopes not equal "
+ "Places size(%d), LocalScopes size(%d) "
+ "If use multi devices, Places size must equas to LocalScopes size.",
+ places_.size(), local_scopes_.size()));
}
void MultiDevSSAGraphBuilderBase::ApplyImpl(ir::Graph *graph) const {
@@ -500,7 +506,11 @@ void MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result,
SetCommunicationContext(op_handle, places_[i]);
auto &vars = result->Get<details::GraphVars>(details::kGraphVars)[i][og];
- PADDLE_ENFORCE(!vars.empty());
+ PADDLE_ENFORCE_EQ(vars.empty(), false,
+ platform::errors::InvalidArgument(
+ "Can not find Var(%s) in Place[%d] "
+ "Paddle Can not add AllReduce OP for Var(%s).",
+ og, i, og));
auto &prev_grad = vars.back();
op_handle->AddInput(prev_grad);
VLOG(10) << "all_reduce_op_handle add input " << prev_grad->DebugString();
@@ -566,7 +576,11 @@ details::VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp(
auto &p = places_[i];
SetCommunicationContext(op_handle, p);
auto &vars = result->Get<details::GraphVars>(details::kGraphVars)[i][og];
- PADDLE_ENFORCE(!vars.empty());
+ PADDLE_ENFORCE_EQ(vars.empty(), false,
+ platform::errors::InvalidArgument(
+ "Can not find Var(%s) in Place[%d] "
+ "Paddle Can not add Reduce OP for Var(%s).",
+ og, i, og));
auto &prev_grad = vars.back();
op_handle->AddInput(prev_grad);
}
@@ -590,7 +604,11 @@ bool MultiDevSSAGraphBuilderBase::IsScaleLossOp(ir::Node *node) const {
bool MultiDevSSAGraphBuilderBase::IsSparseGradient(
const std::string &og) const {
- PADDLE_ENFORCE(all_vars_.count(og) != 0);
+ PADDLE_ENFORCE_NE(all_vars_.count(og), 0,
+ platform::errors::InvalidArgument(
+ "Can not find Var(%s) in VarDescs "
+ "Paddle Can not add Collective OP for Var(%s).",
+ og, og));
return all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS;
}
@@ -641,10 +659,20 @@ int BalanceVarSSAGraphBuilder::GetOpDeviceID(ir::Node *node) const {
std::vector<std::string>,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
- PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
+ PADDLE_ENFORCE_EQ(
+ param_grad.size(), 2U,
+ platform::errors::InvalidArgument(
+ "In Node %s, the size of attribute %s must be 2, include Parameter "
+ "and Parameter@Grad.",
+ node->Name(), OpProtoAndCheckerMaker::OpRoleVarAttrName()));
int dev_id = GetVarDeviceID(param_grad[1]);
- PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
- node->Op()->Type(), param_grad[0], param_grad[1]);
+ PADDLE_ENFORCE_NE(dev_id, -1, platform::errors::NotFound(
+ "Can not find Device ID, for NodeName:%s, "
+ "NodeType:%s, Param:%s, Param@Grad:%s"
+ "For this fault, you can consult the "
+ "Paddle technical personnel for answer ",
+ node->Name(), node->Op()->Type(),
+ param_grad[0], param_grad[1]));
return dev_id;
}
@@ -654,10 +682,16 @@ size_t BalanceVarSSAGraphBuilder::GetAppropriateDeviceID(
for (auto var_name : var_names) {
if (all_vars_.find(var_name) == all_vars_.end()) continue;
auto var_desc = all_vars_.at(var_name);
- PADDLE_ENFORCE_NOT_NULL(var_desc);
+ PADDLE_ENFORCE_NOT_NULL(var_desc,
+ platform::errors::NotFound(
+ "Can not find Var(%s) in Var Desc.", var_name));
auto dim = framework::make_ddim(var_desc->GetShape());
int64_t numel = framework::product(dim);
- PADDLE_ENFORCE_GT(numel, 0);
+ PADDLE_ENFORCE_GT(numel, 0,
+ platform::errors::InvalidArgument(
+ "The numel of Var(%s) must greater than 0"
+ "Please check your code,about Var(%s) Shape.",
+ var_name, var_name));
numel_sum += numel;
}
@@ -736,7 +770,12 @@ int ReduceSSAGraphBuilder::GetOpDeviceID(
std::vector<std::string>,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
- PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
+ PADDLE_ENFORCE_EQ(
+ param_grad.size(), 2U,
+ platform::errors::InvalidArgument(
+ "In Node %s, The size of attribute %s must be 2, include Parameter "
+ "and Parameter@Grad.",
+ node->Name(), OpProtoAndCheckerMaker::OpRoleVarAttrName()));
int dev_id = GetVarDeviceID(param_grad[1]);
if (dev_id == -1) {
@@ -798,7 +837,12 @@ std::vector ReduceSSAGraphBuilder::SortForReduceMode(
}
}
- PADDLE_ENFORCE_EQ(sorted_ops.size(), topo_ops.size());
+ PADDLE_ENFORCE_EQ(sorted_ops.size(), topo_ops.size(),
+ platform::errors::InvalidArgument(
+ "Sorted ops calc error!"
+ "The result for sorted ops size(%d) must be "
+ "equal to topo ops size(%d).",
+ sorted_ops.size(), topo_ops.size()));
ResetState();
return sorted_ops;
@@ -820,14 +864,23 @@ bool DistSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result,
bool insert_op = false;
if (OpHaveRole(*node, OpRole::kRPC)) {
int op_dev_id = CreateRPCOp(result, node);
- PADDLE_ENFORCE(op_dev_id != -1,
- "Can not schedule the RPC operator to the right place.");
+ PADDLE_ENFORCE_NE(op_dev_id, -1, platform::errors::InvalidArgument(
+ "Can not schedule the RPC operator to "
+ "the right place. NodeName:%s.",
+ node->Name()));
if (node->Op()->Type() == "recv") {
auto recv_vars_attr =
BOOST_GET_CONST(std::vector<std::string>,
node->Op()->GetNullableAttr(
OpProtoAndCheckerMaker::OpRoleVarAttrName()));
- PADDLE_ENFORCE(recv_vars_attr.size() == 2UL); // [parameter, gradient]
+ PADDLE_ENFORCE_EQ(
+ recv_vars_attr.size(), 2UL,
+ platform::errors::InvalidArgument(
+ "In Node %s, the size of attribute %s must be 2, include "
+ "Parameter and Parameter@Grad.",
+ node->Name(),
+ OpProtoAndCheckerMaker::OpRoleVarAttrName())); // [parameter,
+ // gradient]
if (recv_vars_attr[0].find(".block") == std::string::npos) {
bcast_var_name_set_[op_dev_id].emplace(recv_vars_attr[0]);
}
@@ -879,8 +932,9 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
if (node->Op()->Type() == "send") {
// TODO(paddle-dev): getting the first var is not safe.
op_dev_id = GetVarDeviceID(node->inputs[0]->Name());
- PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]),
- "This hack no longer holds, please fix.");
+ PADDLE_ENFORCE_EQ(ir::IsControlDepVar(*node->inputs[0]), false,
+ platform::errors::InvalidArgument(
+ "This hack no longer holds, please fix."));
// the variable name which contains .block means it was split by
// split_byref op
if (strategy_.reduce_ ==
@@ -893,7 +947,12 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
auto send_param_grad = BOOST_GET_CONST(
std::vector<std::string>,
node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
- PADDLE_ENFORCE_EQ(send_param_grad.size(), 2U);
+ PADDLE_ENFORCE_EQ(
+ send_param_grad.size(), 2U,
+ platform::errors::InvalidArgument(
+ "In Node %s, the size of attribute %s must be 2, include "
+ "Parameter and Parameter@Grad.",
+ node->Name(), OpProtoAndCheckerMaker::OpRoleVarAttrName()));
op_dev_id = GetAppropriateDeviceID({send_param_grad[1]});
VLOG(10) << "send grad " << input_var_names[0] << " origin "
<< send_param_grad[1] << " place: " << op_dev_id;
@@ -926,9 +985,10 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
op_dev_id = 0;
}
- PADDLE_ENFORCE(op_dev_id != -1, "can not find the right place for rpc op: %s",
- node->Op()->Type());
-
+ PADDLE_ENFORCE_NE(
+ op_dev_id, -1,
+ platform::errors::NotFound("Can not find the right place for rpc op: %s.",
+ node->Op()->Type()));
// Create fetch_barrier op handle to enable output on all devices.
// **NOTE** fetch_barrier should output variables list same as recv op does.
if (node->Op()->Type() == "fetch_barrier") {
@@ -956,7 +1016,10 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const {
int outvar_dev_id = op_dev_id;
if (node->Op()->Type() == "fetch_barrier") {
outvar_dev_id = GetVarDeviceID(output->Name());
- PADDLE_ENFORCE_NE(outvar_dev_id, -1, "output name %s", output->Name());
+ PADDLE_ENFORCE_NE(outvar_dev_id, -1,
+ platform::errors::NotFound(
+ "Can not find the right place for the var: %s.",
+ output->Name()));
}
p = places_[outvar_dev_id];
ir::Node *new_node = nullptr;
@@ -1007,13 +1070,14 @@ int DistSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result,
} else {
LOG(ERROR) << "got unexpected dist op: " << node->Op()->Type();
PADDLE_THROW(
- "the distribute training related op should be in [split_byref, "
- "concat].");
+ platform::errors::Unimplemented("The distribute training related op "
+ "should be in [split_byref, concat]."));
}
- PADDLE_ENFORCE(op_dev_id != -1,
- "can not find right place for distributed op: %s",
- node->Op()->Type());
+ PADDLE_ENFORCE_NE(op_dev_id, -1,
+ platform::errors::NotFound(
+ "Can not find right place for distributed op: %s.",
+ node->Op()->Type()));
CreateComputationalOp(result, node, op_dev_id);
return op_dev_id;
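// ---------------------------------------------------------------------------
// [Editor's note, not part of the patch] The hunks above all follow the same
// recipe: replace an untyped PADDLE_ENFORCE(cond, ...) with a comparison
// macro plus an explicit platform::errors category, so a failure reports the
// compared values and an error class. An illustrative before/after sketch;
// `dev_id` and `op_type` are placeholder names:
//
//   // Before: boolean check, free-form (and optional) message.
//   PADDLE_ENFORCE(dev_id != -1, "dev_id should not be -1");
//
//   // After: typed comparison, categorized and formatted message.
//   PADDLE_ENFORCE_NE(dev_id, -1,
//                     platform::errors::NotFound(
//                         "Can not find the device for op %s.", op_type));
// ---------------------------------------------------------------------------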
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.cc
index efd549e79d0ef..a080b4bc33c53 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_print_pass.cc
@@ -28,7 +28,10 @@ class SSAGraghBuilderWithPrinterPass : public ir::Pass {
void ApplyImpl(ir::Graph *graph) const override {
std::unique_ptr<std::ostream> fout(
new std::ofstream(Get<std::string>(kGraphvizPath)));
- PADDLE_ENFORCE(fout->good());
+ PADDLE_ENFORCE_EQ(
+ fout->good(), true,
+ platform::errors::Unavailable("Open file fail! kGraphvizPath = %s.",
+ Get(kGraphvizPath)));
if (Has("graph_printer")) {
Get("graph_printer").Print(*graph, *fout);
} else {
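// [Editor's note, not part of the patch] Unavailable is the category chosen
// above for environment failures such as an unopenable output path. A minimal
// standalone sketch of the same guard; the path is made up:
//
//   std::ofstream fout("/tmp/graph.dot");
//   PADDLE_ENFORCE_EQ(fout.good(), true,
//                     platform::errors::Unavailable(
//                         "Failed to open file /tmp/graph.dot."));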
diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/sequential_execution_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/sequential_execution_pass.cc
index 7de3b7c605418..bcbd1e066cc1f 100644
--- a/paddle/fluid/framework/ir/multi_devices_graph_pass/sequential_execution_pass.cc
+++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/sequential_execution_pass.cc
@@ -54,11 +54,16 @@ class SequentialExecutionPass : public ir::Pass {
if (!node->IsOp()) continue;
std::unordered_set<ir::Node *> preceding_ops;
for (auto *in : node->inputs) {
- PADDLE_ENFORCE(in->IsVar(),
- "Preceding Node of Op Nodes must be Var Node");
+ PADDLE_ENFORCE_EQ(
+ in->IsVar(), true,
+ platform::errors::InvalidArgument(
+ "Preceding Node(%s) of Op Nodes must be Var Node.",
+ in->Name()));
if (in->inputs.empty()) continue;
- PADDLE_ENFORCE(in->inputs.size() == 1 && in->inputs[0]->IsOp(),
- "Preceding Op Node of Var Node must be unique");
+ PADDLE_ENFORCE_EQ((in->inputs.size() == 1 && in->inputs[0]->IsOp()),
+ true,
+ platform::errors::InvalidArgument(
+ "Preceding Op Node of Var Node must be unique."));
preceding_ops.insert(in->inputs[0]);
pending_ops[in->inputs[0]].insert(node);
}
@@ -72,15 +77,18 @@ class SequentialExecutionPass : public ir::Pass {
ir::Node *found_node = nullptr;
for (auto *node : ready_ops) {
if (IsSameOpDesc(op_desc, node->Op())) {
- PADDLE_ENFORCE(found_node == nullptr,
- "Found multiple op_desc in graph: %s",
- op_desc->Type());
+ PADDLE_ENFORCE_EQ(
+ found_node, nullptr,
+ platform::errors::InvalidArgument(
+ "Found multiple op_desc in graph: %s.", op_desc->Type()));
found_node = node;
}
}
- PADDLE_ENFORCE_NOT_NULL(found_node, "Cannot find op_desc in graph: %s",
- op_desc->Type());
+ PADDLE_ENFORCE_NOT_NULL(
+ found_node,
+ platform::errors::NotFound("Cannot find op_desc in graph: %s.",
+ op_desc->Type()));
for (auto *pending_op : pending_ops[found_node]) {
if (--op_deps.at(pending_op) == 0) {
ready_ops.insert(pending_op);
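// [Editor's note, not part of the patch] The loop above is a Kahn-style
// topological schedule: finishing an op decrements the dependency count of
// each pending successor, which becomes ready at zero. A minimal sketch of
// that bookkeeping (usual <unordered_map>/<set> includes assumed), with ints
// standing in for ir::Node pointers:
//
//   std::unordered_map<int, int> op_deps{{1, 1}, {2, 2}};
//   std::unordered_map<int, std::set<int>> pending{{0, {1, 2}}, {1, {2}}};
//   std::set<int> ready{0};
//   for (int succ : pending[0])  // op 0 has finished
//     if (--op_deps.at(succ) == 0) ready.insert(succ);  // op 1 is now ready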
diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h
index fbc0d7599eae1..87e7e64acb71a 100644
--- a/paddle/fluid/framework/ir/node.h
+++ b/paddle/fluid/framework/ir/node.h
@@ -66,12 +66,18 @@ class Node {
std::string Name() const { return name_; }
VarDesc* Var() const {
- PADDLE_ENFORCE_EQ(IsVar(), true);
+ PADDLE_ENFORCE_EQ(IsVar(), true,
+ platform::errors::InvalidArgument(
+ "Node(%s) must be kVariable type, but not %d.", name_,
+ static_cast(type_)));
return var_desc_.get();
}
OpDesc* Op() const {
- PADDLE_ENFORCE_EQ(IsOp(), true);
+ PADDLE_ENFORCE_EQ(IsOp(), true,
+ platform::errors::InvalidArgument(
+ "Node(%s) must be kOperation type, but not %d.",
+ name_, static_cast(type_)));
return op_desc_.get();
}
@@ -92,8 +98,9 @@ class Node {
try {
return *boost::any_cast<T*>(wrapper_);
} catch (boost::bad_any_cast&) {
- PADDLE_THROW("Invalid wrapper type error, expected %s, actual %s",
- typeid(T).name(), wrapper_type_.name());
+ PADDLE_THROW(platform::errors::InvalidArgument(
+ "Invalid wrapper type error, expected %s, actual %s.",
+ typeid(T).name(), wrapper_type_.name()));
}
}
@@ -114,8 +121,9 @@ class Node {
}
void RenameVar(const std::string& new_name) {
- PADDLE_ENFORCE(type_ == Type::kVariable && var_desc_,
- "Must be type of variable");
+ PADDLE_ENFORCE_EQ(
+ type_ == Type::kVariable && var_desc_, true,
+ platform::errors::InvalidArgument("Node must be type of variable."));
name_ = new_name;
var_desc_->SetName(new_name);
}
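// [Editor's note, not part of the patch] With the typed checks above, a
// caller that mixes up variable and operator nodes now fails with a message
// naming the node. Hypothetical usage, assuming `n` is an ir::Node* taken
// from a graph:
//
//   if (n->IsOp()) {
//     OpDesc* op = n->Op();  // enforce passes: n is a kOperation node
//     VLOG(3) << op->Type();
//   }
//   // Calling n->Var() on an operator node would now raise InvalidArgument:
//   // "Node(<name>) must be kVariable type, but its type is <id>."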
diff --git a/paddle/fluid/framework/ir/pass.cc b/paddle/fluid/framework/ir/pass.cc
index 78e8b16126484..fb95504d9a53a 100644
--- a/paddle/fluid/framework/ir/pass.cc
+++ b/paddle/fluid/framework/ir/pass.cc
@@ -26,7 +26,8 @@ namespace ir {
Graph* Pass::Apply(Graph* graph) const {
CheckPrevPass();
- PADDLE_ENFORCE(graph, "graph passed to Pass::Apply() cannot be empty.");
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
for (const std::string& attr : required_pass_attrs_) {
PADDLE_ENFORCE_NE(
attrs_.find(attr), attrs_.end(),
@@ -40,11 +41,14 @@ Graph* Pass::Apply(Graph* graph) const {
}
ApplyImpl(graph);
// TODO(panyx0718): Add more verifications.
- PADDLE_ENFORCE(!HasCircle(*graph),
- "Illegal Pass %s. Generated graph shouldn't have cycle.",
- Type());
- PADDLE_ENFORCE(VarDescIsConsistency(*graph),
- "The VarDescs of persistable variable are not consistency.");
+ PADDLE_ENFORCE_EQ(
+ HasCircle(*graph), false,
+ platform::errors::InvalidArgument(
+ "Illegal pass %s. Generated graph shouldn't contain cycle.", Type()));
+ PADDLE_ENFORCE_EQ(
+ VarDescIsConsistency(*graph), true,
+ platform::errors::InvalidArgument(
+ "The VarDescs of persistable variable are not consistency."));
applied_ = true;
if (!graph->Has(kPassRecorder)) {
graph->Set(kPassRecorder, new PassRecorder);
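// [Editor's note, not part of the patch] Pass::Apply() now rejects a null
// graph up front and validates the produced graph afterwards. A hypothetical
// caller's view, assuming the named pass is registered and configured:
//
//   auto pass = ir::PassRegistry::Instance().Get("sequential_execution_pass");
//   pass->Apply(graph);  // throws InvalidArgument if graph is nullptr, if
//                        // the result contains a cycle, or if persistable
//                        // VarDescs become inconsistent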
diff --git a/paddle/fluid/framework/ir/pass.h b/paddle/fluid/framework/ir/pass.h
index b7b46085b9067..0f5ef551f044d 100644
--- a/paddle/fluid/framework/ir/pass.h
+++ b/paddle/fluid/framework/ir/pass.h
@@ -55,8 +55,9 @@ class Pass {
// Get a reference to the attributed previously set.
template <typename AttrType>
AttrType &Get(const std::string &attr_name) const {
- PADDLE_ENFORCE(attrs_.find(attr_name) != attrs_.end(),
- "%s attr not registered for pass.", attr_name);
+ PADDLE_ENFORCE_NE(attrs_.find(attr_name), attrs_.end(),
+ platform::errors::InvalidArgument(
+ "Attribute %s not registered for pass.", attr_name));
try {
return *boost::any_cast<AttrType *>(attrs_.at(attr_name));
} catch (boost::bad_any_cast &) {
@@ -76,7 +77,7 @@ class Pass {
};
PADDLE_THROW(platform::errors::InvalidArgument(
- "Invalid type for attritube %s, expected: %s, actual: %s", attr_name,
+ "Invalid type for attritube %s, expected: %s, actual: %s.", attr_name,
TypeToString(typeid(AttrType *)),
TypeToString(attrs_.at(attr_name).type())));
}
@@ -101,9 +102,10 @@ class Pass {
template <typename AttrType>
void Set(const std::string &attr_name, AttrType *attr) {
if (default_pass_attrs_.count(attr_name) == 0) {
- PADDLE_ENFORCE_EQ(attrs_.count(attr_name), 0,
- platform::errors::InvalidArgument(
- "Attribute %s already set in the pass", attr_name));
+ PADDLE_ENFORCE_EQ(
+ attrs_.count(attr_name), 0,
+ platform::errors::AlreadyExists(
+ "Attribute %s already set in the pass.", attr_name));
} else {
VLOG(3) << "Setting the attribute " << attr_name << " for the pass "
<< type_;
@@ -119,15 +121,16 @@ class Pass {
// should delete the attribute.
template <typename AttrType>
void SetNotOwned(const std::string &attr_name, AttrType *attr) {
- PADDLE_ENFORCE(attrs_.count(attr_name) == 0, "%s already set in the pass",
- attr_name);
+ PADDLE_ENFORCE_EQ(attrs_.count(attr_name), 0,
+ platform::errors::AlreadyExists(
+ "Attribute %s already set in the pass.", attr_name));
attrs_[attr_name] = attr;
}
protected:
virtual void ApplyImpl(Graph *graph) const {
PADDLE_THROW(platform::errors::Unimplemented(
- "The virtual Pass called is not implemented."));
+ "The virtual pass called is not implemented."));
}
// Some Pass must be placed before this Pass, and some
@@ -198,8 +201,9 @@ class PassRegistry {
}
std::unique_ptr Get(const std::string &pass_type) const {
- PADDLE_ENFORCE(Has(pass_type), "Pass %s has not been registered",
- pass_type);
+ PADDLE_ENFORCE_EQ(Has(pass_type), true,
+ platform::errors::InvalidArgument(
+ "Pass %s has not been registered.", pass_type));
return map_.at(pass_type)();
}
@@ -213,8 +217,10 @@ class PassRegistry {
template <typename PassType>
struct PassRegistrar : public Registrar {
explicit PassRegistrar(const char *pass_type) {
- PADDLE_ENFORCE(!PassRegistry::Instance().Has(pass_type),
- "'%s' is registered more than once.", pass_type);
+ PADDLE_ENFORCE_EQ(
+ PassRegistry::Instance().Has(pass_type), false,
+ platform::errors::AlreadyExists(
+ "Pass '%s' is registered more than once.", pass_type));
PassRegistry::Instance().Insert(
pass_type, [this, pass_type]() -> std::unique_ptr<Pass> {
std::unique_ptr<Pass> pass(new PassType());
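// [Editor's note, not part of the patch] Registering the same pass type
// twice is now an AlreadyExists error rather than a generic enforce failure.
// Sketch with a hypothetical pass; REGISTER_PASS expands to a PassRegistrar:
//
//   class MyNoopPass : public Pass {
//    protected:
//     void ApplyImpl(Graph *graph) const override {}
//   };
//   REGISTER_PASS(my_noop_pass, MyNoopPass);
//   // A second REGISTER_PASS(my_noop_pass, ...) would raise AlreadyExists:
//   // "Pass 'my_noop_pass' is registered more than once."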
diff --git a/paddle/fluid/framework/ir/pass_builder.cc b/paddle/fluid/framework/ir/pass_builder.cc
index 8355764aa6c98..6457bd230c59c 100644
--- a/paddle/fluid/framework/ir/pass_builder.cc
+++ b/paddle/fluid/framework/ir/pass_builder.cc
@@ -28,13 +28,19 @@ std::shared_ptr PassBuilder::AppendPass(const std::string& pass_type) {
}
void PassBuilder::RemovePass(size_t idx) {
- PADDLE_ENFORCE(passes_.size() > idx);
+ PADDLE_ENFORCE_GT(
+ passes_.size(), idx,
+ platform::errors::InvalidArgument(
+ "Passes size is %d, %d is not a valid index.", passes_.size(), idx));
passes_.erase(passes_.begin() + idx);
}
std::shared_ptr<Pass> PassBuilder::InsertPass(size_t idx,
const std::string& pass_type) {
- PADDLE_ENFORCE(passes_.size() >= idx);
+ PADDLE_ENFORCE_GE(
+ passes_.size(), idx,
+ platform::errors::InvalidArgument(
+ "Passes size is %d, %d is not a valid index.", passes_.size(), idx));
std::shared_ptr<Pass> pass(
ir::PassRegistry::Instance().Get(pass_type).release());
passes_.insert(passes_.begin() + idx, std::move(pass));
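// [Editor's note, not part of the patch] The two bounds checks above differ
// deliberately: RemovePass needs an existing index (GT), while InsertPass may
// also append at the end (GE, so idx == size() is legal). Hypothetical usage,
// assuming "graph_viz_pass" is a registered pass name:
//
//   PassBuilder builder;
//   builder.AppendPass("graph_viz_pass");     // size() == 1
//   builder.InsertPass(1, "graph_viz_pass");  // ok: idx == size()
//   builder.RemovePass(2);                    // InvalidArgument: size is 2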
diff --git a/paddle/fluid/framework/ir/pass_test.cc b/paddle/fluid/framework/ir/pass_test.cc
index 14e94a2bc5c51..0c5286b3f77e1 100644
--- a/paddle/fluid/framework/ir/pass_test.cc
+++ b/paddle/fluid/framework/ir/pass_test.cc
@@ -119,7 +119,7 @@ TEST(PassTest, TestPassAttrCheck) {
} catch (paddle::platform::EnforceNotMet& e) {
exception = std::string(e.what());
}
- ASSERT_TRUE(exception.find("shouldn't have cycle") != exception.npos);
+ ASSERT_TRUE(exception.find("shouldn't contain cycle") != exception.npos);
pass = PassRegistry::Instance().Get("test_pass");
pass->Set("test_pass_attr", new int);
diff --git a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc
index 1f1a54f140b0d..4506c162fa743 100644
--- a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc
@@ -43,9 +43,11 @@ void DeleteQuant(ir::Graph* graph, Scope* scope,
// ops linked from it
auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
Graph* g) {
- PADDLE_ENFORCE_EQ(subgraph.count(input_act_node), true,
- platform::errors::NotFound(
- "Input act node not found in Delete Quant fusion."));
+ PADDLE_ENFORCE_EQ(
+ subgraph.count(input_act_node), true,
+ platform::errors::NotFound(
+ "Input act node(%s) not found in QuantDequantFuse pass.",
+ input_act_node->name()));
Node* input_act = subgraph.at(input_act_node);
Node* input_scale = subgraph.at(pattern.GetPDNode("input_scale_node"));
Node* quant = subgraph.at(pattern.GetPDNode("quant_node"));
@@ -58,7 +60,7 @@ void DeleteQuant(ir::Graph* graph, Scope* scope,
std::string input_scale_var_name = quant->Op()->Input("InScale").front();
PADDLE_ENFORCE_NOT_NULL(
scope, platform::errors::InvalidArgument(
- "scope in DeleteQuantOpFuse pass should not be null."));
+ "Scope in QuantDequantFuse pass should not be null."));
const LoDTensor& input_scale_tensor =
scope->FindVar(input_scale_var_name)->Get<LoDTensor>();
PADDLE_ENFORCE_EQ(
@@ -84,8 +86,8 @@ void DeleteQuant(ir::Graph* graph, Scope* scope,
} else if (quantized_op_type == "mul") {
op_desc->SetAttr("X_scale", scale_value);
} else {
- PADDLE_THROW(platform::errors::InvalidArgument(
- "Unsupported quantized op type %s", quantized_op_type));
+ PADDLE_THROW(platform::errors::Unimplemented(
+ "Unsupported quantized op type %s.", quantized_op_type));
}
op_desc->SetAttr("bit_length", bit_length);
op_desc->RenameInput(output_act_name, input_act_name);
@@ -119,9 +121,9 @@ void FuseDequant(ir::Graph* graph, Scope* scope,
weight_name = "W";
input_name = "Input";
} else {
- PADDLE_ENFORCE(
+ PADDLE_THROW(platform::errors::Unimplemented(
"QuantDequantFuse: We only support conv2d, conv2d_fusion, fc, mul for "
- "now.");
+ "now."));
}
const std::string pattern_name = "dequant_fuse";
GraphPatternDetector gpd;
@@ -141,8 +143,9 @@ void FuseDequant(ir::Graph* graph, Scope* scope,
Graph* g) {
PADDLE_ENFORCE_EQ(
subgraph.count(quantized_op_input), true,
- platform::errors::NotFound(
- "Quantized op input node not found in Delete Quant fusion."));
+ platform::errors::NotFound("Quantized op input node(%s) did not find "
+ "in QuantDequantFuse pass.",
+ quantized_op_input->name()));
Node* quantized_op_input_node = subgraph.at(quantized_op_input);
Node* quantized_op_weight_node =
subgraph.at(pattern.GetPDNode("quantized_op_weight"));
@@ -165,7 +168,7 @@ void FuseDequant(ir::Graph* graph, Scope* scope,
PADDLE_ENFORCE_EQ(
scales_name.size(), 2,
platform::errors::InvalidArgument(
- "Scales size in channel-wise dequantize op should be 2, got %d",
+ "Scales size in channel-wise dequantize op should be 2, got %d.",
scales_name.size()));
const LoDTensor& channel_scale_tensor =
scope->FindVar(scales_name[0])->Get<LoDTensor>();
@@ -193,9 +196,10 @@ void FuseDequant(ir::Graph* graph, Scope* scope,
bool valid_scale_size =
(weight_scale.size() == 1 ||
weight_scale.size() == static_cast<size_t>(w_dims[0]));
- PADDLE_ENFORCE_EQ(valid_scale_size, true,
- platform::errors::InvalidArgument(
- "TRT int8 quant: invalid scale size"));
+ PADDLE_ENFORCE_EQ(
+ valid_scale_size, true,
+ platform::errors::InvalidArgument(
+ "TRT int8 quant: invalid scale size(%d).", weight_scale.size()));
float* quantized_weight_data =
weight_tensor->mutable_data<float>(platform::CPUPlace());
for (int j = 0; j < weight_tensor->numel(); j++) {
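// [Editor's note, not part of the patch] The valid_scale_size check above
// encodes the dequantize contract: either one per-tensor scale or one scale
// per output channel (w_dims[0]). Worked example with made-up shapes:
//
//   // weight w_dims = [64, 3, 3, 3]  ->  valid weight_scale sizes: 1 or 64
//   bool valid = weight_scale.size() == 1 || weight_scale.size() == 64u;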
diff --git a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc
index dddb2affbbad0..2396a7f3c4f84 100644
--- a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc
@@ -278,11 +278,12 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
auto retrieve_node = [](const std::string& name,
const GraphPatternDetector::subgraph_t& subgraph,
const PDPattern& pat) -> Node* {
- PADDLE_ENFORCE(subgraph.count(pat.RetrieveNode(name)),
- "pattern has no Node called %s", name.c_str());
+ PADDLE_ENFORCE_GT(subgraph.count(pat.RetrieveNode(name)), 0,
+ platform::errors::NotFound(
+ "Pattern has no node called %s.", name.c_str()));
Node* p = subgraph.at(pat.RetrieveNode(name));
- PADDLE_ENFORCE_NOT_NULL(
- p, platform::errors::NotFound("subgraph has no node %s", name.c_str()));
+ PADDLE_ENFORCE_NOT_NULL(p, platform::errors::NotFound(
+ "Subgraph has no node %s.", name.c_str()));
return p;
};
@@ -365,7 +366,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
}
void RepeatedFCReluFusePass::ApplyImpl(ir::Graph* graph) const {
- PADDLE_ENFORCE_NOT_NULL(graph);
+ PADDLE_ENFORCE_NOT_NULL(
+ graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
int fusion_count = 0;
diff --git a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc
index 81d9476d409d9..283fe3797e454 100644
--- a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc
+++ b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass_tester.cc
@@ -55,9 +55,15 @@ void TestMain(int num_fc) {
VLOG(3) << DebugString(graph);
// Delete (num_fc_nodes_before - 1) fc ops
- PADDLE_ENFORCE_EQ(num_nodes_before - (num_fc_nodes_before - 1) + 1,
- num_nodes_after);
- PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1);
+ PADDLE_ENFORCE_EQ(
+ num_nodes_before - (num_fc_nodes_before - 1) + 1, num_nodes_after,
+ platform::errors::InvalidArgument(
+ "num_nodes_before = %d, num_fc_nodes_before = %d, num_nodes_after = "
+ "%d.",
+ num_nodes_before, num_fc_nodes_before, num_nodes_after));
+ PADDLE_ENFORCE_EQ(num_fused_nodes_after, 1,
+ platform::errors::InvalidArgument(
+ "num_fused_nodes_after = %d.", num_fused_nodes_after));
}
TEST(RepeatedFCReluFusePass, basic_3) { TestMain(3); }
diff --git a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc
index bd826709b1d88..19ec2d818a3db 100644
--- a/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/seq_concat_fc_fuse_pass.cc
@@ -185,11 +185,13 @@ void SeqConcatFcFusePass::ApplyImpl(ir::Graph* graph) const {
auto* concat_out = BuildSeqExpandConcatPattern(pattern);
BuildFCPattern(pattern, concat_out);
-#define GET_NODE(id, pattern) \
- PADDLE_ENFORCE(subgraph.count(pattern.RetrieveNode(#id)), \
- "pattern has no Node called %s", #id); \
- auto* id = subgraph.at(pattern.RetrieveNode(#id)); \
- PADDLE_ENFORCE_NOT_NULL(id, "subgraph has no node %s", #id);
+#define GET_NODE(id, pattern) \
+ PADDLE_ENFORCE_GT( \
+ subgraph.count(pattern.RetrieveNode(#id)), 0, \
+ platform::errors::NotFound("Pattern has no node called %s.", #id)); \
+ auto* id = subgraph.at(pattern.RetrieveNode(#id)); \
+ PADDLE_ENFORCE_NOT_NULL( \
+ id, platform::errors::NotFound("Subgraph has no node %s.", #id));
int fuse_count{0};
diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc
index ea376b371f592..1c220ee4d5718 100644
--- a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc
@@ -139,11 +139,12 @@ static int BuildFusion(Graph* graph, const std::string& name_scope,
auto retrieve_node = [](const std::string& name,
const GraphPatternDetector::subgraph_t& subgraph,
const PDPattern& pat) -> Node* {
- PADDLE_ENFORCE(subgraph.count(pat.RetrieveNode(name)),
- "pattern has no Node called %s", name.c_str());
+ PADDLE_ENFORCE_GT(subgraph.count(pat.RetrieveNode(name)), 0,
+ platform::errors::NotFound(
+ "Pattern has no node called %s.", name.c_str()));
Node* p = subgraph.at(pat.RetrieveNode(name));
- PADDLE_ENFORCE_NOT_NULL(
- p, platform::errors::NotFound("subgraph has no node %s", name.c_str()));
+ PADDLE_ENFORCE_NOT_NULL(p, platform::errors::NotFound(
+ "Subgraph has no node %s.", name.c_str()));
return p;
};
diff --git a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
index 92d2a6acbb9f7..d9a65e71592ff 100644
--- a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
+++ b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc
@@ -47,7 +47,9 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const {
Graph* g) {
GET_NODES;
- PADDLE_ENFORCE(subgraph.count(x));
+ PADDLE_ENFORCE_GT(
+ subgraph.count(x), 0,
+ platform::errors::NotFound("Detector did not find input X."));
auto* input_node = subgraph.at(x);
auto reshape1_desc = reshape1_op->Op();
auto reshape2_desc = reshape2_op->Op();
diff --git a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass_tester.cc b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass_tester.cc
index 324b9c0b7da24..80f387c442760 100644
--- a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass_tester.cc
+++ b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass_tester.cc
@@ -59,12 +59,25 @@ TEST(SimplifyWithBasicOpsPass, dropout) {
int num_scale_nodes_after = GetNumOpNodes(graph, "scale");
VLOG(3) << DebugString(graph);
- PADDLE_ENFORCE_EQ(num_dropout_nodes_after, 0);
+ PADDLE_ENFORCE_EQ(
+ num_dropout_nodes_after, 0,
+ platform::errors::InvalidArgument("num_dropout_nodes_after = %d.",
+ num_dropout_nodes_after));
if (dropout_implementation == "downgrade_in_infer") {
- PADDLE_ENFORCE_EQ(num_dropout_nodes_before,
- num_scale_nodes_after - num_scale_nodes_before);
+ PADDLE_ENFORCE_EQ(
+ num_dropout_nodes_before,
+ num_scale_nodes_after - num_scale_nodes_before,
+ platform::errors::InvalidArgument(
+ "num_dropout_nodes_before = %d, num_scale_nodes_after = %d, "
+ "num_scale_nodes_before = %d.",
+ num_dropout_nodes_before, num_scale_nodes_after,
+ num_scale_nodes_before));
} else {
- PADDLE_ENFORCE_EQ(num_scale_nodes_after - num_scale_nodes_before, 0);
+ PADDLE_ENFORCE_EQ(
+ num_scale_nodes_after - num_scale_nodes_before, 0,
+ platform::errors::InvalidArgument(
+ "num_scale_nodes_after = %d, num_scale_nodes_before = %d.",
+ num_scale_nodes_after, num_scale_nodes_before));
}
}
}
diff --git a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc
index 6d908b4362b80..035b198bdcc51 100644
--- a/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/squared_mat_sub_fuse_pass.cc
@@ -300,10 +300,12 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) {
auto retrieve_node = [](const std::string& name,
const GraphPatternDetector::subgraph_t& subgraph,
const PDPattern& pat) -> Node* {
- PADDLE_ENFORCE(subgraph.count(pat.RetrieveNode(name)),
- "pattern has no Node called %s", name.c_str());
+ PADDLE_ENFORCE_GT(subgraph.count(pat.RetrieveNode(name)), 0,
+ platform::errors::NotFound(
+ "Pattern has no node called %s.", name.c_str()));
Node* p = subgraph.at(pat.RetrieveNode(name));
- PADDLE_ENFORCE_NOT_NULL(p, "subgraph has no node %s", name.c_str());
+ PADDLE_ENFORCE_NOT_NULL(p, platform::errors::NotFound(
+ "Subgraph has no node %s.", name.c_str()));
return p;
};
diff --git a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc
index 90ffaada055a9..9a0a5f07a7080 100644
--- a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc
+++ b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc
@@ -51,15 +51,25 @@ void RunTransposeFlattenConcatFuse(ir::Graph *graph, int times) {
std::vector