Refine the calling of PADDLE_ENFORCE.
test=develop
Xreki committed Dec 27, 2019
1 parent fc8a6eb commit cf97946
Showing 5 changed files with 45 additions and 29 deletions.
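The change is mechanical across the five files: every PADDLE_ENFORCE_* or PADDLE_THROW call that passed a bare format string now wraps its message in a platform::errors::* class, and the messages echo the value actually received. A minimal before/after sketch of the pattern, using the first check in fusion_group_op.cc:

    // Before: condition plus a bare message string.
    PADDLE_ENFORCE_GE(ctx->Inputs("Inputs").size(), 1UL,
                      "The number of inputs should be no less than 1.");

    // After: the message is wrapped in an error class and reports the
    // received value, so the failure is diagnosable from the log alone.
    PADDLE_ENFORCE_GE(
        num_ins, 1UL,
        platform::errors::InvalidArgument(
            "Expected the number of inputs >= 1. Received %d.", num_ins));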
22 changes: 13 additions & 9 deletions paddle/fluid/operators/fused/fusion_group_op.cc
@@ -22,25 +22,29 @@ class FusionGroupOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_GE(ctx->Inputs("Inputs").size(), 1UL,
-                      "The number of inputs should be no less than 1.");
-    PADDLE_ENFORCE_GE(ctx->Outputs("Outs").size(), 1UL,
-                      "The number of outputs should be no less than 1.");
-
     const size_t num_ins = ctx->Inputs("Inputs").size();
     const size_t num_outs = ctx->Outputs("Outs").size();
 
+    PADDLE_ENFORCE_GE(
+        num_ins, 1UL,
+        platform::errors::InvalidArgument(
+            "Expected the number of inputs >= 1. Received %d.", num_ins));
+    PADDLE_ENFORCE_GE(
+        num_outs, 1UL,
+        platform::errors::InvalidArgument(
+            "Expected the number of outputs >= 1. Received %d.", num_outs));
+
     int type = ctx->Attrs().Get<int>("type");
     PADDLE_ENFORCE_EQ(type, 0UL,
-                      "Only support fusion of elementwise operations.");
+                      platform::errors::InvalidArgument(
+                          "Only support fusion of elementwise operations."));
 
     std::vector<framework::DDim> x_dims = ctx->GetInputsDim("Inputs");
     PADDLE_ENFORCE_EQ(x_dims.size(), num_ins);
 
     if (type == 0) {
       for (size_t i = 1; i < num_ins; ++i) {
         PADDLE_ENFORCE_EQ(x_dims[0], x_dims[i],
-                          "All the inputs' dims should be the same.");
+                          platform::errors::InvalidArgument(
+                              "All the inputs' dims should be the same."));
       }
       std::vector<framework::DDim> out_dims;
       for (size_t j = 0; j < num_outs; ++j) {
4 changes: 2 additions & 2 deletions paddle/fluid/operators/fused/fusion_group_op_test.cc
@@ -58,7 +58,7 @@ framework::OpDesc* CreateFusionGroupOp(
     const std::vector<std::vector<int64_t>>& input_shapes,
     const std::vector<std::string>& output_names, int type,
     std::string func_name) {
-  PADDLE_ENFORCE_EQ(input_names.size(), input_shapes.size());
+  EXPECT_EQ(input_names.size(), input_shapes.size());
 
   for (size_t i = 0; i < input_names.size(); ++i) {
     auto* var = program->MutableBlock(0)->Var(input_names[i]);
@@ -122,7 +122,7 @@ void CheckOutputs(framework::Scope* scope,
     int64_t length = cpu_outputs[j].numel();
     LOG(INFO) << "Check the " << j << "th output...";
     for (int64_t i = 0; i < length; ++i) {
-      PADDLE_ENFORCE_LT(fabs(dev_ptr[i] - cpu_ptr[i]), 1.E-05);
+      EXPECT_NEAR(dev_ptr[i], cpu_ptr[i], 1.E-05);
     }
   }
 }
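In the test files, by contrast, the checks move off PADDLE_ENFORCE_* onto googletest assertions, which record a failure with both operands printed instead of throwing. A standalone sketch of the tolerance check that replaces the fabs() comparison (plain googletest; the sample data is made up):

    #include "gtest/gtest.h"

    TEST(CheckOutputs, ElementwiseTolerance) {
      float dev_ptr[3] = {0.5f, 1.5f, 2.5f};
      float cpu_ptr[3] = {0.5f, 1.5f, 2.500005f};  // differs by 5e-6
      for (int i = 0; i < 3; ++i) {
        // EXPECT_NEAR(a, b, eps) asserts |a - b| <= eps and, on failure,
        // prints a, b, and eps, then lets the remaining checks run.
        EXPECT_NEAR(dev_ptr[i], cpu_ptr[i], 1.E-05);
      }
    }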
34 changes: 22 additions & 12 deletions paddle/fluid/platform/device_code.cc
@@ -29,7 +29,8 @@ void DeviceCodePool::Set(std::unique_ptr<DeviceCode>&& code) {
 
   auto iter = device_codes_.find(place);
   if (iter == device_codes_.end()) {
-    PADDLE_THROW("Place %s is not supported.", place);
+    PADDLE_THROW(platform::errors::NotFound(
+        "Place %s is not supported for runtime compiling.", place));
   }
 
   auto& codes_map = iter->second;
@@ -40,21 +40,26 @@ platform::DeviceCode* DeviceCodePool::Get(const platform::Place& place,
                                           const std::string& name) {
   auto iter = device_codes_.find(place);
   if (iter == device_codes_.end()) {
-    PADDLE_THROW("Place %s is not supported.", place);
+    PADDLE_THROW(platform::errors::NotFound(
+        "Place %s is not supported for runtime compiling.", place));
   }
 
   auto& codes_map = iter->second;
   auto code_iter = codes_map.find(name);
   if (code_iter == codes_map.end()) {
-    PADDLE_THROW("There is not a device code named %s for place %s.",
-                 name.c_str(), place);
+    PADDLE_THROW(platform::errors::NotFound(
+        "Device code named %s for place %s does not exist.", name.c_str(),
+        place));
   }
 
   return code_iter->second.get();
 }
 
 DeviceCodePool::DeviceCodePool(const std::vector<platform::Place>& places) {
-  PADDLE_ENFORCE_GT(places.size(), 0);
+  PADDLE_ENFORCE_GT(
+      places.size(), 0,
+      errors::InvalidArgument(
+          "Expected the number of places >= 1. Received %d.", places.size()));
   // Remove the duplicated places
   std::set<Place> set;
   for (auto& p : places) {
@@ -65,9 +71,8 @@ DeviceCodePool::DeviceCodePool(const std::vector<platform::Place>& places) {
 #ifdef PADDLE_WITH_CUDA
       device_codes_.emplace(p, DeviceCodeMap());
 #else
-      PADDLE_THROW(
-          "'CUDAPlace' is not supported, Please re-compile with WITH_GPU "
-          "option");
+      PADDLE_THROW(platform::errors::PreconditionNotMet(
+          "CUDAPlace is not supported, please re-compile with WITH_GPU=ON."));
 #endif
   }
 }
@@ -77,7 +82,8 @@
 CUDADeviceCode::CUDADeviceCode(const Place& place, const std::string& name,
                                const std::string& kernel) {
   if (!is_gpu_place(place)) {
-    PADDLE_THROW("CUDADeviceCode can only launch on GPU place.");
+    PADDLE_THROW(platform::errors::PermissionDenied(
+        "CUDADeviceCode can only launch on GPU place."));
   }
 
   place_ = place;
@@ -170,7 +176,10 @@ bool CUDADeviceCode::Compile() {
 }
 
 void CUDADeviceCode::Launch(const size_t n, std::vector<void*>* args) const {
-  PADDLE_ENFORCE_EQ(is_compiled_, true, "Please compile the code first.");
+  PADDLE_ENFORCE_EQ(
+      is_compiled_, true,
+      errors::PreconditionNotMet(
+          "Please compile the code before launching the kernel."));
 
   int max_blocks = std::max(max_threads_ / num_threads_, 1);
   int workload_per_block = workload_per_thread_ * num_threads_;
@@ -187,8 +196,9 @@ void CUDADeviceCode::Launch(const size_t n, std::vector<void*>* args) const {
           dev_ctx->stream(),  // stream
           args->data(),       // arguments
           nullptr),
-      CUDA_SUCCESS, "Fail to launch kernel %s (in cuLaunchKernel.)",
-      name_.c_str());
+      CUDA_SUCCESS,
+      errors::External("Failed to launch kernel %s (in cuLaunchKernel).",
+                       name_.c_str()));
 }
 
 bool CUDADeviceCode::CheckNVRTCResult(nvrtcResult result,
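Each call site above also picks an error class matching the failure kind: errors::NotFound for failed lookups, errors::PreconditionNotMet for a missing WITH_GPU build or launching before compiling, errors::PermissionDenied for constructing on a non-GPU place, and errors::External for CUDA driver errors. A sketch of the same convention applied to a new check (this setter is hypothetical, not part of the commit):

    // Hypothetical check in the commit's style: state the expectation,
    // echo the received value, and choose the matching error class.
    void CUDADeviceCode::SetWorkloadPerThread(int workload_per_thread) {
      PADDLE_ENFORCE_GT(workload_per_thread, 0,
                        platform::errors::InvalidArgument(
                            "Expected workload_per_thread > 0. Received %d.",
                            workload_per_thread));
      workload_per_thread_ = workload_per_thread;
    }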
5 changes: 4 additions & 1 deletion paddle/fluid/platform/device_code.h
@@ -78,7 +78,10 @@ class DeviceCodePool {
   explicit DeviceCodePool(const std::vector<platform::Place>& places);
 
   static DeviceCodePool& Instance() {
-    PADDLE_ENFORCE_NOT_NULL(pool, "Need to create DeviceCodePool first!");
+    PADDLE_ENFORCE_NOT_NULL(
+        pool,
+        errors::NotFound("Need to create DeviceCodePool first, by calling "
+                         "DeviceCodePool::Init(places)!"));
     return *pool;
   }
 
9 changes: 4 additions & 5 deletions paddle/fluid/platform/device_code_test.cc
@@ -68,7 +68,7 @@ TEST(DeviceCode, cuda) {
   TensorCopySync(cpu_x, place, &x);
   TensorCopySync(cpu_y, place, &y);
 
-  PADDLE_ENFORCE_EQ(code.Compile(), true, "Compiling of device code failed.");
+  EXPECT_EQ(code.Compile(), true);
 
   std::vector<void*> args = {&scale, &x_data, &y_data, &z_data, &n};
   code.SetNumThreads(1024);
@@ -80,8 +80,7 @@
 
   TensorCopySync(z, paddle::platform::CPUPlace(), &cpu_z);
   for (size_t i = 0; i < n; i++) {
-    PADDLE_ENFORCE_EQ(cpu_z.data<float>()[i],
-                      static_cast<float>(i) * scale + 0.5);
+    EXPECT_EQ(cpu_z.data<float>()[i], static_cast<float>(i) * scale + 0.5);
   }
 }
 
@@ -95,14 +94,14 @@ TEST(DeviceCodePool, cuda) {
   paddle::platform::DeviceCodePool& pool =
       paddle::platform::DeviceCodePool::Init({place});
   size_t num_device_codes_before = pool.size(place);
-  PADDLE_ENFORCE_EQ(num_device_codes_before, 0UL);
+  EXPECT_EQ(num_device_codes_before, 0UL);
 
   std::unique_ptr<paddle::platform::DeviceCode> code(
       new paddle::platform::CUDADeviceCode(place, "saxpy_kernel", saxpy_code));
   LOG(INFO) << "origin ptr: " << code.get();
   pool.Set(std::move(code));
   size_t num_device_codes_after = pool.size(place);
-  PADDLE_ENFORCE_EQ(num_device_codes_after, 1UL);
+  EXPECT_EQ(num_device_codes_after, 1UL);
 
   paddle::platform::DeviceCode* code_get = pool.Get(place, "saxpy_kernel");
   LOG(INFO) << "get ptr: " << code_get;
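Read end to end, the test sketches the intended pool workflow; condensed (CUDA setup, saxpy_code, and assertions omitted; names as they appear in device_code_test.cc):

    paddle::platform::CUDAPlace place(0);

    // Init() creates the singleton; calling Instance() before Init() now
    // raises errors::NotFound instead of an uncategorized enforce failure.
    paddle::platform::DeviceCodePool& pool =
        paddle::platform::DeviceCodePool::Init({place});

    // Register a kernel's source under a name...
    std::unique_ptr<paddle::platform::DeviceCode> code(
        new paddle::platform::CUDADeviceCode(place, "saxpy_kernel", saxpy_code));
    pool.Set(std::move(code));

    // ...and look it up later; an unknown name now raises errors::NotFound.
    paddle::platform::DeviceCode* code_get = pool.Get(place, "saxpy_kernel");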
