Revert "[CherryPick] Fix issue 60092 (PaddlePaddle#61427)"
This reverts commit f025385.
hanhaowen-mt committed May 13, 2024
1 parent e4b1793 commit caf0458
Showing 13 changed files with 441 additions and 4 deletions.
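In effect, the revert restores the standalone inference benchmark utility (benchmark.h / benchmark.cc) and its tester, re-enables the table_printer tester, and re-adds the CMake wiring for both.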
2 changes: 1 addition & 1 deletion paddle/fluid/inference/CMakeLists.txt
@@ -35,7 +35,7 @@ get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
get_property(phi_modules GLOBAL PROPERTY PHI_MODULES)
get_property(ir_targets GLOBAL PROPERTY IR_TARGETS)
get_property(not_infer_modules GLOBAL PROPERTY NOT_INFER_MODULES)
-set(utils_modules pretty_log string_helper utf8proc)
+set(utils_modules pretty_log string_helper benchmark utf8proc)

if(NOT WITH_GFLAGS)
set(utils_modules ${utils_modules} paddle_flags)
13 changes: 13 additions & 0 deletions paddle/fluid/inference/utils/CMakeLists.txt
@@ -1,3 +1,8 @@
+cc_library(
+  benchmark
+  SRCS benchmark.cc
+  DEPS enforce common)
+paddle_test(test_benchmark SRCS benchmark_tester.cc DEPS benchmark)
cc_library(
  infer_io_utils
  SRCS io_utils.cc
@@ -8,5 +13,13 @@ cc_library(
  DEPS proto_desc enforce common)

cc_library(table_printer SRCS table_printer.cc)
+paddle_test(test_table_printer SRCS table_printer_tester.cc)

proto_library(shape_range_info_proto SRCS shape_range_info.proto)

+if(WITH_ONNXRUNTIME AND WIN32)
+  # Copy onnxruntime for some C++ tests on Windows; since the tests are
+  # built only in CI, we assume the generator on Windows is Ninja.
+  copy_onnx(test_benchmark)
+  copy_onnx(test_table_printer)
+endif()
54 changes: 54 additions & 0 deletions paddle/fluid/inference/utils/benchmark.cc
@@ -0,0 +1,54 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/utils/benchmark.h"

#include <fstream>

#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace inference {

std::string Benchmark::SerializeToString() const {
  std::stringstream ss;
  ss << "-----------------------------------------------------\n";
  ss << "name\t";
  ss << "batch_size\t";
  ss << "num_threads\t";
  ss << "latency\t";
  ss << "qps";
  ss << '\n';

  ss << name_ << "\t";
  ss << batch_size_ << "\t\t";
  ss << num_threads_ << "\t";
  ss << latency_ << "\t";
  ss << 1000.0 / latency_;
  ss << '\n';
  return ss.str();
}

void Benchmark::PersistToFile(const std::string &path) const {
  std::ofstream file(path, std::ios::app);
  PADDLE_ENFORCE_EQ(
      file.is_open(),
      true,
      platform::errors::Unavailable("Can not open %s to add benchmark.", path));
  file << SerializeToString();
  file.flush();
  file.close();
}

} // namespace inference
} // namespace paddle
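Note on the serialized record: the qps column is computed as 1000.0 / latency_, which assumes latency_ holds a per-batch latency in milliseconds; a latency of 220 ms, for example, serializes as roughly 4.55 qps.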
56 changes: 56 additions & 0 deletions paddle/fluid/inference/utils/benchmark.h
@@ -0,0 +1,56 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <fstream>
#include <iostream>
#include <string>

#include "paddle/utils/test_macros.h"

namespace paddle {
namespace inference {

/*
 * Helper class to calculate the performance.
 */
struct TEST_API Benchmark {
  int batch_size() const { return batch_size_; }
  void SetBatchSize(int x) { batch_size_ = x; }

  int num_threads() const { return num_threads_; }
  void SetNumThreads(int x) { num_threads_ = x; }

  bool use_gpu() const { return use_gpu_; }
  void SetUseGpu() { use_gpu_ = true; }

  float latency() const { return latency_; }
  void SetLatency(float x) { latency_ = x; }

  const std::string& name() const { return name_; }
  void SetName(const std::string& name) { name_ = name; }

  std::string SerializeToString() const;
  void PersistToFile(const std::string& path) const;

 private:
  bool use_gpu_{false};
  int batch_size_{0};
  float latency_;
  int num_threads_{1};
  std::string name_;
};

} // namespace inference
} // namespace paddle
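For reference, a minimal usage sketch of the helper above, assuming it links against the benchmark library target added in this commit; the model name and field values here are illustrative, not part of the commit:

#include "paddle/fluid/inference/utils/benchmark.h"

int main() {
  paddle::inference::Benchmark benchmark;
  benchmark.SetName("my_model");  // hypothetical model name
  benchmark.SetBatchSize(8);
  benchmark.SetNumThreads(4);
  benchmark.SetLatency(125.0f);  // per-batch latency in milliseconds
  // Appends one tab-separated record (name, batch_size, num_threads,
  // latency, qps) to the given file via PersistToFile above.
  benchmark.PersistToFile("benchmark.log");
  return 0;
}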
40 changes: 40 additions & 0 deletions paddle/fluid/inference/utils/benchmark_tester.cc
@@ -0,0 +1,40 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <glog/logging.h>
#include <gtest/gtest.h>

#include "paddle/fluid/inference/utils/benchmark.h"

using namespace paddle::inference;  // NOLINT
TEST(Benchmark, basic) {
  Benchmark benchmark;
  benchmark.SetName("key0");
  benchmark.SetBatchSize(10);
  benchmark.SetUseGpu();
  benchmark.SetLatency(220);
  LOG(INFO) << "benchmark:\n" << benchmark.SerializeToString();
}

TEST(Benchmark, PersistToFile) {
  Benchmark benchmark;
  benchmark.SetName("key0");
  benchmark.SetBatchSize(10);
  benchmark.SetUseGpu();
  benchmark.SetLatency(220);

  benchmark.PersistToFile("1.log");
  benchmark.PersistToFile("2.log");
  benchmark.PersistToFile("3.log");
}
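Because PersistToFile opens its target with std::ios::app, each call appends one record, so repeated runs of this test accumulate entries in 1.log, 2.log, and 3.log rather than overwriting them.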
82 changes: 82 additions & 0 deletions paddle/fluid/inference/utils/table_printer_tester.cc
@@ -0,0 +1,82 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <glog/logging.h>
#include <gtest/gtest.h>

#include "paddle/fluid/inference/utils/table_printer.h"

namespace paddle {
namespace inference {} // namespace inference
} // namespace paddle

TEST(table_printer, output) {
  std::vector<std::string> header{"config", "value"};
  paddle::inference::TablePrinter table(header);

  // model_dir
  table.InsertRow({"model_dir", "./model_dir"});
  // model
  table.InsertRow({"model_file", "./model.pdmodel"});
  table.InsertRow({"params_file", "./model.pdiparams"});

  table.InsetDivider();
  // gpu
  table.InsertRow({"use_gpu", "true"});
  table.InsertRow({"gpu_device_id", "0"});
  table.InsertRow({"memory_pool_init_size", "100MB"});
  table.InsertRow({"thread_local_stream", "false"});
  table.InsetDivider();

  // trt precision
  table.InsertRow({"use_trt", "true"});
  table.InsertRow({"trt_precision", "fp32"});
  table.InsertRow({"enable_dynamic_shape", "true"});
  table.InsertRow({"DisableTensorRtOPs", "{}"});
  table.InsertRow({"EnableVarseqlen", "ON"});
  table.InsertRow({"tensorrt_dla_enabled", "ON"});
  table.InsetDivider();

  // lite
  table.InsertRow({"use_lite", "ON"});
  table.InsetDivider();

  // xpu
  table.InsertRow({"use_xpu", "true"});
  table.InsertRow({"xpu_device_id", "0"});
  table.InsetDivider();

  // ir
  table.InsertRow({"ir_optim", "true"});
  table.InsertRow({"ir_debug", "false"});
  table.InsertRow({"enable_memory_optim", "false"});
  table.InsertRow({"EnableProfile", "false"});
  table.InsertRow({"glog_info_disabled", "false"});
  table.InsetDivider();

  // cpu
  table.InsertRow({"CpuMathLibrary", "4"});
  // mkldnn
  table.InsertRow({"enable_mkldnn", "false"});
  table.InsertRow({"mkldnn_cache_capacity", "10"});

  // a long string
  table.InsertRow(
      {"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ a long string "
       "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~",
       "------------------------------------------ a long value "
       "-----------------------------------------------------"});

  LOG(INFO) << table.PrintTable();
}
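A side note on naming: the divider calls above are spelled InsetDivider because that is the method's actual name in the TablePrinter interface (table_printer.h), not a typo introduced by this test.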
2 changes: 2 additions & 0 deletions test/cpp/fluid/CMakeLists.txt
@@ -1,6 +1,8 @@
add_subdirectory(benchmark)
add_subdirectory(framework)

+add_subdirectory(inference)
+
if(WITH_CINN)
add_subdirectory(cinn)
endif()
1 change: 1 addition & 0 deletions test/cpp/fluid/inference/CMakeLists.txt
@@ -0,0 +1 @@
add_subdirectory(utils)
16 changes: 16 additions & 0 deletions test/cpp/fluid/inference/utils/CMakeLists.txt
@@ -0,0 +1,16 @@
if(WITH_TESTING)
  if(NOT APPLE)
    inference_base_test(
      infer_io_utils_tester SRCS io_utils_tester.cc
      DEPS
      paddle_inference_shared
      common)
  endif()
endif()

if(WITH_ONNXRUNTIME AND WIN32)
  # Copy onnxruntime for some C++ tests on Windows; since the tests are
  # built only in CI, we assume the generator on Windows is Ninja.
  copy_onnx(infer_io_utils_tester)
endif()
