Merge pull request PaddlePaddle#98 from jiweibo/model_update
update model for test/shrink_memory
jiweibo committed Mar 8, 2021
2 parents 5f1fbb6 + 4ae5434 commit b1b7448
Showing 8 changed files with 86 additions and 127 deletions.
10 changes: 8 additions & 2 deletions c++/run_demo.sh
@@ -18,13 +18,19 @@ declare -a test_demos
for dir in $(ls ${work_path})
do
# test all demos
if [ "${dir}" != 'lib' -a -d "${dir}" ]; then
if [ "${dir}" != 'lib' -a "${dir}" != "test" -a -d "${dir}" ]; then
test_demos+=("${dir}")
fi
if [ "${dir}" == "test" ]; then
for test_dir in $(ls "${dir}")
do
test_demos+=("${dir}/${test_dir}")
done
fi
done

# temporarily supported demos
test_demos=(yolov3 LIC2020 resnet50)
test_demos=(yolov3 LIC2020 resnet50 test/shrink_memory)

for demo in ${test_demos[@]};
do
96 changes: 0 additions & 96 deletions c++/test/shrink_memory/CMakeLists.txt

This file was deleted.

31 changes: 15 additions & 16 deletions c++/test/shrink_memory/README.md
@@ -2,21 +2,21 @@

### 1. Obtain the MobileNetV1 test model

Click the [link](https://paddlepaddle-inference-banchmark.bj.bcebos.com/MobileNetV1_inference.tar) to download the model.
Click the [link](https://paddle-inference-dist.bj.bcebos.com/Paddle-Inference-Demo/mobilenetv1.tgz) to download the model.

### 2. **Build the samples**

The file `single_thread_test.cc` is a single-threaded inference sample that uses ShrinkMemory to reduce host/GPU memory usage (the program uses fixed input values; if you want to read data with OpenCV or by other means, you will need to modify the program accordingly).
The file `thread_local_test.cc` is a multi-threaded sample that uses `thread_local` predictors together with ShrinkMemory to reduce host/GPU memory usage (a minimal sketch of this pattern follows below).
The file `multi_thread_test.cc` is a multi-threaded inference sample that uses ShrinkMemory to reduce host/GPU memory usage.
The file `CMakeLists.txt` is the build configuration file.
The script `run_impl.sh` contains the configuration of the third-party and prebuilt libraries.
The script `compile.sh` contains the configuration of the third-party and prebuilt libraries.
The script `run.sh` is a one-click run script.
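
For orientation, below is a minimal sketch of the `thread_local` pattern used by `thread_local_test.cc`, assuming the Paddle Inference 2.x C++ API (`Config`, `CreatePredictor`, and a `TryShrinkMemory()` entry point); the include path, model paths, and thread count are illustrative only:

```c++
#include <string>
#include <thread>
#include <vector>

#include "paddle/include/paddle_inference_api.h"  // include path depends on your install layout

// Each worker thread constructs and owns exactly one predictor; thread_local
// storage keeps that predictor alive for the lifetime of the thread.
void RunOneThread(const std::string &model_file, const std::string &params_file) {
  paddle_infer::Config config;
  config.SetModel(model_file, params_file);
  config.EnableUseGpu(500, 0);  // 500 MB initial GPU memory pool on device 0
  thread_local auto predictor = paddle_infer::CreatePredictor(config);
  // ... feed inputs, call predictor->Run(), fetch outputs ...
  predictor->TryShrinkMemory();  // assumed API: release this thread's cached memory pool
}

int main() {
  std::vector<std::thread> workers;
  for (int i = 0; i < 2; ++i) {
    workers.emplace_back(RunOneThread, "mobilenetv1/inference.pdmodel",
                         "mobilenetv1/inference.pdiparams");
  }
  for (auto &t : workers) t.join();
  return 0;
}
```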

To build the single_thread_test sample, we first need to modify the configuration in the script `run_impl.sh`.
To build the single_thread_test sample, we first need to modify the configuration in the script `compile.sh`.

1) **Modify `run_impl.sh`**
1) **Modify `compile.sh`**

Open `run_impl.sh` and modify the following settings:
Open `compile.sh` and modify the following settings:

```shell
# Choose single_thread_test, multi_thread_test or thread_local_test as needed
@@ -28,26 +28,25 @@ WITH_GPU=ON
USE_TENSORRT=OFF

# Set the root directory of the inference library
LIB_DIR=${YOUR_LIB_DIR}/paddle_inference_install_dir
LIB_DIR=${work_path}/../../lib/paddle_inference

# If WITH_GPU or USE_TENSORRT above is set to ON, set the corresponding CUDA, CUDNN and TENSORRT paths.
CUDNN_LIB=/usr/local/cudnn/lib64
CUDNN_LIB=/usr/lib/x86_64-linux-gnu/
CUDA_LIB=/usr/local/cuda/lib64
# TENSORRT_ROOT=/usr/local/TensorRT-6.0.1.5
TENSORRT_ROOT=/usr/local/TensorRT-6.0.1.5
```

Running `sh run_impl.sh` creates a build directory in the current directory.
Running `bash compile.sh` creates a build directory in the current directory.


2) **Run the samples**

```shell
# Enter the build directory
cd build
# Run the sample
./build/single_thread_test -model_dir ${YOLO_MODEL_PATH} --use_gpu
# ./build/multi_thread_test --model_dir ${YOUR_MODEL_PATH} --use_gpu --thread_num 2
# ./build/thread_local_test --model_dir ${YOUR_MODEL_PATH} --use_gpu
bash run.sh
#
./build/single_thread_test --model_file mobilenetv1/inference.pdmodel --params_file mobilenetv1/inference.pdiparams --use_gpu
# ./build/multi_thread_test --model_file mobilenetv1/inference.pdmodel --params_file mobilenetv1/inference.pdiparams --use_gpu --thread_num 2
# ./build/thread_local_test --model_file mobilenetv1/inference.pdmodel --params_file mobilenetv1/inference.pdiparams --use_gpu
```

While the program runs, watch the GPU memory or CPU memory usage as prompted. You will notice that when a run uses a very large batch_size, the memory/GPU-memory pool grows and the application's footprint stays high; the ShrinkMemory operation can then be used to explicitly release the memory/GPU-memory pool.
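
For reference, a minimal sketch of that release step, assuming the ShrinkMemory operation is exposed as `Predictor::TryShrinkMemory()` as in the Paddle Inference 2.x C++ API; the input shape, fill value, and include path are illustrative:

```c++
#include <vector>

#include "paddle/include/paddle_inference_api.h"  // include path depends on your install layout

// Run one pass with the given batch size, then ask the predictor to return
// its cached memory/GPU-memory pool to the system.
void RunAndShrink(paddle_infer::Predictor *predictor, int batch_size) {
  auto input_names = predictor->GetInputNames();
  auto input = predictor->GetInputHandle(input_names[0]);
  // MobileNetV1 takes NCHW float input; 3x224x224 is the usual shape.
  std::vector<float> data(static_cast<size_t>(batch_size) * 3 * 224 * 224, 1.0f);
  input->Reshape({batch_size, 3, 224, 224});
  input->CopyFromCpu(data.data());
  predictor->Run();
  // A large batch grows the internal pool and the footprint would otherwise
  // stay high; TryShrinkMemory() (assumed name) releases that cached pool.
  predictor->TryShrinkMemory();
}
```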
c++/test/shrink_memory/run_impl.sh → c++/test/shrink_memory/compile.sh
@@ -1,22 +1,36 @@
#!/bin/bash
set +x
set -e

work_path=$(dirname $(readlink -f $0))

# 1. check paddle_inference exists
if [ ! -d "${work_path}/../../lib/paddle_inference" ]; then
echo "Please download paddle_inference lib and move it in Paddle-Inference-Demo/lib"
exit 1
fi

# 2. check CMakeLists exists
if [ ! -f "${work_path}/CMakeLists.txt" ]; then
cp -a "${work_path}/../../lib/CMakeLists.txt" "${work_path}/"
fi

# 3. compile
mkdir -p build
cd build
rm -rf *

# choose one of single_thread_test, multi_thread_test or thread_local_test
DEMO_NAME=single_thread_test
#DEMO_NAME=multi_thread_test
#DEMO_NAME=thread_local_test

WITH_MKL=ON
WITH_GPU=ON
USE_TENSORRT=OFF

LIB_DIR=${work_path}/paddle_inference_install_dir
CUDNN_LIB=/usr/local/cudnn/lib64
LIB_DIR=${work_path}/../../lib/paddle_inference
CUDNN_LIB=/usr/lib/x86_64-linux-gnu/
CUDA_LIB=/usr/local/cuda/lib64
# TENSORRT_ROOT=/usr/local/TensorRT-6.0.1.5
TENSORRT_ROOT=/usr/local/TensorRT-6.0.1.5

cmake .. -DPADDLE_LIB=${LIB_DIR} \
-DWITH_MKL=${WITH_MKL} \
10 changes: 8 additions & 2 deletions c++/test/shrink_memory/multi_thread_test.cc
@@ -7,7 +7,9 @@
#include <thread>
#include <vector>

DEFINE_string(model_dir, "./mobilenetv1", "model directory.");
DEFINE_string(model_file, "", "Path of the inference model file.");
DEFINE_string(params_file, "", "Path of the inference params file.");
DEFINE_string(model_dir, "", "Directory of the inference model.");
DEFINE_int32(thread_num, 1, "thread num");
DEFINE_bool(use_gpu, false, "use gpu.");
DEFINE_bool(test_leaky, false,
@@ -16,7 +18,11 @@ DEFINE_bool(test_leaky,
namespace paddle_infer {

void PrepareConfig(Config *config) {
config->SetModel(FLAGS_model_dir + "/model", FLAGS_model_dir + "/params");
if (FLAGS_model_dir != "") {
config->SetModel(FLAGS_model_dir);
} else {
config->SetModel(FLAGS_model_file, FLAGS_params_file);
}
if (FLAGS_use_gpu) {
config->EnableUseGpu(500, 0);
}
17 changes: 17 additions & 0 deletions c++/test/shrink_memory/run.sh
@@ -0,0 +1,17 @@
#!/bin/bash
set +x
set -e

work_path=$(dirname $(readlink -f $0))

# 1. compile
bash ${work_path}/compile.sh

# 2. download model
if [ ! -d mobilenetv1 ]; then
wget https://paddle-inference-dist.bj.bcebos.com/Paddle-Inference-Demo/mobilenetv1.tgz
tar xzf mobilenetv1.tgz
fi

# 3. run
./build/single_thread_test --model_file mobilenetv1/inference.pdmodel --params_file mobilenetv1/inference.pdiparams --use_gpu
15 changes: 11 additions & 4 deletions c++/test/shrink_memory/single_thread_test.cc
@@ -5,8 +5,11 @@
#include <iostream>
#include <memory>
#include <numeric>
#include <thread>

DEFINE_string(model_dir, "./mobilenetv1", "Directory of the inference model.");
DEFINE_string(model_file, "", "Path of the inference model file.");
DEFINE_string(params_file, "", "Path of the inference params file.");
DEFINE_string(model_dir, "", "Directory of the inference model.");
DEFINE_bool(use_gpu, false, "use_gpu");

namespace paddle_infer {
@@ -21,8 +24,11 @@ double time_diff(Time t1, Time t2) {
}

void PrepareConfig(Config *config) {
config->SetProgFile(FLAGS_model_dir + "/model");
config->SetParamsFile(FLAGS_model_dir + "/params");
if (FLAGS_model_dir != "") {
config->SetModel(FLAGS_model_dir);
} else {
config->SetModel(FLAGS_model_file, FLAGS_params_file);
}
if (FLAGS_use_gpu) {
config->EnableUseGpu(500, 0);
}
@@ -76,7 +82,8 @@ void Demo(int repeat) {
auto pause = [](const std::string &hint) {
std::string temp;
LOG(INFO) << hint;
std::getline(std::cin, temp);
// std::getline(std::cin, temp);
std::this_thread::sleep_for(std::chrono::milliseconds(2000));
};
pause("Pause, init predictor done, please enter any character to continue "
"running.");
10 changes: 8 additions & 2 deletions c++/test/shrink_memory/thread_local_test.cc
@@ -8,7 +8,9 @@
#include <thread>
#include <vector>

DEFINE_string(model_dir, "./mobilenetv1", "model directory.");
DEFINE_string(model_file, "", "Path of the inference model file.");
DEFINE_string(params_file, "", "Path of the inference params file.");
DEFINE_string(model_dir, "", "Directory of the inference model.");
DEFINE_bool(use_gpu, false, "use gpu.");
DEFINE_bool(test_leaky, false,
"run 1000 times, and observe whether leaky memory or not.");
@@ -21,7 +23,11 @@ paddle::inference::Barrier barrier_warmup(thread_num);
namespace paddle_infer {

void PrepareConfig(Config *config) {
config->SetModel(FLAGS_model_dir + "/model", FLAGS_model_dir + "/params");
if (FLAGS_model_dir != "") {
config->SetModel(FLAGS_model_dir);
} else {
config->SetModel(FLAGS_model_file, FLAGS_params_file);
}
if (FLAGS_use_gpu) {
config->EnableUseGpu(500, 0);
}
