[Serving] add ppdet serving example #641

Merged
merged 9 commits on Nov 22, 2022
@@ -13,7 +13,7 @@ tar -xvf ResNet50_vd_infer.tgz
 wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg

 # Move the config file into the preprocessing directory
-mv ResNet50_vd_infer/inference_cls.yaml models/preprocess/1/
+mv ResNet50_vd_infer/inference_cls.yaml models/preprocess/1/inference_cls.yaml

 # Move the model into the models/runtime/1 directory and rename it to model.pdmodel and model.pdiparams
 mv ResNet50_vd_infer/inference.pdmodel models/runtime/1/model.pdmodel
1 change: 1 addition & 0 deletions examples/vision/detection/paddledetection/README.md
@@ -47,3 +47,4 @@

 - [Python Deployment](python)
 - [C++ Deployment](cpp)
+- [Serving Deployment](serving)
@@ -0,0 +1,110 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import numpy as np
import time

import fastdeploy as fd

# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    """Your Python model must use the same class name. Every Python model
    that is created must have "TritonPythonModel" as the class name.
    """

    def initialize(self, args):
        """`initialize` is called only once when the model is being loaded.
        Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
        Parameters
        ----------
        args : dict
          Both keys and values are strings. The dictionary keys and values are:
          * model_config: A JSON string containing the model configuration
          * model_instance_kind: A string containing model instance kind
          * model_instance_device_id: A string containing model instance device ID
          * model_repository: Model repository path
          * model_version: Model version
          * model_name: Model name
        """
        # You must parse model_config; the JSON string is not parsed automatically
        self.model_config = json.loads(args['model_config'])
        print("model_config:", self.model_config)

        # Collect the input names declared in config.pbtxt
        self.input_names = []
        for input_config in self.model_config["input"]:
            self.input_names.append(input_config["name"])
        print("postprocess input names:", self.input_names)

        # Collect the output names and their numpy dtypes
        self.output_names = []
        self.output_dtype = []
        for output_config in self.model_config["output"]:
            self.output_names.append(output_config["name"])
            dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
            self.output_dtype.append(dtype)
        print("postprocess output names:", self.output_names)

        self.postprocess_ = fd.vision.detection.PaddleDetPostprocessor()

    def execute(self, requests):
        """`execute` must be implemented in every Python model. `execute`
        function receives a list of pb_utils.InferenceRequest as the only
        argument. This function is called when an inference is requested
        for this model. Depending on the batching configuration (e.g. Dynamic
        Batching) used, `requests` may contain multiple requests. Every
        Python model must create one pb_utils.InferenceResponse for every
        pb_utils.InferenceRequest in `requests`. If there is an error, you can
        set the error argument when creating a pb_utils.InferenceResponse.
        Parameters
        ----------
        requests : list
          A list of pb_utils.InferenceRequest
        Returns
        -------
        list
          A list of pb_utils.InferenceResponse. The length of this list must
          be the same as `requests`
        """
        responses = []
        for request in requests:
            # Gather the runtime model's outputs, which are this model's inputs
            infer_outputs = []
            for name in self.input_names:
                infer_output = pb_utils.get_input_tensor_by_name(request, name)
                if infer_output:
                    infer_output = infer_output.as_numpy()
                    infer_outputs.append(infer_output)

            results = self.postprocess_.run(infer_outputs)
            r_str = fd.vision.utils.fd_result_to_json(results)

            # np.object was removed in recent NumPy releases; use np.object_
            r_np = np.array(r_str, dtype=np.object_)
            out_tensor = pb_utils.Tensor(self.output_names[0], r_np)
            inference_response = pb_utils.InferenceResponse(
                output_tensors=[out_tensor, ])
            responses.append(inference_response)
        return responses

    def finalize(self):
        """`finalize` is called only once when the model is being unloaded.
        Implementing `finalize` function is optional. This function allows
        the model to perform any necessary clean ups before exit.
        """
        print('Cleaning up...')
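
To sanity-check this postprocess model outside Triton, the same FastDeploy calls can be exercised directly. A minimal sketch, assuming the input shapes from the postprocess config.pbtxt that follows (boxes as `[-1, 6]` float32, per-image box counts as `[-1]` int32) and an assumed box-row layout of `[class_id, score, x1, y1, x2, y2]`:

```python
import numpy as np
import fastdeploy as fd

postprocessor = fd.vision.detection.PaddleDetPostprocessor()

# One dummy detection; the row layout [class_id, score, x1, y1, x2, y2] is an
# assumption for illustration.
boxes = np.array([[0.0, 0.9, 10.0, 20.0, 110.0, 220.0]], dtype=np.float32)
num_boxes = np.array([1], dtype=np.int32)  # one box for the single image

results = postprocessor.run([boxes, num_boxes])
print(fd.vision.utils.fd_result_to_json(results))
```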
@@ -0,0 +1,30 @@
name: "postprocess"
backend: "python"

input [
  {
    name: "post_input1"
    data_type: TYPE_FP32
    dims: [ -1, 6 ]
  },
  {
    name: "post_input2"
    data_type: TYPE_INT32
    dims: [ -1 ]
  }
]

output [
  {
    name: "post_output"
    data_type: TYPE_STRING
    dims: [ -1 ]
  }
]

instance_group [
  {
    count: 1
    kind: KIND_CPU
  }
]
@@ -0,0 +1,34 @@
backend: "python"

input [
  {
    name: "post_input1"
    data_type: TYPE_FP32
    dims: [ -1, 6 ]
  },
  {
    name: "post_input2"
    data_type: TYPE_INT32
    dims: [ -1 ]
  },
  {
    name: "post_input3"
    data_type: TYPE_INT32
    dims: [ -1, -1, -1 ]
  }
]

output [
  {
    name: "post_output"
    data_type: TYPE_STRING
    dims: [ -1 ]
  }
]

instance_group [
  {
    count: 1
    kind: KIND_CPU
  }
]
@@ -0,0 +1,3 @@
# PaddleDetection Pipeline

The pipeline directory holds no model files, but a version-number directory still needs to be maintained; a sketch of creating it follows.
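
Triton resolves every model as `<repository>/<model_name>/<version>/`, so the ensemble needs an empty version directory even though it has no model file. A minimal sketch; the `models/ppdet` path is hypothetical, since this diff does not show the pipeline's directory name:

```python
from pathlib import Path

# Create the (empty) version directory Triton expects; "models/ppdet" is an
# assumed path for the pipeline model.
Path("models/ppdet/1").mkdir(parents=True, exist_ok=True)
```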
@@ -0,0 +1,80 @@
platform: "ensemble"

input [
  {
    name: "INPUT"
    data_type: TYPE_UINT8
    dims: [ -1, -1, -1, 3 ]
  }
]
output [
  {
    name: "DET_RESULT"
    data_type: TYPE_STRING
    dims: [ -1 ]
  }
]
ensemble_scheduling {
  step [
    {
      model_name: "preprocess"
      model_version: 1
      input_map {
        key: "preprocess_input"
        value: "INPUT"
      }
      output_map {
        key: "preprocess_output1"
        value: "RUNTIME_INPUT1"
      }
      output_map {
        key: "preprocess_output2"
        value: "RUNTIME_INPUT2"
      }
      output_map {
        key: "preprocess_output3"
        value: "RUNTIME_INPUT3"
      }
    },
    {
      model_name: "runtime"
      model_version: 1
      input_map {
        key: "image"
        value: "RUNTIME_INPUT1"
      }
      input_map {
        key: "scale_factor"
        value: "RUNTIME_INPUT2"
      }
      input_map {
        key: "im_shape"
        value: "RUNTIME_INPUT3"
      }
      output_map {
        key: "concat_12.tmp_0"
        value: "RUNTIME_OUTPUT1"
      }
      output_map {
        key: "concat_8.tmp_0"
        value: "RUNTIME_OUTPUT2"
      }
    },
    {
      model_name: "postprocess"
      model_version: 1
      input_map {
        key: "post_input1"
        value: "RUNTIME_OUTPUT1"
      }
      input_map {
        key: "post_input2"
        value: "RUNTIME_OUTPUT2"
      }
      output_map {
        key: "post_output"
        value: "DET_RESULT"
      }
    }
  ]
}
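
For reference, a minimal HTTP client sketch against this ensemble; the server URL, the image path, and the model name `ppdet` are assumptions (the ensemble's name comes from its directory, which this diff does not show):

```python
import json

import cv2
import numpy as np
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")  # assumed address

im = cv2.imread("test.jpg")                       # HWC, BGR, uint8
im = np.expand_dims(im, axis=0).astype(np.uint8)  # [1, H, W, 3], matches "INPUT"

inputs = [httpclient.InferInput("INPUT", list(im.shape), "UINT8")]
inputs[0].set_data_from_numpy(im)
outputs = [httpclient.InferRequestedOutput("DET_RESULT")]

response = client.infer(model_name="ppdet", inputs=inputs, outputs=outputs)
det_result = response.as_numpy("DET_RESULT")      # array of JSON strings
print(json.loads(det_result[0]))
```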
@@ -0,0 +1,88 @@
platform: "ensemble"

input [
  {
    name: "INPUT"
    data_type: TYPE_UINT8
    dims: [ -1, -1, -1, 3 ]
  }
]
output [
  {
    name: "DET_RESULT"
    data_type: TYPE_STRING
    dims: [ -1 ]
  }
]
ensemble_scheduling {
  step [
    {
      model_name: "preprocess"
      model_version: 1
      input_map {
        key: "preprocess_input"
        value: "INPUT"
      }
      output_map {
        key: "preprocess_output1"
        value: "RUNTIME_INPUT1"
      }
      output_map {
        key: "preprocess_output2"
        value: "RUNTIME_INPUT2"
      }
      output_map {
        key: "preprocess_output3"
        value: "RUNTIME_INPUT3"
      }
    },
    {
      model_name: "runtime"
      model_version: 1
      input_map {
        key: "image"
        value: "RUNTIME_INPUT1"
      }
      input_map {
        key: "scale_factor"
        value: "RUNTIME_INPUT2"
      }
      input_map {
        key: "im_shape"
        value: "RUNTIME_INPUT3"
      }
      output_map {
        key: "concat_9.tmp_0"
        value: "RUNTIME_OUTPUT1"
      }
      output_map {
        key: "concat_5.tmp_0"
        value: "RUNTIME_OUTPUT2"
      }
      output_map {
        key: "tmp_109"
        value: "RUNTIME_OUTPUT3"
      }
    },
    {
      model_name: "postprocess"
      model_version: 1
      input_map {
        key: "post_input1"
        value: "RUNTIME_OUTPUT1"
      }
      input_map {
        key: "post_input2"
        value: "RUNTIME_OUTPUT2"
      }
      input_map {
        key: "post_input3"
        value: "RUNTIME_OUTPUT3"
      }
      output_map {
        key: "post_output"
        value: "DET_RESULT"
      }
    }
  ]
}