# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
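"""Run image-classification inference with Paddle's native AnalysisConfig predictor.

Supports classifying a single image (--image_file) or a directory of images
(--image_dir), plus a benchmark mode (--enable_benchmark) that times inference
on random data. Example invocation (file paths are illustrative):

    python predict.py -m ./model -p ./params -i ./demo.jpg
"""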
import argparse
import logging
import os
import time

import numpy as np
from paddle.fluid.core import AnalysisConfig
from paddle.fluid.core import create_paddle_predictor

import utils

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def parse_args():
    def str2bool(v):
        return v.lower() in ("true", "t", "1")

    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--image_file", type=str)
    parser.add_argument("-d", "--image_dir", type=str)
    parser.add_argument("-m", "--model_file", type=str)
    parser.add_argument("-p", "--params_file", type=str)
    parser.add_argument("-b", "--batch_size", type=int, default=1)
    parser.add_argument("--use_fp16", type=str2bool, default=False)
    parser.add_argument("--use_gpu", type=str2bool, default=True)
    parser.add_argument("--ir_optim", type=str2bool, default=True)
    parser.add_argument("--use_tensorrt", type=str2bool, default=False)
    parser.add_argument("--gpu_mem", type=int, default=8000)
    parser.add_argument("--enable_benchmark", type=str2bool, default=False)
    parser.add_argument("--model_name", type=str)

    return parser.parse_args()


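# Build the inference config from the CLI flags: device placement, GPU memory
# pool size, IR graph optimization, optional TensorRT (FP16 precision when
# requested), and zero-copy feed/fetch.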
def create_predictor(args):
    config = AnalysisConfig(args.model_file, args.params_file)

    if args.use_gpu:
        config.enable_use_gpu(args.gpu_mem, 0)
    else:
        config.disable_gpu()

    config.disable_glog_info()
    config.switch_ir_optim(args.ir_optim)  # default true
    if args.use_tensorrt:
        config.enable_tensorrt_engine(
            precision_mode=AnalysisConfig.Precision.Half
            if args.use_fp16 else AnalysisConfig.Precision.Float32,
            max_batch_size=args.batch_size)

    config.enable_memory_optim()
    # use zero copy
    config.switch_use_feed_fetch_ops(False)

    predictor = create_paddle_predictor(config)

    return predictor


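# ImageNet-style evaluation preprocessing: decode the image bytes, resize the
# short side to 256, crop to 224x224, normalize with the ImageNet mean/std,
# and convert to a tensor layout with utils.ToTensor.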
def create_operators():
    size = 224
    img_mean = [0.485, 0.456, 0.406]
    img_std = [0.229, 0.224, 0.225]
    img_scale = 1.0 / 255.0

    decode_op = utils.DecodeImage()
    resize_op = utils.ResizeImage(resize_short=256)
    crop_op = utils.CropImage(size=(size, size))
    normalize_op = utils.NormalizeImage(
        scale=img_scale, mean=img_mean, std=img_std)
    totensor_op = utils.ToTensor()

    return [decode_op, resize_op, crop_op, normalize_op, totensor_op]


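# Read the raw image bytes and run them through the ops in order; DecodeImage
# consumes the bytes and each later op transforms the previous op's output.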
def preprocess(fname, ops):
    # Use a context manager so the file handle is closed promptly.
    with open(fname, 'rb') as f:
        data = f.read()
    for op in ops:
        data = op(data)

    return data


def main():
    args = parse_args()

    if not args.enable_benchmark:
        assert args.batch_size == 1
        assert args.use_fp16 is False
    else:
        assert args.use_gpu is True
        assert args.model_name is not None
        assert args.use_tensorrt is True
        assert args.image_file is not None

    # Half-precision prediction only works when TensorRT is enabled.
    if args.use_fp16 is True:
        assert args.use_tensorrt is True

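    # Build the preprocessing pipeline and the predictor, then look up the
    # input/output tensors used for zero-copy inference.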
    operators = create_operators()
    predictor = create_predictor(args)

    input_names = predictor.get_input_names()
    input_tensor = predictor.get_input_tensor(input_names[0])

    output_names = predictor.get_output_names()
    output_tensor = predictor.get_output_tensor(output_names[0])

    test_num = 500
    test_time = 0.0

    if not args.enable_benchmark:
        # Collect the images to classify: either the single --image_file, or
        # every file with a supported extension under --image_dir.
        image_files = []
        if args.image_file is not None:
            image_files = [args.image_file]
        elif args.image_dir is not None:
            supported_exts = ('.jpg', '.jpeg', '.png', '.gif', '.bmp')
            for root, _, files in os.walk(args.image_dir, topdown=False):
                image_files += [
                    os.path.join(root, f) for f in files
                    if os.path.splitext(f)[-1].lower() in supported_exts
                ]

        for image_file in image_files:
            inputs = preprocess(image_file, operators)
            inputs = np.expand_dims(
                inputs, axis=0).repeat(
                    args.batch_size, axis=0).copy()
            input_tensor.copy_from_cpu(inputs)

            predictor.zero_copy_run()

            output = output_tensor.copy_to_cpu()
            output = output.flatten()
            cls = np.argmax(output)
            score = output[cls]
            logger.info("image file: {0}".format(image_file))
            logger.info("class: {0}".format(cls))
            logger.info("score: {0}".format(score))
    else:
        # Benchmark on random inputs; the first 10 iterations warm up the
        # predictor and are excluded from the timing.
        for i in range(0, test_num + 10):
            inputs = np.random.rand(args.batch_size, 3, 224,
                                    224).astype(np.float32)

            start_time = time.time()
            input_tensor.copy_from_cpu(inputs)

            predictor.zero_copy_run()

            output = output_tensor.copy_to_cpu()
            output = output.flatten()
            if i >= 10:
                test_time += time.time() - start_time

        fp_message = "FP16" if args.use_fp16 else "FP32"
        logger.info("{0}\t{1}\tbatch size: {2}\ttime(ms): {3}".format(
            args.model_name, fp_message, args.batch_size,
            1000 * test_time / test_num))


if __name__ == "__main__":
    main()