/
test_models.py
114 lines (89 loc) · 3.32 KB
/
test_models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
from pathlib import Path
from typing import List, Tuple
import cv2
import numpy as np
import pytest
import tritonclient.http as httpclient
def send_request(
    client, model_name: str, input_shape: Tuple[int, ...], input_type: str
) -> np.ndarray:
    """Send one inference request filled with random data and return the output.

    Builds a single "input__0" tensor of the given shape/type (FP32 -> float32,
    anything else -> uint8), requests "output__0" as non-binary data, and
    returns it as a numpy array.
    """
    dtype = np.float32 if input_type == "FP32" else np.uint8
    infer_input = httpclient.InferInput("input__0", input_shape, input_type)
    infer_input.set_data_from_numpy(np.random.randn(*input_shape).astype(dtype))
    requested = httpclient.InferRequestedOutput("output__0", binary_data=False)
    response = client.infer(
        model_name=model_name,
        inputs=[infer_input],
        outputs=[requested],
    )
    return response.as_numpy("output__0")
@pytest.fixture(scope="session")
def triton_client() -> httpclient.InferenceServerClient:
    """Session-scoped fixture returning an HTTP client for a local Triton server.

    Assumes a Triton Inference Server is reachable at localhost:8000 — the
    default HTTP port; no connectivity check is performed here.
    """
    return httpclient.InferenceServerClient(url="localhost:8000")
def test_stn(triton_client):
    """The STN model should map a (4, 3, 24, 94) FP32 batch to the same shape."""
    shape = (4, 3, 24, 94)
    result = send_request(triton_client, "stn", shape, "FP32")
    # Spatial transformer output preserves the input geometry.
    assert result.shape == shape
    assert result.dtype == np.float32
def test_lprnet(triton_client):
    """The LPRNet model should map a (4, 3, 24, 94) FP32 batch to (4, 23, 18) logits."""
    result = send_request(triton_client, "lprnet", (4, 3, 24, 94), "FP32")
    assert result.shape == (4, 23, 18)
    assert result.dtype == np.float32
def test_yolo(triton_client):
    """The YOLO model should return an empty FP32 detection array for random noise."""
    result = send_request(triton_client, "yolo", (500, 800, 3), "UINT8")
    # Random uint8 noise should yield no detections.
    assert result.shape == (0,)
    assert result.dtype == np.float32
@pytest.mark.parametrize(
    "img_path,expected_coordinates,expected_texts",
    [
        [
            "tests/data/car.jpg",
            np.array(
                [
                    [232.44186, 814.19446, 324.64374, 841.9125],
                    [1097.4425, 661.41547, 1141.9923, 674.0388],
                    [1520.4563, 639.6942, 1567.8191, 653.18317],
                    [1286.0554, 636.13745, 1317.4097, 645.0998],
                ]
            ),
            np.array(["B840CK197", "", "", ""]),
        ],
        # NOTE(review): the no-detection case uses shape (0, 1) for texts while the
        # positive case is 1-D — confirm against the server's actual empty output shape.
        ["tests/data/cat.jpeg", np.empty((0, 4)), np.empty((0, 1))],
    ],
)
def test_plate_recognition(
    triton_client,
    img_path: str,
    expected_coordinates: np.ndarray,
    expected_texts: np.ndarray,
):
    """End-to-end check of the plate_recognition ensemble on a real image.

    Sends the raw BGR image as "input__0" (UINT8) and verifies that the
    returned "coordinates" and "texts" outputs match the expected detections.
    """
    model_name = "plate_recognition"
    image = cv2.imread(str(img_path))
    inputs, outputs = [], []
    inputs.append(httpclient.InferInput("input__0", image.shape, "UINT8"))
    inputs[0].set_data_from_numpy(image)
    outputs.append(httpclient.InferRequestedOutput("coordinates", binary_data=False))
    outputs.append(httpclient.InferRequestedOutput("texts", binary_data=False))
    results = triton_client.infer(
        model_name=model_name,
        inputs=inputs,
        outputs=outputs,
    )
    coordinates = results.as_numpy("coordinates")
    texts = results.as_numpy("texts")
    assert coordinates.shape == expected_coordinates.shape
    # Bug fix: the original compared texts.shape to itself, which is always true.
    assert texts.shape == expected_texts.shape
    assert np.allclose(coordinates, expected_coordinates)
    # np.all over an empty comparison is True, so the no-detection case passes.
    assert np.all(texts == expected_texts)