Add inference report for both simulator and evb. #1061

Merged · 4 commits · Aug 22, 2023
4 changes: 3 additions & 1 deletion requirements.test.txt
@@ -17,4 +17,6 @@ pytest-xdist
pyyaml
pythonnet==3.0.1
clr_loader==0.2.4
toml==0.10.2
toml==0.10.2
pandas
tabulate
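A note on the two new pins: `pandas` builds the report table and `tabulate` is the backend it needs for markdown output. A minimal sketch of the dependency (values are illustrative only):

```python
# pandas.DataFrame.to_markdown() delegates table rendering to the
# `tabulate` package, so both must be installed for the report step.
import pandas as pd

df = pd.DataFrame([{'case': 'conv2d', 'fps': '81.3'}])
print(df.to_markdown())  # raises ImportError if tabulate is absent
```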
2 changes: 1 addition & 1 deletion tests/compare_util.py
@@ -95,7 +95,7 @@ def compare_ndarray(expected: np.ndarray,
if dump_hist:
y, x = np.histogram(expected - actual, 100)
np.savetxt(dump_file, np.stack((x[:-1], y)).T, fmt='%f', delimiter=',')
similarity_info = f"\n{similarity_name} similarity = {similarity}, threshold = {threshold}\n"
similarity_info = f"{similarity_name} similarity = {similarity}, threshold = {threshold}"

if similarity_name in ['cosine', 'euclidean', 'segment']:
compare_op = lt
1 change: 1 addition & 0 deletions tests/config.toml
@@ -1,6 +1,7 @@
name = 'default_config'
root = 'tests_output'
dump_hist = false
dump_infer = false

[compile_opt]
preprocess = false
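The new `dump_infer` flag gates all report collection and is off by default. A small sketch of reading it with the `toml` package already pinned in requirements.test.txt (the relative path assumes the repo root as working directory):

```python
# dump_infer defaults to false; CI can flip it on to collect the report.
import toml

cfg = toml.load('tests/config.toml')
assert cfg['dump_infer'] is False
```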
38 changes: 35 additions & 3 deletions tests/inference.py
@@ -7,6 +7,7 @@
import socket
import json
from test_utils import *
import time


class Inference:
@@ -19,8 +20,17 @@ def run_inference(self, compiler, target, ptq_enabled, infer_dir):
running_on_evb = in_ci and target in kpu_targets and nuc_ip is not None and nuc_port is not None and test_executable is not None and len(
self.inputs) > 0 and len(self.outputs) > 0

if self.cfg['dump_infer']:
self.infer_dict['case'] = os.path.basename(self.case_dir)
self.infer_dict['target'] = target
if ptq_enabled:
self.set_quant_opt(compiler)

if self.cfg['dump_infer']:
case = os.path.basename(self.case_dir)
self.infer_dict['if_quant_type'] = self.cfg['ptq_opt']['quant_type']
self.infer_dict['w_quant_type'] = self.cfg['ptq_opt']['w_quant_type']

compiler.compile()
kmodel = compiler.gencode_tobytes()
os.makedirs(infer_dir, exist_ok=True)
@@ -35,7 +45,17 @@ def run_inference(self, compiler, target, ptq_enabled, infer_dir):
sim = nncase.Simulator()
sim.load_model(kmodel)
self.set_infer_input(sim, compile_opt)

if self.cfg['dump_infer']:
t1 = time.perf_counter()

sim.run()

if self.cfg['dump_infer']:
t = (time.perf_counter() - t1) * 1000
self.infer_dict['time(ms)'] = str(t)
self.infer_dict['fps'] = str(round(1000 / t, 2))

outputs = self.dump_infer_output(sim, compile_opt, infer_dir)
return outputs

@@ -126,8 +146,15 @@ def run_evb(self, target, kmodel, compile_opt):

# get infer result
outputs = []
cmd_result = client_socket.recv(1024).decode()
if cmd_result.find('finish') != -1:
result_dict = {}
ret = client_socket.recv(1024)
result_dict = json.loads(ret.decode())
if result_dict['type'].find('finish') != -1:
if self.cfg['dump_infer']:
t = result_dict['time']
self.infer_dict['time(ms)'] = str(t)
self.infer_dict['fps'] = str(round(1000 / t, 2))

client_socket.sendall(f"pls send outputs".encode())

# recv outputs
@@ -150,6 +177,11 @@ def run_evb(self, target, kmodel, compile_opt):
client_socket.close()
else:
client_socket.close()
raise Exception(f'{cmd_result}')

if self.cfg['dump_infer']:
self.infer_dict['result'] = 'Fail'
self.infer_dict['remark'] = result_dict['error']
dump_dict_to_json(self.infer_dict, self.infer_file)
raise Exception(result_dict['error'])

return outputs
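The EVB path now exchanges a JSON message instead of the old plain `finish` string. A minimal sketch of the client side of the handshake; `recv_infer_result` is a hypothetical helper name, and the message shape follows nuc_proxy.py below:

```python
import json
import socket

def recv_infer_result(client_socket: socket.socket) -> dict:
    # nuc_proxy.py sends one JSON object of the shape:
    #   {'type': 'finish' | 'exception' | 'timeout',
    #    'time': <float, ms>, 'error': <str>}
    result_dict = json.loads(client_socket.recv(1024).decode())
    if result_dict['type'].find('finish') != -1:
        # ack so the proxy starts streaming the output tensors
        client_socket.sendall(b'pls send outputs')
    return result_dict
```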
24 changes: 24 additions & 0 deletions tests/json2md.py
@@ -0,0 +1,24 @@
import argparse
import json
import pandas as pd


def json2md(json_file):
json_list = []
with open(json_file, 'r') as f:
json_list = json.load(f)

json_list = sorted(json_list, key=lambda d: d['case'])
df = pd.DataFrame.from_records(json_list)
md = df.to_markdown()
md_file = json_file.split('/')[-1].split('.')[0] + '.md'

with open(md_file, 'w') as f:
f.write(md)


if __name__ == '__main__':
parser = argparse.ArgumentParser(prog="json2md")
parser.add_argument("--json", help='json file', type=str)
args = parser.parse_args()
json2md(args.json)
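A usage sketch for the new script (the report file name matches the default in test_utils.py; the programmatic import assumes tests/ is on the path):

```python
# Command line:  python tests/json2md.py --json infer_report.json
# Programmatic equivalent:
from json2md import json2md

json2md('infer_report.json')  # writes infer_report.md to the CWD
```

Note that the output path is derived from the basename only, so the .md file always lands in the current working directory regardless of where the JSON lives.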
19 changes: 12 additions & 7 deletions tests/nuc_proxy.py
@@ -19,7 +19,7 @@ def __init__(self, port, baudrate, logger):
self.port = port
self.baudrate = baudrate
self.logger = logger
self.timeout = 20
self.timeout = 60

def open(self):
self.logger.debug(f'open {self.port} begin')
@@ -144,25 +144,30 @@ def infer_worker(target):

for cmd in cmds.split(';'):
ret = target.s1.run_cmd(cmd, separator)
target.logger.debug("ret = {0}".format(ret))

# infer result
dict = {'type': 'finish', 'time': 0.0, 'error': ''}
if ret.find('terminate') != -1 or ret.find('Exception') != -1:
err = f'infer exception: {ret}'
target.logger.error('infer exception')
conn.sendall(err[0:1024].encode())
err = f'infer exception: {ret}'
dict['type'] = 'exception'
dict['error'] = err[0:1024]
conn.sendall(json.dumps(dict).encode())
elif ret.find(separator) == -1:
# reboot target when timeout
conn.sendall(f'infer timeout'.encode())
target.logger.error('reboot {0} for timeout'.format(target.name))
target.logger.error('reboot for timeout')
dict['type'] = 'timeout'
dict['error'] = 'infer timeout'
conn.sendall(json.dumps(dict).encode())

# reboot after login
target.s0.run_cmd('root')
target.s0.run_cmd('')
target.s0.run_cmd('reboot')
time.sleep(20)
else:
conn.sendall(f'infer finish'.encode())
dict['time'] = float(ret.split('\n')[1].split()[1])
conn.sendall(json.dumps(dict).encode())
dummy = conn.recv(1024)

# send outputs
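In the success branch, the inference time is scraped from the board's stdout via `ret.split('\n')[1].split()[1]`, i.e. the second whitespace-separated token on the second line. The exact text printed by the test executable is not shown in this PR, so the sample below is a hypothetical output that would satisfy that parse:

```python
# Hypothetical board output; only the split indices are taken from
# the diff above.
ret = "case mobilenet_v2\ninfer_time 3.21 ms\n"
time_ms = float(ret.split('\n')[1].split()[1])
assert time_ms == 3.21
```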
27 changes: 23 additions & 4 deletions tests/test_runner.py
@@ -55,6 +55,19 @@ def __init__(self, case_name, override_cfg: str = None) -> None:
# used for tag dynamic model for onnx simplify
self.dynamic = False

if self.cfg['dump_infer']:
self.infer_file = test_utils.infer_file()
self.infer_dict = {
'case': 'unknown',
'target': 'cpu',
'if_quant_type': 'uint8',
'w_quant_type': 'uint8',
'time(ms)': 'N/A',
'fps': 'N/A',
'result': 'Pass',
'remark': 'N/A'
}

def transform_input(self, values: List[np.ndarray], type: str, stage: str) -> List[np.ndarray]:
new_values = []
compile_opt = self.cfg['compile_opt']
@@ -252,6 +265,10 @@ def run(self, model_file: Union[List[str], str]):
judge, result = self.compare_results(
expected, actual, stage, k_target, v_target['similarity_name'], k_mode, v_mode['threshold'], dump_hist, mode_dir)

if stage == 'infer' and self.cfg['dump_infer']:
self.infer_dict['result'] = 'Pass' if judge else 'Fail'
self.infer_dict['remark'] = result.replace('\n', ' ')
dump_dict_to_json(self.infer_dict, self.infer_file)
if not judge:
if test_utils.in_ci():
self.clear(self.case_dir)
@@ -407,17 +424,19 @@ def compare_results(self,
stage, target, similarity_name, mode, threshold, dump_hist, dump_dir) -> Tuple[bool, str]:
i = 0
judges = []
result = ''
for expected, actual in zip(ref_ouputs, test_outputs):
expected = expected.astype(np.float32)
actual = actual.astype(np.float32)
dump_file = os.path.join(dump_dir, 'nncase_result_{0}_hist.csv'.format(i))
judge, similarity_info = compare_ndarray(
expected, actual, similarity_name, threshold, dump_hist, dump_file)
result_info = "\n{0} [ {1} {2} {3} ] Output: {4}!!\n".format(
result_info = "{0} [ {1} {2} {3} ] Output {4}:".format(
'Pass' if judge else 'Fail', stage, target, mode, i)
result = similarity_info + result_info
with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f:
f.write(result)
result += result_info + similarity_info
i = i + 1
judges.append(judge)

with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f:
f.write(result)
return sum(judges) == len(judges), result
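Putting the pieces together: each test case contributes one row to the JSON report (filled in run_inference/run_evb, finalized here or on EVB failure), and json2md.py later renders the accumulated list as a markdown table. A minimal end-to-end sketch with made-up values, assuming tests/ is on the path:

```python
from test_utils import dump_dict_to_json, infer_file
from json2md import json2md

row = {'case': 'mobilenet_v2', 'target': 'k510',
       'if_quant_type': 'uint8', 'w_quant_type': 'uint8',
       'time(ms)': '3.21', 'fps': '311.53',
       'result': 'Pass', 'remark': 'N/A'}
dump_dict_to_json(row, infer_file())  # append one row per case
json2md(infer_file())                 # render infer_report.md
```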
16 changes: 16 additions & 0 deletions tests/test_utils.py
@@ -1,4 +1,5 @@
import os
import json
import numpy as np


@@ -33,6 +34,17 @@ def _cast_bfloat16_then_float32(values: np.array):
values[i] = value


def dump_dict_to_json(dict, json_file):
json_list = []
if os.path.exists(json_file):
with open(json_file, 'r') as f:
json_list = json.load(f)

json_list.append(dict)
with open(json_file, 'w') as f:
json.dump(json_list, f)


def in_ci():
return os.getenv('CI', False)

@@ -51,3 +63,7 @@ def nuc_port():

def test_executable(target):
return os.getenv('TEST_EXECUTABLE_{0}'.format(target.upper()))


def infer_file():
return os.getenv('INFER_FILE', 'infer_report.json')
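`dump_dict_to_json` appends to the existing list rather than overwriting, so records from successive cases accumulate in one file, and the path is overridable via the `INFER_FILE` environment variable. A small sketch:

```python
import os

# override the default report path for a local run
os.environ['INFER_FILE'] = 'local_report.json'

from test_utils import dump_dict_to_json, infer_file

dump_dict_to_json({'case': 'a', 'result': 'Pass'}, infer_file())
dump_dict_to_json({'case': 'b', 'result': 'Fail'}, infer_file())
# local_report.json now holds a JSON list with two records
```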