diff --git a/requirements.test.txt b/requirements.test.txt
index 29787e2db0..a67a03da9f 100644
--- a/requirements.test.txt
+++ b/requirements.test.txt
@@ -17,4 +17,6 @@ pytest-xdist
 pyyaml
 pythonnet==3.0.1
 clr_loader==0.2.4
-toml==0.10.2
\ No newline at end of file
+toml==0.10.2
+pandas
+tabulate
\ No newline at end of file
diff --git a/tests/compare_util.py b/tests/compare_util.py
index 6398bf65b4..9395cbca21 100644
--- a/tests/compare_util.py
+++ b/tests/compare_util.py
@@ -95,7 +95,7 @@ def compare_ndarray(expected: np.ndarray,
     if dump_hist:
         y, x = np.histogram(expected - actual, 100)
         np.savetxt(dump_file, np.stack((x[:-1], y)).T, fmt='%f', delimiter=',')
-    similarity_info = f"\n{similarity_name} similarity = {similarity}, threshold = {threshold}\n"
+    similarity_info = f"{similarity_name} similarity = {similarity}, threshold = {threshold}\n"
 
     if similarity_name in ['cosine', 'euclidean', 'segment']:
         compare_op = lt
diff --git a/tests/config.toml b/tests/config.toml
index a743bb958a..93825dc482 100644
--- a/tests/config.toml
+++ b/tests/config.toml
@@ -1,6 +1,7 @@
 name = 'default_config'
 root = 'tests_output'
 dump_hist = false
+dump_infer = false
 
 [compile_opt]
 preprocess = false
diff --git a/tests/inference.py b/tests/inference.py
index c8052efa30..3b6b696b8a 100644
--- a/tests/inference.py
+++ b/tests/inference.py
@@ -7,6 +7,7 @@ import socket
 import json
 
 from test_utils import *
+import time
 
 
 class Inference:
@@ -19,8 +20,17 @@ def run_inference(self, compiler, target, ptq_enabled, infer_dir):
         running_on_evb = in_ci and target in kpu_targets and nuc_ip is not None and nuc_port is not None and test_executable is not None and len(
             self.inputs) > 0 and len(self.outputs) > 0
 
+        if self.cfg['dump_infer']:
+            self.infer_dict['case'] = os.path.basename(self.case_dir)
+            self.infer_dict['target'] = target
+
         if ptq_enabled:
             self.set_quant_opt(compiler)
+
+            if self.cfg['dump_infer']:
+                self.infer_dict['if_quant_type'] = self.cfg['ptq_opt']['quant_type']
+                self.infer_dict['w_quant_type'] = self.cfg['ptq_opt']['w_quant_type']
+
         compiler.compile()
         kmodel = compiler.gencode_tobytes()
         os.makedirs(infer_dir, exist_ok=True)
@@ -35,7 +45,17 @@ def run_inference(self, compiler, target, ptq_enabled, infer_dir):
             sim = nncase.Simulator()
             sim.load_model(kmodel)
             self.set_infer_input(sim, compile_opt)
+
+            if self.cfg['dump_infer']:
+                t1 = time.perf_counter()
+
             sim.run()
+
+            if self.cfg['dump_infer']:
+                t = (time.perf_counter() - t1) * 1000
+                self.infer_dict['time(ms)'] = str(t)
+                self.infer_dict['fps'] = str(round(1000 / t, 2))
+
             outputs = self.dump_infer_output(sim, compile_opt, infer_dir)
 
         return outputs
@@ -126,8 +146,15 @@ def run_evb(self, target, kmodel, compile_opt):
 
         # get infer result
        outputs = []
-        cmd_result = client_socket.recv(1024).decode()
-        if cmd_result.find('finish') != -1:
+        ret = client_socket.recv(1024)
+        result_dict = json.loads(ret.decode())
+
+        if result_dict['type'].find('finish') != -1:
+            if self.cfg['dump_infer']:
+                t = result_dict['time']
+                self.infer_dict['time(ms)'] = str(t)
+                self.infer_dict['fps'] = str(round(1000 / t, 2))
+
             client_socket.sendall(f"pls send outputs".encode())
 
             # recv outputs
@@ -150,6 +177,11 @@ def run_evb(self, target, kmodel, compile_opt):
                 client_socket.close()
         else:
             client_socket.close()
-            raise Exception(f'{cmd_result}')
+
+            if self.cfg['dump_infer']:
+                self.infer_dict['result'] = 'Fail'
+                self.infer_dict['remark'] = result_dict['error']
+                dump_dict_to_json(self.infer_dict, self.infer_file)
+            raise Exception(result_dict['error'])
 
         return outputs
diff --git a/tests/json2md.py b/tests/json2md.py
new file mode 100644
index 0000000000..983d859166
--- /dev/null
+++ b/tests/json2md.py
@@ -0,0 +1,23 @@
+import argparse
+import json
+import pandas as pd
+
+
+def json2md(json_file):
+    with open(json_file, 'r') as f:
+        json_list = json.load(f)
+
+    json_list = sorted(json_list, key=lambda d: d['case'])
+    df = pd.DataFrame.from_records(json_list)
+    md = df.to_markdown()
+    md_file = json_file.split('/')[-1].split('.')[0] + '.md'
+
+    with open(md_file, 'w') as f:
+        f.write(md)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(prog="json2md")
+    parser.add_argument("--json", help='json file', type=str)
+    args = parser.parse_args()
+    json2md(args.json)
diff --git a/tests/nuc_proxy.py b/tests/nuc_proxy.py
index bc9dcab969..f2ffdac1fa 100644
--- a/tests/nuc_proxy.py
+++ b/tests/nuc_proxy.py
@@ -19,7 +19,7 @@ def __init__(self, port, baudrate, logger):
         self.port = port
         self.baudrate = baudrate
         self.logger = logger
-        self.timeout = 20
+        self.timeout = 60
 
     def open(self):
         self.logger.debug(f'open {self.port} begin')
@@ -144,17 +144,21 @@ def infer_worker(target):
         for cmd in cmds.split(';'):
            ret = target.s1.run_cmd(cmd, separator)
-            target.logger.debug("ret = {0}".format(ret))
 
         # infer result
+        result_dict = {'type': 'finish', 'time': 0.0, 'error': ''}
         if ret.find('terminate') != -1 or ret.find('Exception') != -1:
-            err = f'infer exception: {ret}'
             target.logger.error('infer exception')
-            conn.sendall(err[0:1024].encode())
+            err = f'infer exception: {ret}'
+            result_dict['type'] = 'exception'
+            result_dict['error'] = err[0:1024]
+            conn.sendall(json.dumps(result_dict).encode())
         elif ret.find(separator) == -1:
             # reboot target when timeout
-            conn.sendall(f'infer timeout'.encode())
-            target.logger.error('reboot {0} for timeout'.format(target.name))
+            target.logger.error('reboot for timeout')
+            result_dict['type'] = 'timeout'
+            result_dict['error'] = 'infer timeout'
+            conn.sendall(json.dumps(result_dict).encode())
 
             # reboot after login
             target.s0.run_cmd('root')
@@ -162,7 +166,9 @@ def infer_worker(target):
             target.s0.run_cmd('reboot')
             time.sleep(20)
         else:
-            conn.sendall(f'infer finish'.encode())
+            # parse the inference time (ms) reported in the target's output
+            result_dict['time'] = float(ret.split('\n')[1].split()[1])
+            conn.sendall(json.dumps(result_dict).encode())
 
         dummy = conn.recv(1024)
         # send outputs
diff --git a/tests/test_runner.py b/tests/test_runner.py
index 9d8c03b28f..2e0b89055e 100644
--- a/tests/test_runner.py
+++ b/tests/test_runner.py
@@ -55,6 +55,19 @@ def __init__(self, case_name, override_cfg: str = None) -> None:
         # used for tag dynamic model for onnx simplify
         self.dynamic = False
 
+        if self.cfg['dump_infer']:
+            self.infer_file = test_utils.infer_file()
+            self.infer_dict = {
+                'case': 'unknown',
+                'target': 'cpu',
+                'if_quant_type': 'uint8',
+                'w_quant_type': 'uint8',
+                'time(ms)': 'N/A',
+                'fps': 'N/A',
+                'result': 'Pass',
+                'remark': 'N/A'
+            }
+
     def transform_input(self, values: List[np.ndarray], type: str, stage: str) -> List[np.ndarray]:
         new_values = []
         compile_opt = self.cfg['compile_opt']
@@ -252,6 +265,10 @@ def run(self, model_file: Union[List[str], str]):
                 judge, result = self.compare_results(
                     expected, actual, stage, k_target, v_target['similarity_name'], k_mode, v_mode['threshold'], dump_hist, mode_dir)
+                if stage == 'infer' and self.cfg['dump_infer']:
+                    self.infer_dict['result'] = 'Pass' if judge else 'Fail'
+                    self.infer_dict['remark'] = result.replace('\n', ' ')
+                    dump_dict_to_json(self.infer_dict, self.infer_file)
                 if not judge:
                     if test_utils.in_ci():
                         self.clear(self.case_dir)
@@ -407,17 +424,19 @@ def compare_results(self,
                         stage, target, similarity_name, mode, threshold, dump_hist, dump_dir) -> Tuple[bool, str]:
         i = 0
         judges = []
+        result = ''
         for expected, actual in zip(ref_ouputs, test_outputs):
             expected = expected.astype(np.float32)
             actual = actual.astype(np.float32)
             dump_file = os.path.join(dump_dir, 'nncase_result_{0}_hist.csv'.format(i))
             judge, similarity_info = compare_ndarray(
                 expected, actual, similarity_name, threshold, dump_hist, dump_file)
-            result_info = "\n{0} [ {1} {2} {3} ] Output: {4}!!\n".format(
+            result_info = "{0} [ {1} {2} {3} ] Output {4}: ".format(
                 'Pass' if judge else 'Fail', stage, target, mode, i)
-            result = similarity_info + result_info
-            with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f:
-                f.write(result)
+            result += result_info + similarity_info
             i = i + 1
             judges.append(judge)
+
+        with open(os.path.join(self.case_dir, 'test_result.txt'), 'a+') as f:
+            f.write(result)
         return sum(judges) == len(judges), result
diff --git a/tests/test_utils.py b/tests/test_utils.py
index b2716c8b9c..3cd04c54bd 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,5 @@
 import os
+import json
 import numpy as np
 
 
@@ -33,6 +34,17 @@ def _cast_bfloat16_then_float32(values: np.array):
         values[i] = value
 
 
+def dump_dict_to_json(data, json_file):
+    json_list = []
+    if os.path.exists(json_file):
+        with open(json_file, 'r') as f:
+            json_list = json.load(f)
+
+    json_list.append(data)
+    with open(json_file, 'w') as f:
+        json.dump(json_list, f)
+
+
 def in_ci():
     return os.getenv('CI', False)
 
@@ -51,3 +63,7 @@ def nuc_port():
 
 def test_executable(target):
     return os.getenv('TEST_EXECUTABLE_{0}'.format(target.upper()))
+
+
+def infer_file():
+    return os.getenv('INFER_FILE', 'infer_report.json')
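
For reference, the simulator path above brackets `sim.run()` with `time.perf_counter()`, converts the elapsed time to milliseconds, and derives FPS as `1000 / t`. A minimal standalone sketch of that arithmetic (the `work()` stand-in is illustrative, not part of the patch):

```python
import time

def work():
    # stand-in for sim.run(): any workload whose wall-clock time we want
    time.sleep(0.008)

t1 = time.perf_counter()
work()
t = (time.perf_counter() - t1) * 1000  # elapsed time in milliseconds
fps = round(1000 / t, 2)               # e.g. t ~ 8 ms gives fps ~ 125.0
print(f'time(ms) = {t}, fps = {fps}')
```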
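
The EVB path now exchanges a small JSON status message between `nuc_proxy.py` and `Inference.run_evb` instead of a bare `'infer finish'` / `'infer timeout'` string. Below is a sketch of both ends of that exchange; the helper names are illustrative, and it assumes, as the patch does, that a single `recv(1024)` captures one whole message:

```python
import json
import socket

# Message shape produced by infer_worker in nuc_proxy.py:
#   type  - 'finish', 'exception', or 'timeout'
#   time  - inference time in ms (meaningful only for 'finish')
#   error - error text (meaningful only for 'exception'/'timeout')

def send_status(conn: socket.socket, msg_type: str, time_ms: float = 0.0, error: str = '') -> None:
    # proxy side: serialize the status dict and send it, as infer_worker does
    conn.sendall(json.dumps({'type': msg_type, 'time': time_ms, 'error': error}).encode())

def recv_status(sock: socket.socket) -> dict:
    # host side: single recv plus parse, as Inference.run_evb does
    return json.loads(sock.recv(1024).decode())
```

On the wire, a successful run looks like `{"type": "finish", "time": 8.0, "error": ""}`.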
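
Finally, the report pipeline: with `dump_infer` enabled, each case appends its `infer_dict` record to the JSON file named by the `INFER_FILE` environment variable (default `infer_report.json`) via `dump_dict_to_json`, and `json2md.py` converts the accumulated list into a markdown table with `pandas.DataFrame.to_markdown` (which is why `pandas` and `tabulate` join requirements.test.txt). A sketch of that flow, assuming it is run from the `tests/` directory and using invented record values:

```python
from test_utils import dump_dict_to_json
from json2md import json2md

# A record shaped like TestRunner.infer_dict (all values invented):
record = {'case': 'test_conv2d', 'target': 'k510',
          'if_quant_type': 'uint8', 'w_quant_type': 'uint8',
          'time(ms)': '8.0', 'fps': '125.0',
          'result': 'Pass', 'remark': 'N/A'}

dump_dict_to_json(record, 'infer_report.json')  # read-append-write the JSON list
json2md('infer_report.json')                    # writes infer_report.md, sorted by case
```

The same conversion is available from the command line: `python tests/json2md.py --json infer_report.json`.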