diff --git a/.azure-pipelines/model-test.yml b/.azure-pipelines/model-test.yml
index 4f4ce12c680..1aea506d48b 100644
--- a/.azure-pipelines/model-test.yml
+++ b/.azure-pipelines/model-test.yml
@@ -12,7 +12,7 @@ pr:
     exclude:
       - neural_compressor/ux
 
-pool: MODEL_PERF_TEST
+pool: test-pool
 
 variables:
   OUT_SCRIPT_PATH: $(Build.SourcesDirectory)/.azure-pipelines/scripts/models
@@ -155,7 +155,8 @@ stages:
           cd ${OUT_SCRIPT_PATH}
           mkdir generated
           mkdir last_generated
-          python -u collect_log_all.py --logs_dir $(OUT_SCRIPT_PATH) --output_dir generated
+          pip install requests
+          python -u collect_log_all.py --logs_dir $(OUT_SCRIPT_PATH) --output_dir generated --build_id=$(Build.BuildId)
         displayName: "Collect all logs"
       - task: DownloadPipelineArtifact@2
         continueOnError: true
diff --git a/.azure-pipelines/scripts/models/collect_log_all.py b/.azure-pipelines/scripts/models/collect_log_all.py
index fb9db0d6721..be884e6e730 100644
--- a/.azure-pipelines/scripts/models/collect_log_all.py
+++ b/.azure-pipelines/scripts/models/collect_log_all.py
@@ -1,9 +1,11 @@
 import argparse
 import os
+import requests
 
 parser = argparse.ArgumentParser(allow_abbrev=False)
 parser.add_argument("--logs_dir", type=str, default=".")
 parser.add_argument("--output_dir", type=str, default=".")
+parser.add_argument("--build_id", type=str, default="0")
 args = parser.parse_args()
 print(args)
 
@@ -12,6 +14,7 @@ def main():
     file_dir = args.logs_dir
     summary_content = ['OS;Platform;Framework;Version;Precision;Model;Mode;Type;BS;Value;Url\n']
     tuning_info_content = ['OS;Platform;Framework;Version;Model;Strategy;Tune_time\n']
+    url_dict = parse_download_url()
     # get full path of all files
     for root, dirs, files in os.walk(file_dir):
         for name in files:
@@ -19,13 +22,13 @@ def main():
             print(file_name)
             if '_summary.log' in name:
                 for line in open(file_name, "r"):
-                    # print(line)
                     if 'linux' in line:
+                        line = line.replace("<url>", parse_summary_log(line, url_dict))
                         summary_content.append(line)
             if '_tuning_info.log' in name:
                 for line in open(file_name, "r"):
-                    # print(line)
                     if 'linux' in line:
+                        line = line.replace("<url>", parse_tuning_log(line, url_dict))
                         tuning_info_content.append(line)
     f = open(args.output_dir + '/summary.log', "a")
     for summary in summary_content:
@@ -35,5 +38,36 @@ def main():
         f2.writelines(str(tuning_info))
 
 
+def parse_tuning_log(line, url_dict):
+    result = line.split(";")
+    OS, Platform, Framework, Version, Model, Strategy, Tune_time, Tuning_trials, URL, __ = result
+    file_name = f"{Framework}-{Model}-tune.log"
+    download_url = url_dict.get(f"{Framework}_{Model}")
+    download_url = f"{download_url}{file_name}"
+    return download_url
+
+
+def parse_summary_log(line, url_dict):
+    result = line.split(";")
+    OS, Platform, Framework, Version, Precision, Model, Mode, Type, BS, Value, Url = result
+    file_name = f"{Framework}-{Model}-tune.log"
+    download_url = url_dict.get(f"{Framework}_{Model}")
+    download_url = f"{download_url}{file_name}"
+    return download_url
+
+
+def parse_download_url():
+    azure_artifact_api_url = f'https://dev.azure.com/lpot-inc/neural-compressor/_apis/build/builds/{args.build_id}/artifacts?api-version=5.1'
+    azure_artifacts_data = {key: value for (key, value) in requests.get(azure_artifact_api_url).json().items()}
+    artifact_count = azure_artifacts_data.get("count")
+    artifact_value = azure_artifacts_data.get("value")
+    url_dict = {}
+    for item in artifact_value:
+        artifact_download_url = item.get("resource").get("downloadUrl")
+        artifact_download_url = f"{artifact_download_url[:-3]}file&subPath=%2F"
+        url_dict[item.get("name")] = artifact_download_url
+    return url_dict
+
+
 if __name__ == '__main__':
     main()
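Note on the new URL plumbing: `collect_log_model.py` (next diff) now writes a literal `<url>` placeholder into each result line, and `collect_log_all.py` swaps it for a per-artifact download link built by `parse_download_url()` from the Azure DevOps Build Artifacts REST API. A minimal sketch of that URL rewrite, using a hypothetical payload in place of a live API response (the artifact name and URL below are illustrative; the field shapes match what the code above reads):

```python
# Sketch of parse_download_url()'s URL rewrite. The payload is a
# hypothetical stand-in for GET .../_apis/build/builds/{id}/artifacts
# with api-version=5.1; real responses carry the same count/value/
# name/resource.downloadUrl fields that the script accesses.
sample_response = {
    "count": 1,
    "value": [{
        "name": "tensorflow_resnet50",  # artifact name: {framework}_{model}
        "resource": {
            # real downloadUrl values end in "...&%24format=zip"
            "downloadUrl": "https://dev.azure.com/org/proj/_apis/build/builds/42/"
                           "artifacts?artifactName=tensorflow_resnet50&api-version=5.1&%24format=zip",
        },
    }],
}

url_dict = {}
for item in sample_response["value"]:
    url = item["resource"]["downloadUrl"]
    # Drop the trailing "zip" and request a single file inside the
    # artifact instead of the whole archive ("%2F" is a URL-encoded "/").
    url_dict[item["name"]] = f"{url[:-3]}file&subPath=%2F"

# A log link is then url_dict[key] + "{framework}-{model}-tune.log".
print(url_dict["tensorflow_resnet50"])
```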
diff --git a/.azure-pipelines/scripts/models/collect_log_model.py b/.azure-pipelines/scripts/models/collect_log_model.py
index b26adf52bd6..c73623fa1d0 100644
--- a/.azure-pipelines/scripts/models/collect_log_model.py
+++ b/.azure-pipelines/scripts/models/collect_log_model.py
@@ -133,9 +133,9 @@ def collect_log():
             parse_tuning_line(line, tmp)
     print(tmp)
 
-    results.append('{};{};{};{};FP32;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['fp32_acc'], URL))
-    results.append('{};{};{};{};INT8;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['int8_acc'], URL))
-    tuning_infos.append(';'.join([OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['strategy'], str(tmp['tune_time']), str(tmp['tuning_trials']), URL, f"{round(tmp['max_mem_size'] / tmp['total_mem_size'] * 100, 4)}%"])+'\n')
+    results.append('{};{};{};{};FP32;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['fp32_acc'], "<url>"))
+    results.append('{};{};{};{};INT8;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['int8_acc'], "<url>"))
+    tuning_infos.append(';'.join([OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['strategy'], str(tmp['tune_time']), str(tmp['tuning_trials']), "<url>", f"{round(tmp['max_mem_size'] / tmp['total_mem_size'] * 100, 4)}%"])+'\n')
     # get model benchmark results
     for precision in ['int8', 'fp32']:
         throughput = 0.0
diff --git a/.azure-pipelines/scripts/models/generate_report.sh b/.azure-pipelines/scripts/models/generate_report.sh
index 568799ebbc1..714c718d2f9 100644
--- a/.azure-pipelines/scripts/models/generate_report.sh
+++ b/.azure-pipelines/scripts/models/generate_report.sh
@@ -198,7 +198,7 @@ function generate_html_core {
                     printf("<td>%.2f</td>", target);
                 }else if(target < 1) {
                     printf("<td>%.2f</td>", target);
-                    job_status = "fail"
+                    perf_status = "fail"
                 }else{
                     printf("<td>%.2f</td>", target);
                 }
@@ -233,10 +233,11 @@ function generate_html_core {
                     printf("<td style=\"%s\">%.2f %%</td>", status_png, target*100);
                 } else {
                     target = new_result / previous_result;
-                    if(target <= 1.104 && target >= 0.895) {
+                    if(target <= 1.054 && target >= 0.945) {
                         status_png = "background-color:#90EE90";
                     } else {
                         status_png = "background-color:#FFD2D2";
+                        perf_status = "fail"
                     }
                     printf("<td style=\"%s\">%.2f</td>", status_png, target);
                 }
@@ -264,7 +265,7 @@ function generate_html_core {
                         status_png = "background-color:#90EE90";
                     } else {
                         status_png = "background-color:#FFD2D2";
-                        job_status = "fail"
+                        ratio_status = "fail"
                     }
                     printf("<td style=\"%s\">%.2f</td>", status_png, target);
                 } else {
@@ -272,7 +273,7 @@ function generate_html_core {
                     printf("<td></td>");
                 } else {
                     if (new_result == nan) {
-                        job_status = "fail"
+                        ratio_status = "fail"
                         status_png = "background-color:#FFD2D2";
                         printf("<td style=\"%s\"></td>", status_png);
                     } else {
@@ -284,6 +285,8 @@ function generate_html_core {
 
     BEGIN {
         job_status = "pass"
+        perf_status = "pass"
+        ratio_status = "pass"
         // issue list
         jira_mobilenet = "https://jira01.devtools.intel.com/browse/PADDLEQ-384";
         jira_resnext = "https://jira01.devtools.intel.com/browse/PADDLEQ-387";
@@ -377,8 +380,11 @@ function generate_html_core {
             printf("</tr>\n");
+            status = (perf_status == "fail" && ratio_status == "fail") ? "fail" : "pass"
+            status = (job_status == "fail") ? "fail" : status
+
         }
 
     END{
-        printf("</table>\n%s", job_status);
+        printf("</table>\n%s", status);
     }
     ' >> ${output_dir}/report.html
     job_state=$(tail -1 ${WORKSPACE}/report.html)
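How the report verdict changes: a regressed performance cell now sets `perf_status` (and a regressed ratio cell sets `ratio_status`) instead of failing the job outright, and the performance pass band tightens from roughly ±10% (0.895–1.104) to roughly ±5% (0.945–1.054). The final verdict fails only when both checks fail, or when the job itself failed. A sketch of that rule in Python, mirroring the awk variables added above:

```python
# Mirror of the verdict logic added to generate_report.sh
# (variable names match the awk code).
def report_status(job_status: str, perf_status: str, ratio_status: str) -> str:
    # Performance and ratio checks must BOTH fail to fail the report...
    status = "fail" if (perf_status == "fail" and ratio_status == "fail") else "pass"
    # ...but a hard job failure always fails it.
    return "fail" if job_status == "fail" else status

assert report_status("pass", "fail", "pass") == "pass"   # perf alone: tolerated
assert report_status("pass", "fail", "fail") == "fail"   # both checks fail
assert report_status("fail", "pass", "pass") == "fail"   # job failure wins
```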
"fail" : status + } END{ - printf("\n%s", job_status); + printf("\n%s", status); } ' >> ${output_dir}/report.html job_state=$(tail -1 ${WORKSPACE}/report.html) diff --git a/.azure-pipelines/scripts/models/new_benchmark.py b/.azure-pipelines/scripts/models/new_benchmark.py index daae8f0d415..91d906379fc 100644 --- a/.azure-pipelines/scripts/models/new_benchmark.py +++ b/.azure-pipelines/scripts/models/new_benchmark.py @@ -110,7 +110,7 @@ def config_instance(cores_per_instance, num_of_instance): else: core_list = np.arange(0, cores_per_instance) + i * cores_per_instance core.append(core_list.tolist()) - + core = core[::-1] for i in range(len(core)): core[i] = [str(j) for j in core[i]] core[i] = ','.join(core[i])