5 changes: 3 additions & 2 deletions .azure-pipelines/model-test.yml
@@ -12,7 +12,7 @@ pr:
     exclude:
       - neural_compressor/ux

-pool: MODEL_PERF_TEST
+pool: test-pool

 variables:
   OUT_SCRIPT_PATH: $(Build.SourcesDirectory)/.azure-pipelines/scripts/models
@@ -155,7 +155,8 @@ stages:
           cd ${OUT_SCRIPT_PATH}
           mkdir generated
           mkdir last_generated
-          python -u collect_log_all.py --logs_dir $(OUT_SCRIPT_PATH) --output_dir generated
+          pip install requests
+          python -u collect_log_all.py --logs_dir $(OUT_SCRIPT_PATH) --output_dir generated --build_id=$(Build.BuildId)
         displayName: "Collect all logs"
       - task: DownloadPipelineArtifact@2
         continueOnError: true
38 changes: 36 additions & 2 deletions .azure-pipelines/scripts/models/collect_log_all.py
@@ -1,9 +1,11 @@
 import argparse
 import os
+import requests

 parser = argparse.ArgumentParser(allow_abbrev=False)
 parser.add_argument("--logs_dir", type=str, default=".")
 parser.add_argument("--output_dir", type=str, default=".")
+parser.add_argument("--build_id", type=str, default="0")
 args = parser.parse_args()
 print(args)

@@ -12,20 +14,21 @@ def main():
     file_dir = args.logs_dir
     summary_content = ['OS;Platform;Framework;Version;Precision;Model;Mode;Type;BS;Value;Url\n']
     tuning_info_content = ['OS;Platform;Framework;Version;Model;Strategy;Tune_time\n']
+    url_dict = parse_download_url()
     # get full path of all files
     for root, dirs, files in os.walk(file_dir):
         for name in files:
             file_name = os.path.join(root, name)
             print(file_name)
             if '_summary.log' in name:
                 for line in open(file_name, "r"):
-                    # print(line)
                     if 'linux' in line:
+                        line = line.replace("<url>", parse_summary_log(line, url_dict))
                         summary_content.append(line)
             if '_tuning_info.log' in name:
                 for line in open(file_name, "r"):
-                    # print(line)
                     if 'linux' in line:
+                        line = line.replace("<url>", parse_tuning_log(line, url_dict))
                         tuning_info_content.append(line)
     f = open(args.output_dir + '/summary.log', "a")
     for summary in summary_content:
@@ -35,5 +38,36 @@ def main():
         f2.writelines(str(tuning_info))


+def parse_tuning_log(line, url_dict):
+    result = line.split(";")
+    OS, Platform, Framework, Version, Model, Strategy, Tune_time, Tuning_trials, URL, __ = result
+    file_name = f"{Framework}-{Model}-tune.log"
+    download_url = url_dict.get(f"{Framework}_{Model}")
+    download_url = f"{download_url}{file_name}"
+    return download_url
+
+
+def parse_summary_log(line, url_dict):
+    result = line.split(";")
+    OS, Platform, Framework, Version, Precision, Model, Mode, Type, BS, Value, Url = result
+    file_name = f"{Framework}-{Model}-tune.log"
+    download_url = url_dict.get(f"{Framework}_{Model}")
+    download_url = f"{download_url}{file_name}"
+    return download_url
+
+
+def parse_download_url():
+    azure_artifact_api_url = f'https://dev.azure.com/lpot-inc/neural-compressor/_apis/build/builds/{args.build_id}/artifacts?api-version=5.1'
+    azure_artifacts_data = {key: value for (key, value) in requests.get(azure_artifact_api_url).json().items()}
+    artifact_count = azure_artifacts_data.get("count")
+    artifact_value = azure_artifacts_data.get("value")
+    url_dict = {}
+    for item in artifact_value:
+        artifact_download_url = item.get("resource").get("downloadUrl")
+        artifact_download_url = f"{artifact_download_url[:-3]}file&subPath=%2F"
+        url_dict[item.get("name")] = artifact_download_url
+    return url_dict
+
+
 if __name__ == '__main__':
     main()
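
To make the new helpers above easier to follow, here is a small self-contained sketch of the same data flow, run against a canned Azure DevOps artifacts payload instead of a live requests.get() call. The artifact name, build URL, and summary line below are invented examples, not values from this PR.

# Sketch only: mirrors parse_download_url()/parse_summary_log() on a hard-coded
# artifacts response. All names and URLs here are illustrative assumptions.
sample_response = {
    "count": 1,
    "value": [
        {
            "name": "tensorflow_resnet50",
            "resource": {
                # the artifacts listing returns a zip download link ending in "format=zip";
                # the script rewrites it to fetch a single file inside the artifact
                "downloadUrl": "https://dev.azure.com/example-org/example-project/_apis/build/builds/123/artifacts?artifactName=tensorflow_resnet50&api-version=5.1&format=zip",
            },
        }
    ],
}

url_dict = {}
for item in sample_response["value"]:
    download_url = item["resource"]["downloadUrl"]
    # drop the trailing "zip" and point at a sub-path, as parse_download_url() does
    url_dict[item["name"]] = f"{download_url[:-3]}file&subPath=%2F"

# collect_log_model.py (changed below) now writes a "<url>" placeholder into each
# summary line; collect_log_all.py resolves it to the per-model tune log.
framework, model = "tensorflow", "resnet50"
summary_line = "linux;ICX;tensorflow;2.10.0;FP32;resnet50;Inference;Accuracy;1;0.76;<url>\n"
tune_log_url = f"{url_dict[f'{framework}_{model}']}{framework}-{model}-tune.log"
print(summary_line.replace("<url>", tune_log_url))

The placeholder indirection goes hand in hand with the new --build_id argument: per-model jobs do not know their final artifact download URLs when they write their logs, so the aggregation step can resolve them afterwards through the artifacts API for $(Build.BuildId).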
6 changes: 3 additions & 3 deletions .azure-pipelines/scripts/models/collect_log_model.py
@@ -133,9 +133,9 @@ def collect_log():
             parse_tuning_line(line, tmp)
         print(tmp)

-        results.append('{};{};{};{};FP32;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['fp32_acc'], URL))
-        results.append('{};{};{};{};INT8;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['int8_acc'], URL))
-        tuning_infos.append(';'.join([OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['strategy'], str(tmp['tune_time']), str(tmp['tuning_trials']), URL, f"{round(tmp['max_mem_size'] / tmp['total_mem_size'] * 100, 4)}%"])+'\n')
+        results.append('{};{};{};{};FP32;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['fp32_acc'], "<url>"))
+        results.append('{};{};{};{};INT8;{};Inference;Accuracy;1;{};{}\n'.format(OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['int8_acc'], "<url>"))
+        tuning_infos.append(';'.join([OS, PLATFORM, args.framework, args.fwk_ver, args.model, tmp['strategy'], str(tmp['tune_time']), str(tmp['tuning_trials']), "<url>", f"{round(tmp['max_mem_size'] / tmp['total_mem_size'] * 100, 4)}%"])+'\n')
     # get model benchmark results
     for precision in ['int8', 'fp32']:
         throughput = 0.0
16 changes: 11 additions & 5 deletions .azure-pipelines/scripts/models/generate_report.sh
@@ -198,7 +198,7 @@ function generate_html_core {
             printf("<td style=\"background-color:#90EE90\">%.2f</td>", target);
         }else if(target < 1) {
             printf("<td style=\"background-color:#FFD2D2\">%.2f</td>", target);
-            job_status = "fail"
+            perf_status = "fail"
         }else{
             printf("<td>%.2f</td>", target);
         }
@@ -233,10 +233,11 @@ function generate_html_core {
             printf("<td style=\"%s\" colspan=2>%.2f %</td>", status_png, target*100);
         } else {
             target = new_result / previous_result;
-            if(target <= 1.104 && target >= 0.895) {
+            if(target <= 1.054 && target >= 0.945) {
                 status_png = "background-color:#90EE90";
             } else {
                 status_png = "background-color:#FFD2D2";
+                perf_status = "fail"
             }
             printf("<td style=\"%s\" colspan=2>%.2f</td>", status_png, target);
         }
@@ -264,15 +265,15 @@ function generate_html_core {
                 status_png = "background-color:#90EE90";
             } else {
                 status_png = "background-color:#FFD2D2";
-                job_status = "fail"
+                ratio_status = "fail"
             }
             printf("<td style=\"%s\">%.2f</td>", status_png, target);
         } else {
             if (new_result == nan && previous_result == nan) {
                 printf("<td class=\"col-cell col-cell3\"></td>");
             } else {
                 if (new_result == nan) {
-                    job_status = "fail"
+                    ratio_status = "fail"
                     status_png = "background-color:#FFD2D2";
                     printf("<td style=\"%s\"></td>", status_png);
                 } else {
@@ -284,6 +285,8 @@ function generate_html_core {

 BEGIN {
     job_status = "pass"
+    perf_status = "pass"
+    ratio_status = "pass"
     // issue list
     jira_mobilenet = "https://jira01.devtools.intel.com/browse/PADDLEQ-384";
     jira_resnext = "https://jira01.devtools.intel.com/browse/PADDLEQ-387";
@@ -377,8 +380,11 @@ function generate_html_core {

     printf("</tr>\n");

+    status = (perf_status == "fail" && ratio_status == "fail") ? "fail" : "pass"
+    status = (job_status == "fail") ? "fail" : status
+
 } END{
-    printf("\n%s", job_status);
+    printf("\n%s", status);
 }
 ' >> ${output_dir}/report.html
 job_state=$(tail -1 ${WORKSPACE}/report.html)
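
For readability, the new pass/fail roll-up in the awk block above can be restated as a short Python sketch. The variable names mirror the script; the example calls and their values are illustrative only.

# Python rendering of the report's new status roll-up (awk logic shown above).
def overall_status(job_status: str, perf_status: str, ratio_status: str) -> str:
    # The report now fails only when BOTH the perf comparison and the ratio
    # comparison fail, or when job_status itself was already "fail".
    status = "fail" if (perf_status == "fail" and ratio_status == "fail") else "pass"
    return "fail" if job_status == "fail" else status

print(overall_status("pass", "fail", "pass"))   # -> pass: a perf miss alone no longer fails the report
print(overall_status("pass", "fail", "fail"))   # -> fail: perf and ratio both out of range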
2 changes: 1 addition & 1 deletion .azure-pipelines/scripts/models/new_benchmark.py
@@ -110,7 +110,7 @@ def config_instance(cores_per_instance, num_of_instance):
         else:
             core_list = np.arange(0, cores_per_instance) + i * cores_per_instance
         core.append(core_list.tolist())
-
+    core = core[::-1]
     for i in range(len(core)):
         core[i] = [str(j) for j in core[i]]
         core[i] = ','.join(core[i])
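
The one-line new_benchmark.py change reverses the per-instance core lists before they are joined into comma-separated core-ID strings. A stripped-down sketch of the effect follows; this is not the real config_instance() (its if/else branch is omitted), and the 8-core, 2-cores-per-instance numbers are made-up examples.

# Simplified illustration of the core-binding order change.
import numpy as np

def assign_cores(cores_per_instance, num_of_instance):
    core = []
    for i in range(num_of_instance):
        core_list = np.arange(0, cores_per_instance) + i * cores_per_instance
        core.append(core_list.tolist())
    core = core[::-1]  # the line added by this PR: hand out the highest-numbered cores first
    return [','.join(str(j) for j in c) for c in core]

print(assign_cores(2, 4))  # ['6,7', '4,5', '2,3', '0,1'] instead of ['0,1', '2,3', '4,5', '6,7']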