From 75e77caa001ab9cbc98e0f02f757b8d964149613 Mon Sep 17 00:00:00 2001 From: zhangkaili Date: Sat, 7 Mar 2020 09:27:05 +0800 Subject: [PATCH] Testing adlik performance (#79) Closes #79 Signed-off-by: zhangkaili --- benchmark/.flake8 | 2 + benchmark/.pylintrc | 21 +++ benchmark/README.md | 84 +++++++++ benchmark/bandit.yaml | 4 + benchmark/setup.py | 46 +++++ benchmark/src/automatic_test.py | 104 +++++++++++ benchmark/src/compile_model.py | 33 ++++ benchmark/src/supervisord.conf | 18 ++ benchmark/src/test_result.py | 72 ++++++++ benchmark/test/client/imagenet_client.py | 169 ++++++++++++++++++ benchmark/test/client/mnist_client.py | 159 ++++++++++++++++ benchmark/test/client_script/client_script.sh | 4 + .../test/compile_script/compile_script.sh | 3 + .../compile_script/openvino_compile_script.sh | 5 + benchmark/test/data/imagenet.JPEG | Bin 0 -> 33311 bytes benchmark/test/data/mnist.png | Bin 0 -> 373 bytes .../test/docker_build/openvino.Dockerfile | 49 +++++ .../test/docker_build/tensorflow.Dockerfile | 45 +++++ .../docker_build/tensorflow_gpu.Dockerfile | 65 +++++++ .../test/docker_build/tensorrt.Dockerfile | 67 +++++++ benchmark/test/docker_build/tflite.Dockerfile | 45 +++++ .../test/docker_test/openvino.Dockerfile | 88 +++++++++ .../test/docker_test/tensorflow.Dockerfile | 82 +++++++++ .../docker_test/tensorflow_gpu.Dockerfile | 104 +++++++++++ .../test/docker_test/tensorrt.Dockerfile | 124 +++++++++++++ benchmark/test/docker_test/tflite.Dockerfile | 87 +++++++++ .../serving_script/openvino_serving_script.sh | 5 + .../test/serving_script/serving_script.sh | 3 + benchmark/test/test_automatic_test.py | 33 ++++ .../test_model/mnist_keras/mnist_keras.py | 50 ++++++ .../test_model/mnist_keras/serving_model.json | 22 +++ .../test_model/mnist_pytorch/mnist_pytorch.py | 93 ++++++++++ .../mnist_pytorch/serving_model.json | 24 +++ .../mnist_tensorflow/mnist_tensorflow.py | 134 ++++++++++++++ .../mnist_tensorflow/serving_model.json | 22 +++ 
.../resnet50_keras/resnet50_keras.py | 22 +++ .../resnet50_keras/serving_model.json | 22 +++ .../resnet50_pytorch/resnet50_pytorch.py | 28 +++ .../resnet50_pytorch/serving_model.json | 25 +++ .../resnet50_tensorflow/resnet50_ckpt.py | 25 +++ .../resnet50_tensorflow.py | 55 ++++++ .../resnet50_tensorflow/serving_model.json | 22 +++ .../resnet50_tensorflow/serving_model_pb.json | 22 +++ benchmark/tox.ini | 9 + 44 files changed, 2096 insertions(+) create mode 100644 benchmark/.flake8 create mode 100644 benchmark/.pylintrc create mode 100644 benchmark/README.md create mode 100644 benchmark/bandit.yaml create mode 100644 benchmark/setup.py create mode 100644 benchmark/src/automatic_test.py create mode 100644 benchmark/src/compile_model.py create mode 100644 benchmark/src/supervisord.conf create mode 100644 benchmark/src/test_result.py create mode 100644 benchmark/test/client/imagenet_client.py create mode 100644 benchmark/test/client/mnist_client.py create mode 100644 benchmark/test/client_script/client_script.sh create mode 100644 benchmark/test/compile_script/compile_script.sh create mode 100644 benchmark/test/compile_script/openvino_compile_script.sh create mode 100644 benchmark/test/data/imagenet.JPEG create mode 100644 benchmark/test/data/mnist.png create mode 100644 benchmark/test/docker_build/openvino.Dockerfile create mode 100644 benchmark/test/docker_build/tensorflow.Dockerfile create mode 100644 benchmark/test/docker_build/tensorflow_gpu.Dockerfile create mode 100644 benchmark/test/docker_build/tensorrt.Dockerfile create mode 100644 benchmark/test/docker_build/tflite.Dockerfile create mode 100644 benchmark/test/docker_test/openvino.Dockerfile create mode 100644 benchmark/test/docker_test/tensorflow.Dockerfile create mode 100644 benchmark/test/docker_test/tensorflow_gpu.Dockerfile create mode 100644 benchmark/test/docker_test/tensorrt.Dockerfile create mode 100644 benchmark/test/docker_test/tflite.Dockerfile create mode 100644 
benchmark/test/serving_script/openvino_serving_script.sh create mode 100644 benchmark/test/serving_script/serving_script.sh create mode 100644 benchmark/test/test_automatic_test.py create mode 100644 benchmark/test/test_model/mnist_keras/mnist_keras.py create mode 100644 benchmark/test/test_model/mnist_keras/serving_model.json create mode 100644 benchmark/test/test_model/mnist_pytorch/mnist_pytorch.py create mode 100644 benchmark/test/test_model/mnist_pytorch/serving_model.json create mode 100644 benchmark/test/test_model/mnist_tensorflow/mnist_tensorflow.py create mode 100644 benchmark/test/test_model/mnist_tensorflow/serving_model.json create mode 100644 benchmark/test/test_model/resnet50_keras/resnet50_keras.py create mode 100644 benchmark/test/test_model/resnet50_keras/serving_model.json create mode 100644 benchmark/test/test_model/resnet50_pytorch/resnet50_pytorch.py create mode 100644 benchmark/test/test_model/resnet50_pytorch/serving_model.json create mode 100644 benchmark/test/test_model/resnet50_tensorflow/resnet50_ckpt.py create mode 100644 benchmark/test/test_model/resnet50_tensorflow/resnet50_tensorflow.py create mode 100644 benchmark/test/test_model/resnet50_tensorflow/serving_model.json create mode 100644 benchmark/test/test_model/resnet50_tensorflow/serving_model_pb.json create mode 100644 benchmark/tox.ini diff --git a/benchmark/.flake8 b/benchmark/.flake8 new file mode 100644 index 000000000..6deafc261 --- /dev/null +++ b/benchmark/.flake8 @@ -0,0 +1,2 @@ +[flake8] +max-line-length = 120 diff --git a/benchmark/.pylintrc b/benchmark/.pylintrc new file mode 100644 index 000000000..08d432d8b --- /dev/null +++ b/benchmark/.pylintrc @@ -0,0 +1,21 @@ +[MASTER] +jobs=0 + +[MESSAGES CONTROL] +disable = fixme, + no-else-return, + too-many-arguments, + too-few-public-methods, + too-many-locals, + too-many-instance-attributes, + no-member, + unnecessary-pass + +[FORMAT] +max-line-length = 120 + +[BASIC] +good-names = i, + j, + k, + o diff --git 
a/benchmark/README.md b/benchmark/README.md new file mode 100644 index 000000000..118fabb72 --- /dev/null +++ b/benchmark/README.md @@ -0,0 +1,84 @@ +# About the benchmark +The benchmark is used to test the adlik serving performance of different models. Before using the benchmark to test the +performance of the runtime, you need to build the client, the binary, and compile the model. + +## Installing prerequisites + +- python3 +- pip3 + +## Build and install packages + +1. Build clients and serving binary and make client pip packages (see [README.md](../../README.md)). + +2. Install clients pip package: + + ```sh + pip3 install {dir_of_pip_package}/adlik_serving_api-0.0.0-py2.py3-none-any.whl + ``` + +3. Install model_compiler: + + ```sh + cd {Adlik_root_dir}/model_compiler + pip3 install . + ``` + +## Compile the test models + +1. Prepare model code and serving_model.json (If you don't know how to write, you can refer to the existing serving_model.json). + + ```sh + cd {Adlik_root_dir}/benchmark/test + mkdir model_name + cd model_name + ``` + + Then put your prepared model and serving_model.json in the directory model_name. + +2. Run the model code, and save the model in {Adlik_root_dir}/benchmark/test/model_name/model. + + ```sh + cd {Adlik_root_dir}/benchmark/test/model_name + python3 model.py + ``` + +3. Compile the model and save the serving model. + + ```sh + cd {Adlik_root_dir}/benchmark/src + python3 compile_model.py + ``` + + In the compile_model.py you can also specify the files that need to be compiled. + +## Test the serving performance + +1. Deploy a serving service: + + ```sh + cd {dir_of_adlik_serving_binary} + ./adlik_serving --model_base_path={model_serving_dir} --grpc_port={grpc_port} --http_port={http_port} + ``` + + Usually the adlik serving binary is in the directory {Adlik_root_dir}/bazel-bin/adlik_serving, the grpc_port can + be set to 8500 and the http_port can be set to 8501. 
And It should be noted that the type of the compiled model is + the same as the type of the serving service + +2. Run a client and do inference: + + ```sh + cd {Adlik_root_dir}/benchmark/test/client + python3 xxx_client.py --batch-size=128 path_image + ``` + + The log of serving and client will be saved in time_log.log. + +3. Analyze inference results + + ```sh + cd {Adlik_root_dir}/benchmark/src + python3 test_result.py path_client_log path_serving_log batch_size model_name runtime + ``` + + Then you can get the performance analysis results of the serving. \ No newline at end of file diff --git a/benchmark/bandit.yaml b/benchmark/bandit.yaml new file mode 100644 index 000000000..dd822cb3c --- /dev/null +++ b/benchmark/bandit.yaml @@ -0,0 +1,4 @@ +include: + - '*.py' + +skips: [B404,B603] \ No newline at end of file diff --git a/benchmark/setup.py b/benchmark/setup.py new file mode 100644 index 000000000..d70e3ca73 --- /dev/null +++ b/benchmark/setup.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 + +# Copyright 2019 ZTE corporation. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +Benchmark test. 
+""" + +from setuptools import find_packages, setup + +_VERSION = '0.0.0' + +_REQUIRED_PACKAGES = [ + 'keras==2.2.4', + 'onnx==1.5.0', + 'protobuf==3.6.1', + 'torch==1.3.0', + 'torchvision==0.4.0', + 'requests', + 'tensorflow==1.14.0', + 'jsonschema==3.1.1', + 'networkx==2.3', + 'defusedxml==0.5.0' +] + +_TEST_REQUIRES = [ + 'bandit==1.6.0', + 'flake8==3.7.7', + 'pylint==2.3.1' +] + +setup( + name="benchmark", + version=_VERSION.replace('-', ''), + author='ZTE', + author_email='ai@zte.com.cn', + packages=find_packages('src'), + package_dir={'': 'src'}, + description=__doc__, + license='Apache 2.0', + keywords='Test serving-lite performance', + install_requires=_REQUIRED_PACKAGES, + extras_require={'test': _TEST_REQUIRES} + +) diff --git a/benchmark/src/automatic_test.py b/benchmark/src/automatic_test.py new file mode 100644 index 000000000..91294fa7d --- /dev/null +++ b/benchmark/src/automatic_test.py @@ -0,0 +1,104 @@ +import subprocess +import argparse +import os +import time + + +def _parse_arguments(): + args_parser = argparse.ArgumentParser() + args_parser.add_argument("-d", "--docker-file-path", type=str, help="The docker file path of the test serving type") + args_parser.add_argument("-s", "--serving-type", type=str, help="The test serving type") + args_parser.add_argument("-b", "--build-directory", type=str, help="The directory which to build the docker") + args_parser.add_argument("-a", "--adlik-directory", type=str, default="Adlik-master", help="The adlik directory") + args_parser.add_argument("-m", "--model-name", type=str, help="The path of model used for test") + args_parser.add_argument("-c", "--client-script", type=str, default="client_script.sh", + help="The script used to infer") + args_parser.add_argument("-ss", "--serving-script", type=str, default="serving_script.sh", + help="The serving script") + args_parser.add_argument("-ov", "--openvino-version", type=str, default="2019.3.344", + help="The version of the OpenVINO") + 
args_parser.add_argument("-tt", "--tensorrt-tar", type=str, + default="TensorRT-7.0.0.11.Ubuntu-18.04.x86_64-gnu.cuda-10.0.cudnn7.6.tar.gz", + help="The tar version of the TensorRT") + args_parser.add_argument("-tv", "--tensorrt-version", type=str, default="7.0.0.11", help="The version of TensorRT") + args_parser.add_argument("-l", "--log-path", type=str, default="log", help="The path of log directory") + args_parser.add_argument('-tm', '--test-model-path', type=str, help="The path of test model") + args_parser.add_argument("-sj", "--serving-json", type=str, default="serving_model.json", help="The json of model") + args_parser.add_argument("-cis", "--client-inference-script", type=str, required=True, help="The inference script") + args_parser.add_argument("-i", "--image-filename", type=str, required=True, nargs="?", help="Input image.") + args_parser.add_argument("-gl", "--gpu-label", type=int, default=None, help="The GPU label") + args_parser.add_argument("-cs", "--compile-script", type=str, default="compile_script.sh", + help="Compile the model script") + return args_parser.parse_args() + + +def _close_docker(): + close_docker_command = ['sh', '-c', + 'docker rm -f adlik-test'] + subprocess.run(close_docker_command) + + +def _get_result(log_path, model_name): + calculate_command = ['python3', os.path.join(os.path.dirname(__file__), 'test_result.py'), + '-c', os.path.join(log_path, 'client_time.log'), + '-s', os.path.join(log_path, 'serving_time.log'), + '-m', model_name] + with subprocess.Popen(calculate_command) as result_process: + print(result_process.stdout) + + +def _get_log(log_path): + if os.path.exists(os.path.join(log_path, 'client_time.log')): + return False + else: + return True + + +def _docker_build_command(args): + build_arg = f'--build-arg SERVING_SCRIPT={args.serving_script} ' \ + f'--build-arg CLIENT_SCRIPT={args.client_script} ' \ + f'--build-arg TEST_MODEL_PATH={args.test_model_path} ' \ + f'--build-arg SERVING_JSON={args.serving_json} ' \ + 
f'--build-arg CLIENT_INFERENCE_SCRIPT={args.client_inference_script} ' \ + f'--build-arg IMAGE_FILENAME={args.image_filename} ' \ + f'--build-arg COMPILE_SCRIPT={args.compile_script} ' + + if args.serving_type == 'openvino': + build_arg = build_arg + f'--build-arg OPENVINO_VERSION={args.openvino_version} ' + elif args.serving_type == 'tensorrt': + build_arg = build_arg + f'--build-arg TENSORRT_VERSION={args.tensorrt_version} ' \ + f'--build-arg TENSORRT_TAR={args.tensorrt_tar} ' + else: + build_arg = build_arg + + build_command = f'docker build --build-arg ADLIK_DIRECTORY={args.adlik_directory} ' + build_arg + \ + f' -f {args.docker_file_path} -t adlik-test:{args.serving_type} {args.build_directory}' + return build_command + + +def main(args): + try: + _close_docker() + except Exception: + pass + finally: + docker_build_command = _docker_build_command(args) + + if not args.gpu_label: + docker_run_command = f'docker run -d --name adlik-test -v {args.log_path}:/home/john/log ' \ + f'adlik-test:{args.serving_type}' + else: + docker_run_command = f'NV_GPU={args.gpu_label} nvidia-docker run -d --name adlik-test ' \ + f'-v {args.log_path}:/home/john/log adlik-test:{args.serving_type}' + + test_command = ['sh', '-c', docker_build_command + ' && ' + docker_run_command] + + with subprocess.Popen(test_command): + while _get_log(args.log_path): + time.sleep(10) + _get_result(args.log_path, args.model_name) + _close_docker() + + +if __name__ == '__main__': + main(_parse_arguments()) diff --git a/benchmark/src/compile_model.py b/benchmark/src/compile_model.py new file mode 100644 index 000000000..89be631ac --- /dev/null +++ b/benchmark/src/compile_model.py @@ -0,0 +1,33 @@ +import os +import json +import model_compiler +import argparse + + +def _get_request(request_file, test_model_dir): + request = json.load(request_file) + model_dir = request["input_model"] + request["input_model"] = os.path.join(test_model_dir, model_dir) + export_dir = request["export_path"] + 
request["export_path"] = os.path.join(test_model_dir, export_dir) + return request + + +def compile_model(args): + request_dir = os.path.join(args.test_model_path, args.serving_model_json) + try: + with open(request_dir, 'r') as request_file: + test_model_dir = args.test_model_path + request = _get_request(request_file, test_model_dir) + result = model_compiler.compile_model(request) + print(result) + except FileNotFoundError: + print(f"Can not compile the model in {os.path.join(test_model_dir, args.model_path)}") + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-t', '--test-model-path', type=str, required=True, help='The path of test model') + parser.add_argument('-s', '--serving-model-json', type=str, default='serving_model.json', help='The json of model') + args = parser.parse_args() + compile_model(args) diff --git a/benchmark/src/supervisord.conf b/benchmark/src/supervisord.conf new file mode 100644 index 000000000..8469277cf --- /dev/null +++ b/benchmark/src/supervisord.conf @@ -0,0 +1,18 @@ +[supervisord] +nodaemon=true + +[program:serving] +command=/home/john/serving_script.sh +priority=1 +autostart=true +autorestart=unexpected +stdout_logfile=/home/john/log/serving.stdout.log +stderr_logfile=/home/john/log/serving.stderr.log + +[program:client] +command=/home/john/client_script.sh && exit +priority=2 +autostart=true +autorestart=unexpected +stdout_logfile=/home/john/log/client.stdout.log +stderr_logfile=/home/john/log/client.stderr.log diff --git a/benchmark/src/test_result.py b/benchmark/src/test_result.py new file mode 100644 index 000000000..27d1993e0 --- /dev/null +++ b/benchmark/src/test_result.py @@ -0,0 +1,72 @@ +""" +The test result of adlik performance +""" +import argparse + + +def _speed_of_client(client_log_path, batch_size): + with open(client_log_path, 'r') as file: + lines = file.readlines() + sum_time = [] + for line in lines: + line = line.strip('\n') + time = line.split('predict:')[-1] + time = 
float(time.strip(' ')) + sum_time.append(time) + sum_time.pop(0) + batch_num = len(sum_time) + speed_processing_picture = (batch_num * batch_size) / sum(sum_time) + return speed_processing_picture, batch_num + + +def _speed_of_serving(serving_log_path, batch_size): + with open(serving_log_path, 'r') as file: + lines = file.readlines() + runtime = lines[0].partition('found runtime ')[-1] + lines = [line.partition('PredictServiceImpl')[-1] for line in lines] + sum_time = [] + for line in lines: + if line: + line = line.strip('\n') + time = line.partition('time (milliseconds):')[-1] + time = float(time.strip(' ')) + sum_time.append(time) + sum_time.pop(0) + batch_num = len(sum_time) + speed_processing_picture = (batch_num * batch_size) / sum(sum_time) * 1000 + return speed_processing_picture, batch_num, runtime + + +def main(args): + """ + Analyze inference results + """ + speed_processing_picture_client, batch_num = _speed_of_client(args.client_log_path, args.batch_size) + speed_processing_picture_serving, batch_num1, serving_runtime = _speed_of_serving(args.serving_log_path, + args.batch_size) + assert batch_num == batch_num1 + if args.runtime: + serving_runtime = args.runtime + else: + serving_runtime = serving_runtime + tail_latency = 1 / speed_processing_picture_client - 1 / speed_processing_picture_serving + print(f'Model: {args.model_name}, Runtime: {serving_runtime}') + print(f'The speed of processing picture in the client is : {speed_processing_picture_client}') + print(f'The speed of processing picture in the serving is : {speed_processing_picture_serving}') + print(f'The tail latency of one picture is : {tail_latency}') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-c', '--client-log-path', type=str, required=True, + help='The path of client log') + parser.add_argument('-s', '--serving-log-path', type=str, required=True, + help='The path of serving log') + parser.add_argument('-b', '--batch-size', type=int, 
required=False, default=128, + help='Batch size. Default is 128.') + parser.add_argument('-m', '--model-name', type=str, required=True, + help='The name of model') + parser.add_argument('-r', '--runtime', type=str, required=False, default=None, + help='The serving type') + args = parser.parse_args() + main(args) diff --git a/benchmark/test/client/imagenet_client.py b/benchmark/test/client/imagenet_client.py new file mode 100644 index 000000000..d39f37fba --- /dev/null +++ b/benchmark/test/client/imagenet_client.py @@ -0,0 +1,169 @@ +# Copyright 2019 ZTE corporation. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +This is a sample for Adlik_serving prediction +""" + +import argparse +import os +import time +import logging + +from PIL import Image +from adlik_serving import PredictContext, model_config_pb2, tensor_dtype_to_np_dtype +import numpy as np + +FLAGS = None + +ISOTIMEFORMAT = '%Y-%m-%d %H:%M:%S,%f' + + +def _parse_model(config, model_name, batch_size): + if config.max_batch_size == 0: + if batch_size != 1: + raise Exception("batching not supported for model '" + model_name + "'") + else: # max_batch_size > 0 + if batch_size > config.max_batch_size: + raise Exception( + "expecting batch size <= {} for model '{}'".format(config.max_batch_size, model_name)) + + input_ = config.input[0] + output = config.output[0] + if input_.format == model_config_pb2.ModelInput.FORMAT_NHWC: + h = input_.dims[0] + w = input_.dims[1] + c = input_.dims[2] + else: + c = input_.dims[0] + h = input_.dims[1] + w = input_.dims[2] + + return input_.name, output.name, c, h, w, input_.format, tensor_dtype_to_np_dtype(input_.data_type) + + +def _gen_input_data(data_format, dtype, c, h, w): + if os.path.isdir(FLAGS.image_filename): + file_names = [os.path.join(FLAGS.image_filename, f) + for f in os.listdir(FLAGS.image_filename) + if os.path.isfile(os.path.join(FLAGS.image_filename, f))] + else: + file_names = [FLAGS.image_filename] + + file_names.sort() + + image_data 
= [] + for filename in file_names: + img = Image.open(filename) + array = _preprocess(img, data_format, dtype, c, h, w) + image_data.append(array) + return file_names, image_data + + +def _preprocess(img, data_format, dtype, c, h, w): + if c == 3: + half_the_width = img.size[0] / 2 + half_the_height = img.size[1] / 2 + img4 = img.crop( + ( + half_the_width - 112, + half_the_height - 112, + half_the_width + 112, + half_the_height + 112 + ) + ) + sample_img = img4.convert('RGB') + else: + raise Exception('Imagenet image channel must be 3, bug not {}'.format(c)) + + resized_img = sample_img.resize((h, w), Image.BILINEAR) + resized = np.array(resized_img).reshape((h, w, 3)) + + scaled = resized.astype(dtype) / 255.0 + + # Swap to CHW if necessary + if data_format == model_config_pb2.ModelInput.FORMAT_NCHW: + ordered = np.transpose(scaled, (2, 0, 1)) + else: + ordered = scaled + return ordered + + +def _postprocess(results, file_names, batch_size): + if len(results.tensor) != len(file_names): + raise Exception("expected {} results, got {}".format(batch_size, len(results))) + if len(file_names) != batch_size: + raise Exception("expected {} file names, got {}".format(batch_size, len(file_names))) + + if results.batch_classes: + for i in range(batch_size): + print("Image: '{}', result: {}".format(file_names[i], + results.batch_classes[i])) + else: + print("response doesn't contain 'batch classes' field, get class information from 'tensor' field!") + for i in range(batch_size): + print("Image: '{}', result: {}".format(file_names[i], np.argmax(results.tensor[i]))) + + +def _main(): + context = PredictContext(FLAGS.model_name, url=FLAGS.url, protocol=FLAGS.protocol, verbose=True) + model_config = context.model_config + + input_name, output_name, c, h, w, data_format, dtype = _parse_model( + model_config, FLAGS.model_name, FLAGS.batch_size) + + file_names, image_data = _gen_input_data(data_format, dtype, c, h, w) + + cur_idx = 0 + num_of_images = len(image_data) + + def 
_next_batch(batch_size): + nonlocal cur_idx + if cur_idx + batch_size <= num_of_images: + inputs = image_data[cur_idx:cur_idx + batch_size] + outputs = file_names[cur_idx:cur_idx + batch_size] + cur_idx = (cur_idx + batch_size) % num_of_images + else: + image_idx = cur_idx + cur_idx = 0 + next_inputs, next_outputs = _next_batch(batch_size - (num_of_images - image_idx)) + inputs = image_data[image_idx:] + next_inputs + outputs = file_names[image_idx:] + next_outputs + + return inputs, outputs + + num_of_batches = 99 + if num_of_images % FLAGS.batch_size != 0: + num_of_batches += 1 + logging.basicConfig(level=logging.DEBUG, + filename=os.path.join(FLAGS.log_path, 'client_time.log'), + filemode='a', + format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s') + for _ in range(num_of_batches): + i_inputs, i_outputs = _next_batch(FLAGS.batch_size) + time1 = time.time() + context.run(inputs={input_name: i_inputs}, + outputs={output_name: FLAGS.classes}, + batch_size=FLAGS.batch_size) + logging.info(f'The time of predict: {time.time() - time1}') + print(f'{_} / {num_of_batches}') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--model-name', type=str, required=False, default='resnet50', + help='Name of model') + parser.add_argument('-b', '--batch-size', type=int, required=False, default=1, + help='Batch size. Default is 1.') + parser.add_argument('-c', '--classes', type=int, required=False, default=1, + help='Number of class results to report. Default is 1.') + parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8500', + help='Server URL. Default is localhost:8500.') + parser.add_argument('-i', '--protocol', type=str, required=False, default='grpc', + help='Protocol ("http"/"grpc") used to ' + + 'communicate with service. 
Default is "grpc".') + parser.add_argument('image_filename', type=str, nargs='?', + help='Input image.') + parser.add_argument('-l', '--log-path', default='/home/john/Adlik', type=str, help='Log path') + FLAGS = parser.parse_args() + _main() diff --git a/benchmark/test/client/mnist_client.py b/benchmark/test/client/mnist_client.py new file mode 100644 index 000000000..157930a58 --- /dev/null +++ b/benchmark/test/client/mnist_client.py @@ -0,0 +1,159 @@ +# Copyright 2019 ZTE corporation. All Rights Reserved. +# SPDX-License-Identifier: Apache-2.0 + +""" +This is a sample for Adlik_serving prediction +""" + +import argparse +import os +import time +import logging + +from PIL import Image +from adlik_serving import PredictContext, model_config_pb2, tensor_dtype_to_np_dtype +import numpy as np + +FLAGS = None + +ISOTIMEFORMAT = '%Y-%m-%d %H:%M:%S,%f' + + +def _parse_model(config, model_name, batch_size): + if config.max_batch_size == 0: + if batch_size != 1: + raise Exception("batching not supported for model '" + model_name + "'") + else: # max_batch_size > 0 + if batch_size > config.max_batch_size: + raise Exception( + "expecting batch size <= {} for model '{}'".format(config.max_batch_size, model_name)) + + input_ = config.input[0] + output = config.output[0] + if input_.format == model_config_pb2.ModelInput.FORMAT_NHWC: + h = input_.dims[0] + w = input_.dims[1] + c = input_.dims[2] + else: + c = input_.dims[0] + h = input_.dims[1] + w = input_.dims[2] + + return input_.name, output.name, c, h, w, input_.format, tensor_dtype_to_np_dtype(input_.data_type) + + +def _gen_input_data(data_format, dtype, c, h, w): + if os.path.isdir(FLAGS.image_filename): + file_names = [os.path.join(FLAGS.image_filename, f) + for f in os.listdir(FLAGS.image_filename) + if os.path.isfile(os.path.join(FLAGS.image_filename, f))] + else: + file_names = [FLAGS.image_filename] + + file_names.sort() + + image_data = [] + for filename in file_names: + img = Image.open(filename) + array = 
_preprocess(img, data_format, dtype, c, h, w) + image_data.append(array) + return file_names, image_data + + +def _preprocess(img, data_format, dtype, c, h, w): + if c == 1: + sample_img = img.convert('L') + else: + raise Exception('MNIST image channel must be 1, bug not {}'.format(c)) + + resized_img = sample_img.resize((h, w), Image.BILINEAR) + resized = np.array(resized_img).reshape((h, w, 1)) + + scaled = resized.astype(dtype) / 255.0 + + # Swap to CHW if necessary + if data_format == model_config_pb2.ModelInput.FORMAT_NCHW: + ordered = np.transpose(scaled, (2, 0, 1)) + else: + ordered = scaled + return ordered + + +def _postprocess(results, file_names, batch_size): + if len(results.tensor) != len(file_names): + raise Exception("expected {} results, got {}".format(batch_size, len(results))) + if len(file_names) != batch_size: + raise Exception("expected {} file names, got {}".format(batch_size, len(file_names))) + + if results.batch_classes: + for i in range(batch_size): + print("Image: '{}', result: {}".format(file_names[i], + results.batch_classes[i])) + else: + print("response doesn't contain 'batch classes' field, get class information from 'tensor' field!") + for i in range(batch_size): + print("Image: '{}', result: {}".format(file_names[i], np.argmax(results.tensor[i]))) + + +def _main(): + context = PredictContext(FLAGS.model_name, url=FLAGS.url, protocol=FLAGS.protocol, verbose=True) + model_config = context.model_config + + input_name, output_name, c, h, w, data_format, dtype = _parse_model( + model_config, FLAGS.model_name, FLAGS.batch_size) + + file_names, image_data = _gen_input_data(data_format, dtype, c, h, w) + + cur_idx = 0 + num_of_images = len(image_data) + + def _next_batch(batch_size): + nonlocal cur_idx + if cur_idx + batch_size <= num_of_images: + inputs = image_data[cur_idx:cur_idx + batch_size] + outputs = file_names[cur_idx:cur_idx + batch_size] + cur_idx = (cur_idx + batch_size) % num_of_images + else: + image_idx = cur_idx + cur_idx = 
0 + next_inputs, next_outputs = _next_batch(batch_size - (num_of_images - image_idx)) + inputs = image_data[image_idx:] + next_inputs + outputs = file_names[image_idx:] + next_outputs + + return inputs, outputs + + num_of_batches = 99 + if num_of_images % FLAGS.batch_size != 0: + num_of_batches += 1 + logging.basicConfig(level=logging.DEBUG, + filename=os.path.join(FLAGS.log_path, 'client_time.log'), + filemode='a', + format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s') + for _ in range(num_of_batches): + i_inputs, i_outputs = _next_batch(FLAGS.batch_size) + time1 = time.time() + context.run(inputs={input_name: i_inputs}, + outputs={output_name: FLAGS.classes}, + batch_size=FLAGS.batch_size) + logging.info(f'The time of predict: {time.time() - time1}') + print(f'{_} / {num_of_batches}') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-m', '--model-name', type=str, required=False, default='mnist', + help='Name of model') + parser.add_argument('-b', '--batch-size', type=int, required=False, default=1, + help='Batch size. Default is 1.') + parser.add_argument('-c', '--classes', type=int, required=False, default=1, + help='Number of class results to report. Default is 1.') + parser.add_argument('-u', '--url', type=str, required=False, default='localhost:8500', + help='Server URL. Default is localhost:8500.') + parser.add_argument('-i', '--protocol', type=str, required=False, default='grpc', + help='Protocol ("http"/"grpc") used to ' + + 'communicate with service. 
Default is "grpc".') + parser.add_argument('image_filename', type=str, nargs='?', + help='Input image.') + parser.add_argument('-l', '--log-path', default='/home/john/Adlik', type=str, help='Log path') + FLAGS = parser.parse_args() + _main() diff --git a/benchmark/test/client_script/client_script.sh b/benchmark/test/client_script/client_script.sh new file mode 100644 index 000000000..8abe1025b --- /dev/null +++ b/benchmark/test/client_script/client_script.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +python3 /home/john/Adlik/benchmark/test/client/$CLIENT_INFERENCE_SCRIPT --batch-size=128 /home/john/Adlik/benchmark/test/data/$IMAGE_FILENAME && \ +mv /home/john/Adlik/client_time.log /home/john/log/client_time.log && \ +mv /home/john/Adlik/serving_time.log /home/john/log/serving_time.log \ No newline at end of file diff --git a/benchmark/test/compile_script/compile_script.sh b/benchmark/test/compile_script/compile_script.sh new file mode 100644 index 000000000..45c61c8c9 --- /dev/null +++ b/benchmark/test/compile_script/compile_script.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +cd /home/john/Adlik/benchmark/src &&\ +python3 compile_model.py -t /home/john/Adlik/model -s $SERVING_JSON \ No newline at end of file diff --git a/benchmark/test/compile_script/openvino_compile_script.sh b/benchmark/test/compile_script/openvino_compile_script.sh new file mode 100644 index 000000000..9615c9324 --- /dev/null +++ b/benchmark/test/compile_script/openvino_compile_script.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +INSTALL_DIR=/opt/intel/openvino_$OPENVINO_VERSION +source $INSTALL_DIR/bin/setupvars.sh +cd /home/john/Adlik/benchmark/src &&\ +python3 compile_model.py -t /home/john/Adlik/model -s $SERVING_JSON \ No newline at end of file diff --git a/benchmark/test/data/imagenet.JPEG b/benchmark/test/data/imagenet.JPEG new file mode 100644 index 0000000000000000000000000000000000000000..3e5c052024041edb9add1846dc7bb87a3255f25e GIT binary patch literal 33311 
zcmeFYbzGFe_doh9OG}4<)XGv)QcHJ(beBjsNOyzeQX(iwNk}aSNGd1@($b-{bcd7z z!hKLb-}|}u_50&@@87?9m}lN|=FFLMCU?&)*I%y}0U{LzWd#5NfdF;j16+TnzmoTJ zumu2RWi|jC0010-5P}9^f+zrtL?EUB(=afSK%f8wOmE^H2*#f@AH>xEa4Cpc|M8%K zm=~-|2By_uBt`qb>pH}s|Eb#v(gUynK-R+bvFqPAOio!*Syx&WrfjIE1LNo823h_E zP}x7V{*&@@a|?)Y3yJUwzW{KH8g22!v9bTHxO<>iK^ z{MAbWR1*Ck)&?;o^)C+u)D|Jp{z)|L7SY#`s6i1u@ps zzqVq4?cw~xR3OIv$MYKG33`eJ+{mZ`V>F1d!A`Zk>9qWRdh&2{L-POcK12J57r`Nd z{==oc#V66X_-q6Irs0c<}Fy4fl z@p%MB7#Mf|rNm%uD8L2g!oX>b2I8C9Jqx~||L_Wk(Lu}r(synOL432Sxo?{O$0G(} z5)jjX6gd!+{-Xzh7!#BV1L;v9#sV=Vh`|W}?h$`yoVg4kxL5q;X#wd(|L6dq4WR(# z+*oom<0x+s_%{pkQ2nFdl;MMz5lm-qlmdh4pBjH>*p23Y`{z&5pBEv>4@#8+_pH$C z>u$7N00vkBZ-MJ;Y}kLeuCD=L5&LG1+_dnAGxR1!1N)5*GWAz1n3n@%7{Chl`c41; zclhu>83-sJ0Ohy+qn!Us|0cZukKBJUIAD(~VwYkrVvhrv5N5#k3IMVo|NWl2zIp!g z-ve$Q%0IULr6{W@iu}KKK$wcA{LO5*2?`D#F1{8XHZXk~4^M~3t}q@h?mt_k(ckj_ z^ZsA=fD$+^p^#9BKO_u%LLjk_DDb(NV*sG>XEbnu^XpsSifg)>1Ej&ZfBttt{oybH z-*Vu5S^RTG-t5>w1p`0{PBAIq4loL4Z)V(0yjdA&f5uZ=SO`S_wU-n9T@g22X#eod z{(TcL0qDO|^M>%BleGujW&d-s4ukl=PgV%p-!*uQ14{dcWp9pJF#P9y{7(u9_pL~fXG8zI z&j7`f%4qPZPltnv31!gJr&EL|-ZAQPG{Hk4P=S4YV<-#+Qm1cPhY^7&)#>khgsMlN zbX59GP`-#BEyuo@CQby(qz|l*fM4ot_31<*5c#qjV4jQzrL*NQ2)JIwjwxa{qM;3$aPfzON5r|kFm95bm`JsJ%M_`I1 zjR*DNN5q;8_PPI&GdOL4Lfq)rr_G7rsnls((y8RHLFx=nKR})EAa(ZobU;3A(&-z! 
z_)$h+J1Bng4V{5_lbd>JeS_K@%s2fq921@NO8ItC^dHV*DBA^;5n zg`%NDF)+}iprNBf(XnpqfuQ+<5lW1XahC^^ zL|O~W!kv_XHyoQxCiPWKI}V&r`+(fiV+8j$BmZ}%!yA+SSoFVVP{jW)i~h%;|FP$K z8o+~sJ03BV7?1>hIKY36xbLgmAHlkWXeI@j$g@KZtgX<;VyLQOon`3^7OJmw!)UY^ z`Ni&Gp+%_^j+Wz|{a}_&=O%DennHK~Y@hR6`D>;oiTh_ns@}7M;8w>S<%eBA+nGva03M8iJ4NPnG4zVY%Z#= zEnlepCDK2#`5M4gpi>Q_i)nRl?cb8+{hlOZn_2scj8)c(8FJqqGxqxHtsyf-n>;P}G5b9-uer)v>y~-(BTH=C> zhWP7Zn*|xHQKk$@(G`GT_h)EcvGw2+L3SiLqFsYly=WD~yXQ>QCgvGK&e%tp_pfH9 zDN79vw0#ZsY@9@i>jhq^>{flA?v+?1%gxSHQP8f8)+T&KRy53D;vdwAuJ=B!u`3Fa zHrcdNkEq^oxte&opCpPK;q{})dOLEm>RHunfMF)PqqF@L&ASOA<*RZcz9!^aL;f&o zMXE$@qBv?OH|5Ipf(U*s%kI6Ugl(rX8A$htshkDGnYe@IQpR{ zttUym2sUK)tX4tnOE<=QeX%e5759peFUC^{ohLR%jG3^i0`!zJ#FwAWX%$+Qe~ElZ zHc#Zl{$3~GP-GxNmGMeDARyZ+R)dG;yoRhb85(x-n}8r2VvP zCcRXP>3H0f*sdh;t(+puq?^lQ5vnhHdp6H(PRxOCr)m}1w50*Hoa73;kz|(_R_npEC z^yH`&n3ZHDP#3S5efRcwcyV{T>?JRI7kbqSs}RQ9_G8)#-bc^bgrToKt5H?b8@Y_p~E4|fK?tSR#W80hSUqXaM6bez6;UP7o$9}xwycpasn<^Nq zxm7`$q;#%}YL8FM*CF4tbJ(aP#v*6$UR6deH&p~?nvTaPQEXCj>1kT0&NmchOwUbH zuFmV8tStU^!cBklX(zuCQLNmr`!comOpE@+nLr3@d_w;nNsTTI!TSK+*Tv-y#p50c z33X`+JN-UK&UdjPGApE7X8r74VsE3@q0{Md({&GX>MSU#-W8mns9RWzJN8L=Eaxan z&}x=G1WUMVK&Vy&q1$GdxE0Y1zMERFlU0hy%4Yhdr?O&ytTd;^W`>@3H7mY5ay3&L zF2@|?8@ByWnAvpXoF&%NZQ`w(Wx!`lBZ#XErAh9p@(Wc@HW;ht_h&F#L_VNfj#w6g z1x)hW?u^afm-Qdp9-YqK>C^jW^5(I-)=XcGY0$UNKJT&**7i_?@&raEZ-?KVIijrE zXOcb~A5wDLG3HM6)%|+eYZyY=g(G}EPOXbPb}X_Ts9;A1MTH=$tK(pNJWxtpLeWH# zcXy=&`^$>0f0!n7X2YzG=#4+P$a$bQJ|l1&LgGy&#ARnUo7HRul9}GC)YH7Vp55Q?=y_M~ zFk#lGvU^Fo(?{&@S@xJaLNcP*Wd>vg0)|Ns~xFM2o zwCu|IQQ;h(Ll`w>YUH*u*C)R4WMc#V34uE43dMz@ zJQs|ysoPo)S{f+C=uYrYvk*-Bi7Fxu3K~}XzYK=^74oe5NHlIePsvN4J${?<#!Gh=VY6W`FFg7$Jc+NG8CNH8_My6-|59&0PE|MV%PPPluA8@Uol9A9QG+EzC zD0=ggY-wh=?gMR0YX5EdUjc+K>BeimnVEc9Q^sah7!D-f*4!!{+0cG{4Gd%iOPpjf zCW*)0g)&8ZPR5LKS&3%x;x1-;rm77x{@}gGvG@8}S4x9>hPp_=n2ZM56Whdz@OG^H zSNx~sSu9O&Gt~G^JE(b2+b%vDukY(C$~?okuY@20R$v>CmlXeD6f+ z+OL{1&`tk*%|#P8Lc&b=iy7~2DP!h#tuqCoZsWZ&_DasOUUIwABx^5%kyVjo_q_m3 
zSEfv?XPpWJGS!RATWn(cfzNPksoXj}k=z!aC3}FkJi;s-;*K`#k6Xg;x5fJvG=E!T z;K7rSk-{ujcFncWeki>H>4lCV+#mbCu|SLj^$~CWkmrB7!lZ->=Crha%(fi$y41ZZ({IP= zZCmu<`K)Z?O_L;-y5iRMYA0C!R)#>r1jftvl(BhEzIt5*ID5fg9-KM_zHGFEtXd!; zNo|XFVlY>o-ajZaX4=qjpt)UoPi@X|aywEpz+~Z!KB)lcOxo}+ixlj(eC!7B3G(~#I zu~onP3xn|^0r}f`q+OIQvBE2RivjC1#d^)d$miTcWDCqVxCZV+R@oCJU#jL}>s$J+ zfp3C34_RIumO~O>v|DWvx{fCox2W9ep{{Tloa=tgx2ZdE^;j!36}wT!pJ`@$e0^** z`+&Z#WnSkti@i&$(0cxm@t4i@;4k_g^yv|;@8D&3ADCCucb{Bt?Y_OALB;66D?)

Rb?emAu>bn*OoE}i<^48`( zBfeA|_wN2VS@HH;YG{j=us)sZDh(ucVfL&7pCheWnQWH^nwB>CcIr{*$4!!5Qnm^b zmE;c!y#BlAA>@o6w-x2}eksDmu`1;zUOt`5B+Hq|&7g=5q8v{>5)ej1Nwa3oGu~@9 z!Qv3KJL&!fo*Ox$$%i<0PV9EX&9-{eVV-Guu8w}I_s}vU)`6#(ZlS%KdcIC?{9kLB zeWD^CdSR$U2#fB4(MxMiPuJJ+v44EY{^2{EE|LaM+tGMo|DH0F>q^{!93Sb7aM!)G zFC4eOAN72vY@GjWM_G5--pdnhUl19zgO-=(^iWbDFGZ)|8Z#(x1Hp zSu;JWC78!^(CRvXm7s`XI@SrJ#$Z(B7J2BWWDB)pLWlcJ0Y9JXa5l|ec6|6e^rH-Z z$o%>#e}WsdasKf=k5HA6F51ZH2Tdm+xyoc$l4t$5@ip*-#4kqsnZ;gCw1)zgaY!F+ zO7rTh1#;0vEJnU@S*9~LmD6aOLo40!xG(NIjvj{?H^w_`&K&tV@+=Q zjFfj>^g*_5lG5oTx8^(_sWBzph#l!IoTUOEm^VUS$-tUpcHB9b6^#W!O{0nKtfpX` zLYMy$o$EYDAJ#hb)bF>W)`7?h@kf0raXIvBU}v15vt!XK9@G07)iqEOdev8=5uTX$ z3;(|4@jkeVXq6hK1mO!y9*U^?{O-O6Y_wV0rg|4D!bIueZ?44UzZ$*>A}|&3_MxBq zQ>>A^7$h#)K)*mlcv!IC!|?$8QmWp66>)TNwGo=A^by1B``L+Tw$CTlV&4@}!bcI) z2Ej51ztO<%VGkl7{kpP6VfEDoti*L0(H3&ceoJ!s{OrI6BzRm>4hPhKSq?TO+*EjF z0ghjZn0Q@=)0;P$;g@9^p9t|vn7HW0v#frzPBj^Nh2pmGmoT*TVV$j6{B{7jD=+yI zGDa7n;=G;LfC$pV}JIACJ9Kop1=^{C-UJkhzD@@A)O`Ey{{R6@!@uRgdvF1kDy{SM-%! 
z-gEexWtX*~cbAvs!+gFgZE!Cm%rs&rcYf(7l7;$3{`_0WRvAq=i zieD(+kilG;D*&lPEwEuG#n*Ux|4Ep;7ju zOYOdM%n4QHtd`35l0u0<=qMxA9ywO%nUlbJ_jF$w}xH+U)Y$CC~wp;4fh`}%s}7wEMM`?vtoi2$k4h3f^-Js$QEEPw&nz|r+J zcsoKFcC`2Lr-- zZt^!?#q0e?H~InBQ{d{60>TzReQs_BZf=|i$?BD$L!UC8D&Ti}m8x95O zpkq)fbpK)jxC;=0?QH>J0Oj@m^%*+srXHwQ_#eA3U=TE^>+5q+KPYek)&sDK&B0^= zkU~?szPPw)AN}|K^+FpA2Fkqv9gYC@#WhH0MAHPzZ~E_#9soKKHdycakN;N|^#5XE zV`2XB|Kk3S|CgE!^#8(W85m%+^z@9Zp#PVdhlT!+{}*xN|An(l%1H{z@(YUyf=*;v z2$QKJO!)Y`R9=Rk7zLc>yQP@m_dK+-vBoMmjMDgI`5M3pi7f#F<6lCx?{-vKOLaI zOmHY;ylL|diENdgO4z*Zvct3W^@E5h)2=9Q0zyf!!&~Dj+}8jPUfGhtYXZLear&E- zBvGVooeYiK_a=$FpD|O?^f=%uXU|DkLZ7p=@Fu~wsl;O4DKi7QKk{UCVTYq+OXE^H zjIV6$anGM$yiX-*=JtRGH-(Smh4Wdd${$5A2nK)K<)?HXP&rw1xGexFqI>)PNHobd zR*s6K8&ygi!Ifi0e*OY)35T5}o0NKNGiMf%u<>|MGZu^~XO2bR0z+^3R#tp)_Bwx0 zcb~@m6j{ugPZXAyD!jTs0_nF5&MP|`d~9(7$)B~h^bM&hu(6!G1`wE|mEs%GV+!gE z%MKGTG3teln;U1i4qjn(GB_N%&I`)bGi9tivK5tF6N= zP-WY`{ediClEQEjs@KRRv>hiZCA48^!;8IS_HL&Z?d?uFybH zj3t|tH+eRMV}*2Uy(J|qAS~RrW3eB0|AmBzZ)A`J->_HsguujXtFCnMOR0JH+&msb zO>g{cuV2xu=?6$RMX@!&kb+9=LZKE3p|A5H!;g6`?ON==gmxilq_nYOoopY-C5KKt zG~`!v%lpclimhc}#z#N8?mXgLM<=-b=UHmR3U4V!K%Ux2wJ7h#X2H_SF1!52m~&U z!cOSYs(Qp_%e? 
zO)_Ty8(yZz(c0T4ao}Y2HqUeV*6{=U9$YNSeW7!&M!Wqjyob|WZ_RtoGMb2u!w2`U zliG>pEliCPKi6ESk-BnYQBlP@bX3L{1z;~Gjg+*V&_yCI6$F)PYg`K{pX(%WD|hih zwP?;vvqNBFYe(aaG)H;%49;^S+(hK($jNFa z1mtBFYeY}7UA=xlH*)S(CN*7{?8&j^VJCK(tc8faUvtN6TZ@t^S+ftN=s!`Vv?RbT z(_+O(Wo_GHpk(_pMfZJ-be!Y9qIMgRgyqCUZWK}mp9kjQ7O1ppRZgX^6RX<}#Tq9h zt@D0LWe7mpS?UBgdl)A>A;WRHchwxLCh;?t8C6%x=R~nq?(PJ?ULLPBvfF4$+rNEC zj{jXFkU&u*mB#CV_2|Z4u?Y-&?#qY7{Je|I@b5`lpCsK8)B))=k9K!+-zw@#+l>j8 zA_1W1S8B0PK10*YQ~xJ|--7*Xs^>7Vlo-p{DD{QmX-DY2`95eR?)&q)##*vv-;oJ8 zmb$r2vgI3zhDjc=FVK$iqxmk*$Equ);T{YrrqdHF?LT)3=Z#i!Gr zb=9XS-BNuXZmg$Q(WliW=FVPHTIdB!dTkoKgC)#U#4IvEORUE&)j{vi0ZlbD4dXSW zOUY76^D`mf<$LOvU6wWXY~}u}jULI1;tzS86lXk6Sa0B^OT2r$p6#>@?GGfB;JJr* zk91KRWQ1B5tu>-O_YLtdDT*AAKU>%Aq7jy0rQ)K+ziL(GB_!$Nu+=lgF#A!s_Z1UU z(%vz8UIaU=vRUhNU;L2OeMnS!o70M%C34^vCWdJ>bY@;I#>SbgP&7Iq#6tg55hhjJ z0LDaV*&YjZ08W6Xr8R-l6+9k-s*lHBRG}j~`uVJbi_8v9%1vUKAe`4(NWFOgaH9c) z1`ud@!83VI9nWr9sFM<({MNhel4iAL@@B2ur3(q?QpTy-)1Pn1DG}2Q> ztTW+rO^1iH73u53ohm0b=HFmxNi}sU9O2rv01#*6{G@k#QomMNB0EhK-aU2o||V6cS3WVsf#5qo&&Z|zLAuS)K-X{3>tfhXrp;3 zvzK{L@457gM*~awUAA1yuyz8W#AnB!E2)k7yRGdqy*u;Uqs&B9(B_H+)fv-vSv@SF zv7Mv@eot8Db&)TI`ykj@lq4^nKRTpLU|nA#!pqcKBiCKRQ)G>ip4U?>0X+9{Bg1cz zG6_AQgl0-l5;It*4Klq~vJlNrX@epq+z8wEDfnNa*a~Tm4k@cok@k*e#nB!@;;N>l zlf-XjrHj=h!j-hvNOOaW$Ob{LoC{?noesYzx^(K0;a<9~{#VLbo_mjL1m!oz*mVa8g*&r0Le^7pG-2iI zE8%xwY)`Q`gws1sqjs!x^n-`WdC%^{AU}x&?q|O@w0Lg`dWE~ub6TyoWp>_uYa(}r z2(9_FIz1%S^zWgh--UX_^kVCX7k~SbuCtcpq!yL8M_bJHijOk|0|PR9x770Zhx(Eq z?`I*F2Fr6F8_(oj2eT*obctW=uc~IfFBN>t8$XCIAq5J)O5c4+UkB$htBpbaU}Trq zwy6|?e6`RplGfpA+u&ZP8+*w5QF*OFN++x)i0%b=afstxBk~YBA~N;n^`K92r0>$> zO@R;P>!x@$<`T>}x)cb9pUn0f&Ua%fmU+kSCgsFgrREv+hPesSxKlaCRmLsap|lU5 z9A_^#4yY1qAkt$wl;A+10{>JN`4m$JGN1t2@=|$oXrEC+%Ia5`<^yM@qohmqqh4ph z{X^K&U~v8}hRF{YZMF*+6?K($c-mAQF8)y3Oq~#|a4hSZsBYR!CfnyWSOj%Z)o3@# zy#D9qSbQ>Xc5}u9zxIUHtlvlv$kknxg@xv~zU){*RY6m`mW*ikdKW7ry3}s*?@q7M zFhABEcB)LwevhG~>`8QZ?5oy-B(RFjjn0nxp5(#cV~}-zl<7p689n4$ZTqDVofNYj 
zt^S3;*ls57YO{DW6Q_yT!TL^TL4!w2={^ZH-B@-mb^11^>`0A4}X2RWxkEc z$0aInRmhxC`-Y8Al+kmi)LeAt-=!)i&=>HhOpYg5G9lm5Vc+9Q?`;L-*|%~jTOb!Z zFg^#ng_wWyiQGM;++d}mI!&h(U|mKT?KxLE67ck%CKwKSM#Iv&@a}KmCm;}d?PAHU zme05KyP!XN;`EV==g9*i4!S9wX?LoJKN}YsF9OnMPagQDNU#6SmsAO-8qZ!XdYwaC z3xy2XPgx>JTF_J?lV9+@Qup-;RKedigUklTs!4s*bPez&uEum`q+Ks@7@_78OKaP) zQ-=1Dry+3|dsyIWMv3W?Jb_&s9Ug8>w7f8WMxhM-6Qz2e7u;^6!(AS#%VTkRV#?b@ z;XT&kyRfhUIB(?%oe6nfNVbn)`dz zL1o5jCE&d;sjy_Bp)_xu?qq|>=})AAELSmy-@DWCNlme4MN_{&n?IKU92VO;nLq>k&bK8=H|i~T zn7)CD9)NCO5%?lP$!xtT$MP|rNIG5WeM1Rp+b*6q7 zIKDJ&{AlkKyBfEK*3Y-Vo|KAqt2HY>Z*v1rAi8zja7X&bU7gXSicdA| zlElabgR(k%{Q*fTvk~@sh~bk22QJRnKe~lCh0`-u6|gvGrdfO-j+XSE>nu&t!xVM^ zU3TUd^;Jr7Op73%JFVJflmgH@F5DG@vcD#xx+ynre?Wzf+VgT{ravRWUVM>MlvWcX zyv3Z9+q0oZQO2#m>w254fw;9d{==g0iVGfJ9EY5i6`al&N^sSPi{eA3qR$ZQ<|p}u zSzbB(n8~(e<|%GJVcAa_Z5Lejb+tSe81-4_@UXgPzM)Y{U-o5q?zaTZE*=89S=Xu$C(@RuiZMt?@^3 zoUP819TOzJXpNzH(mQ1TP*0Tr*ltgiVt1P*;E8o7k35pepgbYS<4zYW^lT7XQ85d1 zwVG+6>%Y@NAiaNw-pa{h2DvT{bF~|*Hut?3X~H4j-VQA?o_u$zjg*PpBId_@B2wA= z94;|dIZi-sx<4c%)TuiBup<&l*^85Ygl&rFiAyCC^C_heXxCqymK?qNNJVK-eaPSY zbzK+MI#0e$DtGbt;c%SwEVc0QGWQm-9}RS%a(DPm10nIT+H)!@_%3@}7|O*opE1wD zv68ugW`)LXN&Drvsm?2+Q|N;=GHoFT{B^U!ah0UzC#bVQ>HS;Lx59!YzV36P@lNL% z2?@h9N8TF{c87S1ut7VCJHsJ?A6Wq9mxoW`IP`BQf?)AiUdE}1e` z47WdgJY z(e2yW>$sBB21-xD#oysX+NtWnz8m<#V?VnzEk<@0z;JPT87S1-1e8)T<@eO;%e0c* zRO=lzFOZa;<)IiVLrT4KteA@$Ot`78WX_$>| z(Oh(P37)^?7_K=$pu?R=K>#gkN>>xEJ*g;FStYrat^ll+l+@mEC zYbIwmYKd=2hPR(^`ST5Jd5a{V82XL}Brkk=otwJ49v^yX{DOt34t&({DeK2KGW#yw z&FwS(OITfkVMYqZ{jBtxl`P~WR#}iQ(6WPrYuq!VI&H5Doyi)HduWZcAD+E_7S}Uk zCG9xQeVo16E0~&76n=GJn$-Mzh2|92xzc?Io0vbz(@<}T3q;(qr?W&8U2ZX9HYDAp zV5Y|ni5}BUUxAs6DjdH%Z@C8ev`ri+S*Z9Bx}?!MV>MCowBt#4e-IroJLTBsMB?tX z?a)~9`D(aEOup`%(4Hz^rVi`;0Wqi%K25b@?cj*l_#m8US3pNtIKRzNH-2YQt(+UT z(IHu{Yv0g$a>4ls=b7YK?Ppq^rf$|las7;96q{iVT6L$Bn8~;MpYsY`KZ*Ek!x&0k z=6JGI7k*X>TmyrO#qX?26GLhnp-vUnKD^JyHZvcPEl*?r>UBOa$E^>k^-K%1(93DI zfdxKD_rZ2UTbE2_uLZ_4ZppghGO^VpG!UXl2&@=Mes)t{F(S-~!*FOw*`l!A-Fq%C 
z`}Lg2#jKp`!_YZ*(cZ_w#VAV&s2awhi7j2qC^*o;XchNJ2Eal;Nif8nHmTuBq$oCe zE*ksuEaR&zi8NIdDlxx`?~p?;wgY&rPT{D0t+yk?G?eeRKN)PoF&RI=)`b=WrC%Ak=&T2uFN?44 zRBM-`{AN0hPJm@gSC+EV9GCY8O}(_O1sq{pejdR;Q*i92TS6GwZd+Xgdv`}$SXpbF zZxsZdL^+POk%z(8v}*^I;OGiufRv;c+9%V6F+Ey_lM~@V3m3L9K$)cup{xVRCG=ym zj$}_CbTiLhf0eX#?^8Zj_&le_WWG(3hCM$}VSqbWkwQS&SzUS?fB3~iZkb~F+^20R z0hv>-`nK#FL(pk%npe6QI!FZ<3q9-9JPSU(myinKJ@_+$E#2%4t+cdxy=q&tk0xd> zR<8l1dh}b>H&(`N(4@4Y@MRH8B^KVOw9eYdB-?1Fx9zDS(qd9JCaJfZ`CfDoK?a}= zdj%Ns0$k$AFs4)QH3J8l(?s5EVwmxXL{!o5G(iPF!LVzIFO9QQCVh6Mm4lmN+Unj;F=35$u@Af5 zbE?FR@{{+&T5@n$ z_%h1-HPYjEC3$&$ZTrx^NPkc*AO`Kpk}fBUH+a_or!@EPN!e|VMNiAEHgi4o(dnS` zA* zkH2ZV4`POrNb8J;W(Lu?F`oNC>Y%wb#{8;p_N1pJ+WFPDLC#%9igkP`RT3pZ={- zVd&rz@PSxBzpIv4Qv7@6_*E--g%+C-ltpUZkbV-f-W>5PRhi)f{rBmaOYswj${+T! zjrWHnMjZF1j7+Y9HG@%!(d-xV(XINGL(%q}PWNgW<6lC3L@!mgZ@;VWe~k#qOph>r z-w1e48uuf{uwuyGD;c-i)}Y8si?a{Y+MZr9l`2dh>nkmWjTvkOc%AE%6uf%CEhRtU zMiX?|Ke0qdCy}Rif0MZWMMsUh*u&ni5PuT;gGz%qf!DhP*$BP!7k1Q&)n{36n4M}LMLv0^ z=94=XU44H&)|+8S8je+XZ=k2P07oW@xNFvvd6)KF4cY4=`pb01)`^~Fr|Z#?!Ie>M zzJY6-A5G*wl@qNqhMTU(lK)7>1iRgrBLbdn)2^{D?x;u7vwnSa9|ty){P;br4?Maj zGNv5y8v;ZKYNS?sP$fbc~ z#=w1_(uLffJV{z&WV%caI2wr97?OX?+q#W(@Afay^0815kPU!?*4d_#q+!Z=xlHuaOTcm5 zd_>X9`eaR(k51sjoJyrUM;g;Fs)dz!Zu|helY&a|@@v&qroW zL}NeuA-sK0ePCW7o{hqsBD3WCG_RrXE@%ELk__Kj;Sb}$XuaRX&=t+UQ+R}q(=311tZ;ktV`8^a%XC6zOhH< zh(>0c;V>Spu;u~xKq57K6FtLq-y!=M0r2;v!Ntzr6}#<1-AfQ%(ltJFyKqvUzDnoG z;mLubT!^_dS zua|(DJwGlpt8Fc(xvwd@#`Hb+Uim?S@f_#t+F6m*l!dz9FLj{jy^;)ZKW6LXPOxo` zi&x(w&!;^9`tk!c=Df{-7L^LaEy{@bk4i(GeeV)&3%^VmLt~icx){7j1D&ZOa;3oE zI5<$7uNRI6-(9{Fd_R8w(A-&3s$lAb{Aa{@-xsDUKAT)hvTkH4rxbSjz`71qV z6Ix};^=ZD9tmBWRlu~{)zei0SUdxtn$%Y={GyIgwPv}A&%%kqoUtM(aXGOpI@UsAt1bBjiEuS>Q2l!uImCL`M#O!}?G?(Gsw zOsFw~;u>NOjuo46B$-4P>G4u4(Ref6+bE!Y%EC73V^{qh@Q!xO&$}-2+y~uMn2~_t zT#|g;p5Hi;mZ70Sr{ggd98RMkksC~6P)^4%Z-V|s(T6v<_$vGH$j`(Orpl4}ERJ*^ zHcGr>p1`jT%yy#-ugO1T&R5E~aVW0mon{3rU`=S{yZE$na9Usm9yR6pg_h?jFViv7 zw{6fp^gjPaVb3D&e*YSvZ+ui(^665qVH5>5F62-;OTB+w+NZ{xvOXe#qwww};&~Rw 
zw2Hqz?C}+|Jl*YtrD8;T`6%aS+OZ1_i`?O?;>J&iZH*|W%!+YHefZfjKHVW9&ZM7W z%=A2y1;bU7bM)g(!gP;^7~_>6IweY2r;kTBs{%_a6{ajH*B37o#GsvCR}`;5R5-eO z=UJ7t{ay}L^kaV6=~Y=SGWvWfdt8%#nUAD1P)w#R-&p&Tl5|cRU4V@8@i|7Sz<2XB z<;p3OkXCjs$HzP~a9C1+Z^U6J>G;pa3~w%!rW+bWJT$@a687`fRuCdo!_&e@cUKZl z+8-=hJZiEuK}fz}mN~xrHh=C{I@ZF56GNcANeBO((5dlU4$iU79c{fA`4|k75PuE-3MB{#m2vY2-CmZ(Na)yoqy;^&gS3IEK z3wNATBRd$Y(YW;KKrs6X_Z7qa2W-(;u;b??IyQ`6 z-n4N!FOGhUiCxZPwb;IEW)qv-9Oca|{&WqzDD+E;)KQs?=&Pia{atckWh~Dw%i}hH zbtI@Jku2c1j<0;X5k4&G_mV-cjFI-ey7G|nWb3};vAoza>UML=WDoZP0*w}Z8TR73 ztAZ}GC@Tr@mU^qQBMPfQb)drSu3p%Zu%5gk3r{|cK39ymjc2f z#5kId%d$f^x4EpblUuPex*bo^!xa&{<;#H!R1YKJljojg#RB7g?BnuQh!fRHBbFo) z^;~cE)>i%L-L&kz9Okcd))6IFqq;vlF$rwm4s7snj22eek*2~+_K8U5hq**QrLj*! zt3^HGxp$S?H#?Emb|l6EQL*T8Hp1I%()%!LCEE?wb)gJZyWDC=cMbx^3?geUp#kxL z{*=UaQ-aJeoo=xQ-rM9)m>ouzo`oks@1AwZIkxHrDl6(^O97*sOdt>9C{0)`boJc*(1DgrsZap$Zp8|Nhj{!+W<~^wx>D z%8P_%rBA0lIZIy|oY&J>P2+3j#E%B0TtpitNidumr9D-**|AFdVcu{4Bf<)9&nRts zHN$gY{UwY`^LmiRxF}D46wXXhZIBgfV zRox?RvR0hL3_N0F-n11*^2i8{W^6`G$L`ctnwr88PdGf#9Q>cFiqV-@O(Owe+gE8R zq5ir$Cef>gzB&F4{bK#XEK3^9-wLxHVl*mE2QvjcHRx~LT~8C0OE=~eXp%Kvfp+b8 z5_Eg(;ipftvA`O#vY;3d84wu@Y4Twf-6HVSX(O>}tTmaB8N%DN7|(6RfcMb~tAo7aubgXZe9oO5(=K!y zRIU!agj~L**Cgirq-A&`FTi^P4Aoh#!X zk~%ECX_?jcQVZ^vp^86ctad+&op|GN2kpt8{M%wX824GHO0Bb%!(%hg0g-KL1CNHc z-tpOhsIooB>;bO!NBp;)X{PtsN<7k6@*WQi9;nId`n?$6-X?rNVpu)Mw?Hfk-V~Im ze_6>G95}h`IvHd?m}UK2pvqWEO$(dMRRezJmwY3GR< z&zr+LTVqZLd=~^Agl>MlB@eeY2RxKHYeLJV$+(?~lvdj9Qyj>>v+C}bTpbj)a8}4T z@Xp`$*hCd=yqPu2p_Ra~r8+(^X~3In9yiM&0ph+bOvbX{KKt8AD)-Y%}m>(go0JM2`}+t9KX6)lSy z{`sV=oWjvwO|uJl+c?lj?^F;I#OZyf)!R5x-YnRg04c;Kxy!jcf7kY{_EtukLNKew ztqY&$kEkX_*8Hj2t2**H6`_Z`3j^=E%v;r!pv{?Gvz5o%YEMSCI1gHt#y9UXEWTIn zQV=h=x?9z#ziTr%IN+HfoEDO-BeOPrinOEZ?bJ%Ltx_U}F+*aA1v6^vZ zO4anSpN+79_RVT5YX4*Jq&8+F&$O*ArbgFxowOwL-L6$(7;8H{Z%@lPgN4i)k->N? 
zp3#Z^9*_%cevK=1liGYrMm(tDpI4?j4N$SHsn8`%IfK7a^{Mz>w3Kz$-snv}Y;a{6k7rD<2 zO}0s!%!HHRhKx)B;nQ6G8St5wFLi@2Hh|0iM92Kmq5ARa%3vNh>=QbpZYGTHnFUfLcMoQT)cOz!Y}rCdCu)lx(0g`E-0 z!#Ef`@SY1K(usNOq6e!b{|*bmEAx?@4$Wlq=bf|`z=D&QDM-eU-1|s>4Uk*|zesGJ zoRLpgXcM}%42n$+Vd{}BF0N_xGCbq?O&aYP{dk770l7Gc`|Yx)<2H$j^H|nH7!t$W zD!W%(AY*e#`GICNwT06gM2bc!-BYhru^qvfM|!?7<7EMD!qCeHSNS=LFY+Pp*~Ifn zYD>_)qsc8i3hcPV<9gGoCa4GJn!1fNM6t^i{GRjk5n&lwl+3y(J$1c@yMymKQD|<* zoBz~9QMurqCi}DoOqv(NH7CD>ny6}N3+fq%hwUmEJ3N{J{*U%IQ%@mPqC@f zhn=F1sDwA(y(hye#aEi8?CrFD3QY;)x-dyY!#3$xWjDR16jEd=C)3vi(k?#QHyy6O zg_?7VQB{i%ekiuZ{a*ptIVZ*h?+Sx)BLj&t{S7gl9jZnffdzYqvuGRwC5kAoaKJq= zI8-+&6DmpBT*gg9^vz&m4`s($+EgbjSO5lj?<%$yVvKka_MjE1Q|c#?@gcFMR*y6F z8`N$&&L+J;_|^5*pDY7_80Qjv8j37pmZ(0Q=h`pfZn|F*b|W>`#EFchcU_{>1$Kzj z;)hPmO=w65-e`ETQBQ{*GO>2B1CV-tMD0bVU*Zj@L^vGd9U{A|)LvPUyuJRS7o>}A zru`P1^ajKlO(H8P7P{_6_<+)B*^ih;>~qg~wc}b{D_>Ef!)k2x9AgLC6G=#-k0n?r zxq&$u!1oaOsyFS2`eD6RYOPSo#&L+-=>-+2C{*XX)}p$+KK}qc(JxV-OMRvw`p-Ju z;<5K-YaIlik<0q@Qdj6DUJ{k*hOb+2T=QKx$+P~_&Enc^E0d?C63k(6Gv$x^n2(D1 zmZjT$jk>|d0{~19Q`ku-u5f11!t!oJIiAD7YS4I}>nPP(THIGPJ&lZhKY8c18XGFP z(}yU;5}b(shK)UoTQ;50j;~*<`4eADrqTeU_>o6$3X1Xg<|$9(Zi~k$S>{HKZcWv= zQaYi7*!7=EfI;g$h297>?zd^}MrPv!kKSPGD08TPF>vJNm)baIjP!2g`$Q(Qc9pBP zytX)SW<`}8;~$BurqVU`EmaRuIKeUdJR5QYyC3%vthn(%^mi#On3_FLk>*ls8{A=u zIK(UH1^RC|T7kgzAvigK@oghi>T{AfBzsKq)(&&hZxp5ZMULrHL8-a4w{A=~Y#(W- zr?D>l6%Wm?58MkJq%N6Q#{Wr#g0wIQ#}i=>cJdJEqNJ&(rcFLwld<; z@6gAPsTnvEr8IccgrMn*qU~|UBWgQ;m<1uMIEDuef(YZR83Z^4fDbw8EX}c$kHMDH zrt3!>p0JmMM`M%6aoQlU4O&-yfs-Y+;Hlt#V6@s&rGRbN>SLjUQPWjP^^Af1{;mK~+I7?cNzIR-Xe1RXlb zJvkZTP+*B{i7as-b>>}12NI)}mlHN`5=SILslmjm3~*;KJ!CUMgz5R5+;R^vL4X(& zmAd}`yoQ3y1G%9YNCHxkm*zC}?jd_flcqEXsZ{*QR6BEs&y$q^_J^-d4Ou?;nK{ta zi|MM~;d^5+te@A)sK>F4>9p9~-GnjI45Oh0ADN7@Y;*$6I9AH?0L*o?nA4#vh9e}x z6h=i0*Q9E}y#kvH5TlQ2jIs`dDAEnPk1NniY4qRur37%?e(~)h){Hmu#N=S{i7&Bk zIUi~1GI<@9LfXoeEj)p=;KX%B8QGKpxO5TiX5H+>o@RZU0VKU8Y?rc?*SiBah6=uCPBA1>;1M{{W}_KY0f#R1L$PpA&mc;m>xjdM?`6?f~^NI8IVuu$EOG&nK#>Usqr1 
zD=X#^yki7Lb^aMi@ikia3P21_dwWl3Z-v)kH(yZnjspqeJZDgUReCOZ;9&NVk~7Bm zG)^p$$n<4+$^BNStqOK@RUGFL57RwLNg*5m090^zllWt@=DINo)K3jc)t>% z=tjFisEM~UiJ zTjE5eIP_DVF%bIS!;vr2RaXgGHZtDRJh82pa|6?fdp}fzFG~UNAj2h} zY3H~-X7T$>9N$d3wpbO zgq-fja|o@!cNot(Gap%~+Uym9=LSQtO4z@MUe96Yj>|Khx#<|P;6W?~U>U*aBZ{MC z8Y}xtXWBsKNXapjSaj(mussJehe-!HJV|9ovhkvfukXZQ00sNl15 zB@K$Ud5g=a&*BZQ7GuoG1kBt@d3Mp8P1$UiYF?ajDmJaIb9 zjsIP7gSR(}0M_zB+rs zwoQSmfNv-WI5^CIS*NT}j1@(rgO1_{M_6lvK5-aoo$M+Uw24WL8d_~NRA|>wFI}#f zD#M?|Q^TvIY8czQ%OH`obC_2~vq-vfpf!A{1}~>pRCXO#uf&GgNM~I7cC1%M@&V3Y zu4J7>bvExMFhKNyftVHBVk;m#m~BGWyJC(A2Oh>}dmo^_5Nv-D{Y6eIHRP5Zz9!zg zQL4Qvtqb{99-s^y_nv!AOI2Alw^Nb?^?yyO+p4OYL1a_74&qcB9C9Y_)D2RyG7^jrB=zE7K~+DYTJ0tBHzE@!SBe%kU0@g|ne>0C&7Y)IL&0nr;C`h+r?%AtgIL7*&p#(WE~}%^ z@D)bPm7=ponCbTgO^ z25qeTEa^%xhRkwF%33~m z7t}h>8Q0j>T{1|&DZ%=?$qz9&zQcokavXkFxacB%f> z?>pt-O9$l!X1vEP&xO;Q5RyGh!w+cfdR1z-j(}z}<61iOKT+%S`f>dsVJb78Csx~6 z1Z8`Ox&56n$;oKU3Rzh6$7xh;z3@j^>!2mJ_ldn^KS@x@Ox2||Owgxms+K1o5wxq) zae}QfbhjGY)H1%tXwzs7<>%7grJXU#o{u*_h~u;rX@c??jCPj@*cjw@kSry=U8Tzr zw5LP-P0_&F$yu?+4<9^EgrhcE0UYD6Y0wp)KVOM(dE!({EZEGjJf0+jAagr7hzsK* z0ymmjq&E$+BBGRyN=sdds2@x z^vhCgMcA8|_Y#KOKQNl6O~vczdj4W=+m91i#>!GZ0w<%V7l#G3U7c00uDu!X?s4X0pSW@R|O%TpYC2q^ym z<^`?7Hr}VHmbFI&7`X&4PeU?_P3*TiS{c(^)gs^?xShR@pycul4R3+Bv7Gk=^%2P8 zR}>#39X7jlV648w6H~yvLd`y{8+~B)&8Gv7+4-2Bb6blUw>ALe^DpKN!8>ED)9H|< z+37r2#DC&!r+FPgtp5OS@7^(_*Ib%ECd|8`^f>B1F+AIsR;+`Jj&U`7BV4p<8SMh-v&y0rFe=cH^)-W_fQP)A?G0K?P4CyAeBlcfE>o<+BQ~$oso?o|C~ZW!QJ% zsTfh-VAedtfH)k-v<2z6cI;!M_b_yE&Raga;l*1vmtz#R+k2-vDG2vG;C+d$BIzJ1qM@jy+q<}H)-g-}? 
z@;nlyBLx_#o(ETLTF)F?Eo|1Lgg=gx-+dr+;Nax*Ve2@5=O&wM-^ryKLYmDjt>gay zs0SrU01?|0z?JZPuTHA5=)SSnbzUauTN6+9p|aj>C?czkk;Dj6ZHis9sMK9`j0RC| zW^#W5d5^37ZTwR8$TexI{{WX!@A783B%-#=w%sGhT${BmVdaP!G5-Jy@J&aE<AxZT<}LxI#nF!hg*5(5pEggEJh5Oy^}-9U?+)n8;Apv zdPHl_0&aM906ke5+<%ClK#*-cFH0&Y<+{hNI9#5%=0>LK>Rg|{JWTs8cO{E;?K#}6 z)ke$NbCZs$ItaawoP}I|CmS26X-BcT4F1v{&0`uUcOJR@%ng9|X?qKik`4?GJ~#}QLubPyihA*-`##LqgVC)3Q2)EvX& zY+oWaR>dpVnJ;~4I)HyO4XtuM;yxuoB;@n?iFp42vK~}zrmD6*L51_idtzhKxaXLD zdIpW*h?YjpRJD2>Z%j_hjSwR ztmK9rIr)m#cHk2%4hhM~?K9!9aszKJ7qZTBHu_I9pBdZ? z4mF$WZ}kqJ^@Y(|8Nl6+Mt$aRv-*lN+c2Xmx!146BLtSolrr5_P3iX=r{+M&Y7DczJaA}!ECP!pYHrjvEoxq-O7?5(D{BN z;f+J!t&6vwd7d-GXbW3-Ua3mzH0(`w6ZfyQbHa7@cyEYbpuM*3V#rM1`k%u`ig;~$ z{U$v6mvAGg9mMn@fzpyWJHxB2mtq3|c+b50Khu6t@Vfk*;GFY5b*5I+F{<7h zJda~NXVUu7O%9>AsBOHSkijktte=!mNp&UNkw6N^B*naI!s+o{3+b-tc~-`7W{%}6 zxDY|+Q+oF4@7igb1SFsA=2DVAPp|q{#PnK%x_bQe>a4xQ>UADIw7!t(H5Q>5D#@IF z^XoxZtmaifTO9^Y2Q!x+hngzXv~<5OlBq_|mI}V6)_HV|8|4>jmr%Pz(D3h}ZoSs; z7t~~fpCvkfGuzp$t6*krnd$h8>ik1bUe8u@(C0ar&N7yzD=Il<{{WcK9WRCiVOnCk zs~7=CXw&I5^)BIJrhAhx{7c5lrWYzY0wjDM~F0Hgfm`!#-~ z@jX2iHF|AD)?5w%R`kyisJhDuEZp}qb-X;;`}dgos!?339tU~D8AZyHI@B6QV^Y>| zI(IVuN>I7uF{flj!knLZk>Wb!q+>Z_&`)C?I$AlWk$~|Fmx0FY!B=*9nb%%yV#le4 zpt}vhNm11MPV8}UTRA5#Ox8=7s0p{8xt$lNWP)c+He`8HzYxi#wbOpW7u@%ar8U%p ze=VMnmfXcMR|N6xD$+{^Wd-<-; z*?}x2nZ3w?W|+~mNh}9!%Ds#S0~4~Nwtq6dw`?fn#YTyPV$IabZO=(}1z3m9M^Phb z#I%6mIg$jB3C1~u34xaWAi$pDLAu4Q2;Rp7am1~NYq9HLf~2V*G6?=B5M)`U1V_LdW1~}L z%~`q2Z0VVvp!GfB>)?!>%;yy(H0ag6*3ump3+yB=64gK2M_9UA%z+buo?}Qx;0VZ^ zZ8#eL0Al<1F(E>1@f~BxOd9kRc^sI`>wbTxC)+wzSwMGV{ zsa?3|Ff^+N!H(+d7T9g9jH%^Mcr-q&WCMvY=xLQ|%GtaQ4{1x)g9lO5ym?psT|MPO zDI^1sVr?O-Vo(17D{vjhNjL>R5%Z&Aji&&Pgm6Ss>MYZf?U-_i=mlysD(XXkIzu&W zfa9U-iBP^^&$moUu%eCWQb$}uQhnqwBMXVjg-%aM>{M93Ujkai3cHtsBvrU>%x0to zFma4v=0v2DDmdjJ2r|6En#x{5&%|jH2WZA2H4JRQd=jGqXY#8ySR}>WGCFhbEkPs* zYM(ufvH%V?gC*snzl|YXLJ+~#eK^5~(z@kCk4fO-Jc^>SN&;tBU2aYgbJh#UjZ-1` zk_B3MR`1iCr?EXFRqCHoO<7{O#xcZVwW$;VUJNoaQI@K_=bjcP<;S32B!Jpo6KkH> 
z>pfS~9w$b>OJ0{)!o5ug61=I25OKZ1C)=2K`J|_%c56Q! zaHh{s^@oFMJjK@6UgDz+$^~FRjQHvFd5#u0;yPhP)@!^uu$^p7bo@)zW}xR)r(ZCz1(1!wzDKmi z(s-TKA*tB4WMgUR8`ElnM^#ma>~Bn`%K%lsjD^CLwRV2An}Gh z#65xS?(w3Q8mp-7Q;ZYLJzgN}S0pO+DznK^t*vzaTcLIU0dP1t_CE3bF0EK7tQ>R) z-d8DmCyMlXgz+8CfK(51B`)zX<&X#@f2XOKFQ{-z?bzyYPh88Du;qcrvBbZURaQq5 zb-JLpB3@@uO_+}s(be`FnCdN zx+;D|t7fl?__Arxtz5a|tiWsD(i+2-Jz`#Au;GvAF@)#wJy_yVM0udXD1mrm)a8-VaFh536C++ zSDLvP4c-dP70NOfEA9-*qe#gL+d=9vpN0c3COs5#zJ)+OdX_>IpS3S10qQ|BQa4o_6puaQ7Zw75z7}L zy~JEF3oaxueNQ44Q!CF;X+<5+a|>9>&SeN1RutrQmnce~aSvTV&rTthf_PD%#4J=N zRXc`A=^L#v+BHs&!;Q;@1NWTh6yp+G8qgpYX$Lv&i6=WTlZg*vsK6YEGR#gpOy136 zUm|hI?;sC(Ip#@cC_Uk;QJ!TLOJlS(YOBfX0R~3L3L#at^2eWuxw9Mrfd*{HB;sc% zrZn0%^tP_T*#P#2I=liMiTs|E&DpIBl?V8MI=8Wav8l0Ie5sfo#h!9b*qdhRPo%Vnjj)TtmiEAl2#)Ug=EBHTgdVLS}lGDhuMuG!oN zX!0(a`U>88BezjJ(&`i*@TsgI@Mk3O%h9@~Xnvc@;eV)r^ghrAr$;Ok2u@|F@eQcJ z{{VLxgiTOp_XFNSbB|>_X^T3Ra#u5wuP}vC>c(*Ntk2nMuPD_=CpYoR5f98D6}|aBtBsIL&t0NcEW5y5$&Gz=+TZa3kTDF@G7z zhvd+|n$}!H^&QM*XjUnKy96A2!1;AEk{o7eY4irlx7DqwTmJy-JWof(!Nao*nfU~5 zP_J<{rC|R6YIKOcugr6}aC=VIrHH#3)z4zHKV#=Cc%2!f{T%UEviDR8ERbl-r=F0i zFg=8pRbiaq$j6bb4pB@dn!rh1crO5Gj@lX`(2ru#$v+^bJz11*@M9x`iDwFo(Daq*a@tHm_ck6ojWa~&`@e^YvnEi4CZ(#_)9&7*C+0o>0Etonwf$kXm) z@0W$kv?3OoRZ~^M?Rx3 zRkzj&uFD6=X|!-rJkO)qt4tCOO!K}g^$@FddT&$vo`QMy)?QcFeS~%d#7W5{nQIaU zGWxAc-(h=Pvmbtf02vL}tgc2eHc2I;Htf3RIfqLQHuiw8Fp7nbrVIuvIVT+>K;Q!b zRYVXw!i8agN8&>m)h9hCPST;>(sYqnTHGp)i<2WuxqQ=LCnt>dho-C&4+E|tuh{A~ zumE)sp|&8{vGM!M(UHl|s zM2q*H06F%Ff(2_8w5tCAZ&i|?KyvEN0_)# zf;)p5Tz_)mqGBekK*RROGcLkQ9DB#LBp~OF!e`lHxFzZV<+AuxhYJtn>25@Cr_~6UlfD*as?G{s*I2XgQ;9_0W&ui%~Ya>1D+1y2(6RM&TU`^>@o+4fN~Ij zG9DQ+MpJRtO_8oj$7C0rK3geW!~50FU^9PPZbi2|SVVGd-5EJRin1HtJNV za-3~AVf# zwQRDJSgko??T_Q$Pz4Kt!wKb=>O-8eIhOwb>O?J)t^7!SJg=dtqtvIVSxLdiS=5UV zk2559Ii7ltsr1U;wLJTSG%E4^G3jFM_ROf^k_R-+WMHmX4XS%fHlzdSTo2}B>U?uV z-7Kw7<|U}{?LBLsFk|9Z$0g8|k?3B~dCys2G-ImZdzs_z`k8Tx)!XN!PS2<=aCV?m z`H%SXVDcl}8_<7tARk#+I4_UH@K*S?t;Rg!KWM9KFJqRq$^6G35&LAkr@hp8wG{)p 
z&_8*Ls``eoNvm@o#wV9+8IL5Nl4NpEGiZ3xk7T@!n@8#`UQXSZ_Apj>_O-?Zii7_E zO3c8#NF$lD#}(*~Om+3zn@9C7bNj%SX5^B8FhKT~2N;z|EbN~1v*{R<_sfd#1|T3~ z+{-Ir533&@@P!8^S7z>z09H0nCJES*AwnI;B2vWzh|qxZ+)n4Dzs#DHlPE!`PR2J? zICGxeL=|X38@f#$HAT5S)Jg{UY)o<6ELwPMM4ir z(^Tn@V3Ew|gGOPsSDoCB!@O0FvUY~pT$S`k2T!sGdXc47-=HKt4ofKFwU*0zn~DhC+r1i`UY zb}NOijK%NN8lM{mdwfT4zX6htGtm1?+iPO{{{S)Fy^F4nI;XXEpZ+2ALtq6)=IUVD zg-j4R>n}@eaf67&HidX_I6TW)aAm@be99Hv;S}%(XmzB4((mPmHP?9>H(`zUY9RC0^ zu8PP?93Ju;qO1*G9)^};b7FJ1JP{SW&2=7qAZ9joD!hY&A{zEKP}_JNOo=;Sn==li zo#p-bkgdG~4B*LHfgY(~p-=!g67)6u0b=GZP%lBxXw{t}?srOwG7C`PW)ZSRWKs(+ z%zBLzSilj)Y%ED@ID|^QS;yvRwuQ9zGPwR_Y^Y-k*SrN{2Cp%&qa;H9h>lx>@g1g; z2LqC0HdnEIU1D~E%qE~@f;z=*)2cV8kv1%=4oLjKU8g4pIEhJ&$=E?WO2>|z=RETV zWg9^&%)1o{oNmdM?U}}+&(BCeyx>Lv4^U@ko{)ul`^uHbj756R8P7r`!uyFTrLqhF zs8U8HfIT4sam0YEIWd7pEWa6pEJsLI%X5s%GMse$L?A)id?agcN?-t?*O}Y`iK#|Dg^}b&%BY0m1Y?$$7#|F zFbCTl4%46`&ayIq3uKAZ*6Q3I4^a=~wgGOi)usV0(p`u{Q@A)@N3r&RMPX7bH*mZ&~)mRE1p3xzUqMB$J2QBI%aVEtC370AbHYlUAIZmvzWJGZSS3t(gOi@MVDoHUTB7RdI&rJom(< z7O={k4o~qXuuNrisKeKs{$(xD`ZA=P^vpY3SXCv6z{cElgH21j922m1Fs+4yr%JRi zEM}S%4~E6R2{{?VJ<3GMFWyMOQ}Iw$rccu$LeYm?K+Y5y# z1K0i~PL)E9xIcj!VzbUJnS=WH9fUe*y?Fv@ew%gp1E7sAnOE){!t5-@I&P<)Agrq# zW0N;pl>`p5t)x~vv4~h%fHdvT5N}g(F^Rl%*gQc#m6&wqI|B`>IGyA%>L$R_1mTw~ zc>u!Krm654kbCh96__TI+CZS5_?y3=^|mvP(#_$!@ySu~3j+{o->jc21Dws%q}qTH zo;tw3lOO@cB5Y3i(*ZRAdRpka&)oOLN{4Dgk#sFit;stSnNE zI+5xITu=x*h7VvPyA^Sc{eC4Z9Qs5oLP<*kZ~ze6WyvS@j0F2vx#BK&R!IYbc`=AM zK3f5vNb4k)D#}|K{{U%9-E3_8Nw;#4!z7Ut88p%uVEwtk%q1a)rB6zeu0Oh0PiZUb zBO|nCrUiNEy&wga(}wh8aqI6nj_*%?vd5py4(wZ#_hS(NPcJLL{8c87Mfz0nOE1YH_q63|PV?1#L&6QPPKu(guf`X)TD{_-jE7kxWyFgrw zkC7)#0d`_|V-*fj3b5i;E<1S05Z7ZyhM=HAR46@XpE==zL-6u=tHz$UCMm4#yesZ zx`0O?yv_xy&Nw7-Ep1j;QnM061CbL$OgpV?yPFUeagC(nQCN@w;bAOcutm_@1##b5 z9?&%atB><8**zr>JPC1PXqyrSl(f;GQ$@6|t4kTx|4r9Sqvj+2&MHmv~+} z@+Jze>8#p&bD5*3ysG*JcO3G1&22PG53AY~8qPanDqm>?b&pspwxHSd4&N|uLe){Q zkU$b@fe+QCsdQ;|ubUlm7u)kQ)r+|WafA1J#aF~gBN^I9Qw!$@^&!S#-#``g=-lHh 
zM_-78GOzTi7$cq|yPtx`BjR0YN99K(bCC`qW@544a!v;mth<5~a60B{*{4p5qi`Hz zBy>!*n~$g-aTBxxC(O=RaC%}EwPhUV9?UqSb7HK=zGG11b}c4ykQg^p8e#70F3b}Gz_V! zRCY3!D&TJI2-}^#Bb!hy+(7~#4{*qf`ZM}5t{ZsVPvT2#u#xS^htg?Hsp}h14&C4_3u#fF?6yGn zjxgHc6mjk)ma)93$iT>iTq!*IyF$i}1Jke$edHx7$Jfcsva!!R6AxI$_ z0U#MSR?D@Nou{{P8cIR1mLHi*5UsxBq>YTFPFaWtAWoL@w%NEI*qsXzmTo&0Q1TBZ z3>>*F&N?J&QE+ixAnnH|5~!@jjy_Bn7F-(;eDfOCF?3X{90?Uvpaui4d1A`O03xRY2fT-3 z08)Pvbx6SBI>D_+?r2b0by8C!_k$;DaqlW$Z`yw{a#(cY6=hl>L%GVXVeL{8OE*Cx zlBxzc#1^-8ns#D#tfsX@Te6nw1!zd+yJA~QuXZ6=93F;Jy1d%K+t5m`6qEy`hZMA0 zc5PPWum?|>HUh!1l480oMpq8FBmBnJ#ZY~WhY{F1sbyoI{$V_@bL=}yivzEvNc_q( z{{W{M`G7G?(V4JE1H2l)lV;vZ<11-VP%4%;L1!SBa zu?s7;9|Rqvp_Nv5JdEN~*oP4Tz{eegWoiju9diSxvj?0W!a6_$x&5Q#T>*U)8=RpW zuMvhNPp1(rFm%;qf-7OwM>&k%+Ybw8?xsINWvs9(XLmBEP^T&fND}JG10e+ah_kzF zeSnkO9i-a|8l}V8$9Tt-Ny45FQ5DgGQ!Jzr91P z?-^3tPT(>AduJtklD<&d9Z+bcbrt`9pP(Hf^c&kh#DuXpJ@aFxlw>m5X=fz&5qG+{{YcW zPxI0OIFhAt(h|lbaVkzo$3Q=sLfFna#dHLmAmsY7$$(}#P&vI03WAc;4NKj;9>Ks8%~%@$(O0 zz^zPl;^ZLniKSL*tOkoMf71$w3a2C59=B4sI3F-1kMe?{{RrN1>|++bfQ0_B;<}KNdp}K+T3*M7Ah5vIGs9J z92nwrI}w6-ojL%(1|*D_!L3N?kHqQF7#sZn?cxTvX18fPW2EWPp$%?j#?nXPT-{?R zQpXdgKoIQ}A=<=q2dO{_#%E57Lt^1ZCnu$bCr*hA(CtG6;u7=&ILP!Klc!3TLP0Ah z^py_V1XA)Cz-LPKT1&jxj0*Cr*6KR!@}w0R7OU4YQ{}mM40sZP*0- z&R;3y*Au5fb}@$lZ6mDey(DAabm)5n zwH&WL(=Sj+y%ZCSPMrtXF}+bTr$z(2K%s~n;w@j=zlqbN zdl=nHHfNm6)}qJU+?)<)PJ-+LsxRsX5|8(Z)1&kV9f|KP)C)^h+}x1BY}Hhe}!4>hnI zfV~aiZQX5VEhhl3Mq76hj$6%e5r8R4b4RcvN%d9MIVtIQFfJ+DCOw*hfqnQ0mg^8; zK<>;#=1)O+kM$jCmJM;I+f}E3C`qU)jl48$nKhMAUz;S7DAHp&!*%?yTxo6cC4tMbslGE?D4(109_eDtkeueKS&|&V zn*uz?OuH+Q@@gm)N&q~Kn3kgjNXIe&yodYEOrB)rA5&ZNs*xuPz>>EX{k6UTv-_g! 
T;|Xom00000NkvXXu0mjfO@yaV literal 0 HcmV?d00001 diff --git a/benchmark/test/docker_build/openvino.Dockerfile b/benchmark/test/docker_build/openvino.Dockerfile new file mode 100644 index 000000000..87e766d9e --- /dev/null +++ b/benchmark/test/docker_build/openvino.Dockerfile @@ -0,0 +1,49 @@ +FROM ubuntu:bionic +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + https://apt.repos.intel.com/openvino/2019/GPG-PUB-KEY-INTEL-OPENVINO-2019 \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + echo "deb https://apt.repos.intel.com/openvino/2019 all main\n\ +deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + patch \ + git \ + make \ + intel-openvino-runtime-ubuntu18-2019.3.344 \ + intel-openvino-dev-ubuntu18-2019.3.344 \ + libtbb2 \ + libtool \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + + +RUN useradd -m john + +USER john + +WORKDIR /home/john + +RUN bazel version + diff --git a/benchmark/test/docker_build/tensorflow.Dockerfile b/benchmark/test/docker_build/tensorflow.Dockerfile new file mode 100644 index 000000000..3c85c764b --- /dev/null +++ b/benchmark/test/docker_build/tensorflow.Dockerfile @@ -0,0 +1,45 @@ +FROM ubuntu:bionic +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . 
/etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + echo "deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + git \ + libtbb2 \ + libtool \ + python \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN useradd -m john + +USER john + +WORKDIR /home/john + +RUN bazel version + diff --git a/benchmark/test/docker_build/tensorflow_gpu.Dockerfile b/benchmark/test/docker_build/tensorflow_gpu.Dockerfile new file mode 100644 index 000000000..cc097627b --- /dev/null +++ b/benchmark/test/docker_build/tensorflow_gpu.Dockerfile @@ -0,0 +1,65 @@ +FROM ubuntu:bionic +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + "https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64/7fa2af80.pub" \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . 
/etc/os-release && \ + echo "deb https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://developer.download.nvidia.com/compute/machine-learning/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + cuda-cublas-dev-10-0 \ + cuda-cufft-dev-10-0 \ + cuda-cupti-10-0 \ + cuda-curand-dev-10-0 \ + cuda-cusolver-dev-10-0 \ + cuda-cusparse-dev-10-0 \ + cuda-nvml-dev-10-0 \ + cuda-nvrtc-10-0 \ + git \ + libtbb2 \ + 'libcudnn7=*+cuda10.0' \ + 'libcudnn7-dev=*+cuda10.0' \ + libtool \ + openssh-client \ + rsync \ + python3-setuptools \ + python \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-mark hold libcudnn7 libcudnn7-dev + +RUN useradd -m john + +USER john + +WORKDIR /home/john + +RUN bazel version + +ENV NVIDIA_VISIBLE_DEVICES all +ENV NVIDIA_DRIVER_CAPABILITIES compute,utility + diff --git a/benchmark/test/docker_build/tensorrt.Dockerfile b/benchmark/test/docker_build/tensorrt.Dockerfile new file mode 100644 index 000000000..5e50d151b --- /dev/null +++ b/benchmark/test/docker_build/tensorrt.Dockerfile @@ -0,0 +1,67 @@ +FROM ubuntu:bionic +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + "https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64/7fa2af80.pub" \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . 
/etc/os-release && \ + echo "deb https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://developer.download.nvidia.com/compute/machine-learning/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + cuda-cublas-dev-10-0 \ + cuda-cufft-dev-10-0 \ + cuda-cupti-10-0 \ + cuda-curand-dev-10-0 \ + cuda-cusolver-dev-10-0 \ + cuda-cusparse-dev-10-0 \ + cuda-nvml-dev-10-0 \ + cuda-nvrtc-10-0 \ + git \ + libtbb2 \ + 'libcudnn7=*+cuda10.0' \ + 'libcudnn7-dev=*+cuda10.0' \ + 'libnvinfer7=*+cuda10.0' \ + 'libnvinfer-dev=*+cuda10.0' \ + 'libnvonnxparsers7=*+cuda10.0' \ + 'libnvonnxparsers-dev=*+cuda10.0' \ + libtool \ + python \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-mark hold libcudnn7 libcudnn7-dev libnvinfer7 libnvinfer-dev libnvonnxparsers7 libnvonnxparsers-dev + +RUN useradd -m john + +USER john + +WORKDIR /home/john + +RUN bazel version + +ENV NVIDIA_VISIBLE_DEVICES all +ENV NVIDIA_DRIVER_CAPABILITIES compute,utility + diff --git a/benchmark/test/docker_build/tflite.Dockerfile b/benchmark/test/docker_build/tflite.Dockerfile new file mode 100644 index 000000000..8a2e8fed1 --- /dev/null +++ b/benchmark/test/docker_build/tflite.Dockerfile @@ -0,0 +1,45 @@ +FROM ubuntu:bionic +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . 
/etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + echo "deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + python \ + git \ + libtbb2 \ + libtool \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN useradd -m john + +USER john + +WORKDIR /home/john + +RUN bazel version + diff --git a/benchmark/test/docker_test/openvino.Dockerfile b/benchmark/test/docker_test/openvino.Dockerfile new file mode 100644 index 000000000..f094fbca8 --- /dev/null +++ b/benchmark/test/docker_test/openvino.Dockerfile @@ -0,0 +1,88 @@ +FROM ubuntu:bionic +ARG ADLIK_DIRECTORY +ARG OPENVINO_VERSION +ARG SERVING_SCRIPT +ARG CLIENT_SCRIPT +ARG TEST_MODEL_PATH +ARG SERVING_JSON +ARG CLIENT_INFERENCE_SCRIPT +ARG IMAGE_FILENAME +ARG COMPILE_SCRIPT +ENV COMPILE_SCRIPT=${COMPILE_SCRIPT} +ENV SERVING_JSON=${SERVING_JSON} +ENV OPENVINO_VERSION=${OPENVINO_VERSION} +ENV CLIENT_INFERENCE_SCRIPT=${CLIENT_INFERENCE_SCRIPT} +ENV IMAGE_FILENAME=${IMAGE_FILENAME} +COPY ${ADLIK_DIRECTORY} /home/john/Adlik +COPY ${TEST_MODEL_PATH} /home/john/Adlik/model +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-get update && \ + apt-get install -y supervisor + +RUN . 
/etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + https://apt.repos.intel.com/openvino/2019/GPG-PUB-KEY-INTEL-OPENVINO-2019 \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + echo "deb https://apt.repos.intel.com/openvino/2019 all main\n\ +deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + patch \ + git \ + make \ + intel-openvino-runtime-ubuntu18-$OPENVINO_VERSION \ + intel-openvino-dev-ubuntu18-$OPENVINO_VERSION \ + libtbb2 \ + libtool \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +WORKDIR /home/john + +RUN bazel version + +COPY benchmark/src/supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +RUN cd /home/john/Adlik &&\ + bazel build //adlik_serving/clients/python:build_pip_package -c opt --distdir=/home/john/Adlik/archives &&\ + mkdir /tmp/pip-packages && bazel-bin/adlik_serving/clients/python/build_pip_package /tmp/pip-packages &&\ + export INTEL_CVSDK_DIR=/opt/intel/openvino_${OPENVINO_VERSION}/ &&\ + export InferenceEngine_DIR=$INTEL_CVSDK_DIR/deployment_tools/inference_engine/share &&\ + bazel build //adlik_serving \ + --config=openvino \ + -c opt \ + --distdir=/home/john/Adlik/archives \ + --jobs=5 &&\ + pip3 install --upgrade pip &&\ + pip3 install /tmp/pip-packages/adlik_serving_api-0.0.0-py2.py3-none-any.whl &&\ + cd /home/john/Adlik/model_compiler &&\ + pip3 install . 
&&\ + pip3 install -U tensorflow==1.14 defusedxml==0.5.0 networkx==2.3.0 pillow + +COPY ${SERVING_SCRIPT} /home/john/serving_script.sh +RUN chmod +x /home/john/serving_script.sh +COPY ${CLIENT_SCRIPT} /home/john/client_script.sh +RUN chmod +x /home/john/client_script.sh +COPY ${COMPILE_SCRIPT} /home/john/compile_script.sh +RUN chmod +x /home/john/compile_script.sh +CMD /home/john/compile_script.sh && "/usr/bin/supervisord" diff --git a/benchmark/test/docker_test/tensorflow.Dockerfile b/benchmark/test/docker_test/tensorflow.Dockerfile new file mode 100644 index 000000000..5b95d664a --- /dev/null +++ b/benchmark/test/docker_test/tensorflow.Dockerfile @@ -0,0 +1,82 @@ +FROM ubuntu:bionic +ARG ADLIK_DIRECTORY +ARG SERVING_SCRIPT +ARG CLIENT_SCRIPT +ARG TEST_MODEL_PATH +ARG SERVING_JSON +ARG CLIENT_INFERENCE_SCRIPT +ARG IMAGE_FILENAME +ARG COMPILE_SCRIPT +ENV COMPILE_SCRIPT=${COMPILE_SCRIPT} +ENV SERVING_JSON=${SERVING_JSON} +ENV CLIENT_INFERENCE_SCRIPT=${CLIENT_INFERENCE_SCRIPT} +ENV IMAGE_FILENAME=${IMAGE_FILENAME} +COPY ${ADLIK_DIRECTORY} /home/john/Adlik +COPY ${TEST_MODEL_PATH} /home/john/Adlik/model + +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-get update && \ + apt-get install -y supervisor + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + echo "deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . 
/etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + git \ + libtbb2 \ + libtool \ + python \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +WORKDIR /home/john + +RUN bazel version + +COPY benchmark/src/supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +RUN cd /home/john/Adlik &&\ + bazel build //adlik_serving/clients/python:build_pip_package -c opt --distdir=/home/john/Adlik/archives &&\ + mkdir /tmp/pip-packages && bazel-bin/adlik_serving/clients/python/build_pip_package /tmp/pip-packages &&\ + bazel build //adlik_serving \ + --config=tensorflow-cpu \ + -c opt \ + --distdir=/home/john/Adlik/archives \ + --jobs=3 &&\ + pip3 install --upgrade pip &&\ + pip3 install /tmp/pip-packages/adlik_serving_api-0.0.0-py2.py3-none-any.whl &&\ + cd /home/john/Adlik/model_compiler &&\ + pip3 install . &&\ + pip3 install -U pillow + +COPY ${SERVING_SCRIPT} /home/john/serving_script.sh +RUN chmod +x /home/john/serving_script.sh +COPY ${CLIENT_SCRIPT} /home/john/client_script.sh +RUN chmod +x /home/john/client_script.sh +COPY ${COMPILE_SCRIPT} /home/john/compile_script.sh +RUN chmod +x /home/john/compile_script.sh +CMD /home/john/compile_script.sh && "/usr/bin/supervisord" diff --git a/benchmark/test/docker_test/tensorflow_gpu.Dockerfile b/benchmark/test/docker_test/tensorflow_gpu.Dockerfile new file mode 100644 index 000000000..a67c36c82 --- /dev/null +++ b/benchmark/test/docker_test/tensorflow_gpu.Dockerfile @@ -0,0 +1,104 @@ +FROM ubuntu:bionic +ARG ADLIK_DIRECTORY +ARG SERVING_SCRIPT +ARG CLIENT_SCRIPT +ARG TEST_MODEL_PATH +ARG SERVING_JSON +ARG CLIENT_INFERENCE_SCRIPT +ARG IMAGE_FILENAME +ARG COMPILE_SCRIPT +ENV COMPILE_SCRIPT=${COMPILE_SCRIPT} +ENV SERVING_JSON=${SERVING_JSON} +ENV CLIENT_INFERENCE_SCRIPT=${CLIENT_INFERENCE_SCRIPT} +ENV IMAGE_FILENAME=${IMAGE_FILENAME} +COPY 
${ADLIK_DIRECTORY} /home/john/Adlik +COPY ${TEST_MODEL_PATH} /home/john/Adlik/model + +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-get update && \ + apt-get install -y supervisor + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + "https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64/7fa2af80.pub" \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + echo "deb https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://developer.download.nvidia.com/compute/machine-learning/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . 
/etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + cuda-cublas-dev-10-0 \ + cuda-cufft-dev-10-0 \ + cuda-cupti-10-0 \ + cuda-curand-dev-10-0 \ + cuda-cusolver-dev-10-0 \ + cuda-cusparse-dev-10-0 \ + cuda-nvml-dev-10-0 \ + cuda-nvrtc-10-0 \ + git \ + libtbb2 \ + 'libcudnn7=*+cuda10.0' \ + 'libcudnn7-dev=*+cuda10.0' \ + libtool \ + openssh-client \ + rsync \ + python3-setuptools \ + python \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-mark hold libcudnn7 libcudnn7-dev + +WORKDIR /home/john + +RUN bazel version + +ENV NVIDIA_VISIBLE_DEVICES all +ENV NVIDIA_DRIVER_CAPABILITIES compute,utility + +COPY benchmark/src/supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +RUN cd /home/john/Adlik &&\ + bazel build //adlik_serving/clients/python:build_pip_package -c opt --distdir=/home/john/Adlik/archives &&\ + mkdir /tmp/pip-packages && bazel-bin/adlik_serving/clients/python/build_pip_package /tmp/pip-packages &&\ + env TF_CUDA_VERSION=10.0 \ + bazel build //adlik_serving \ + --config=tensorflow-gpu \ + -c opt \ + --incompatible_use_specific_tool_files=false \ + --distdir=/home/john/Adlik/archives \ + --jobs=50 &&\ + pip3 install --upgrade pip &&\ + pip3 install /tmp/pip-packages/adlik_serving_api-0.0.0-py2.py3-none-any.whl &&\ + cd /home/john/Adlik/model_compiler &&\ + pip3 install . 
&&\ + pip3 install -U pillow + +COPY ${SERVING_SCRIPT} /home/john/serving_script.sh +RUN chmod +x /home/john/serving_script.sh +COPY ${CLIENT_SCRIPT} /home/john/client_script.sh +RUN chmod +x /home/john/client_script.sh +COPY ${COMPILE_SCRIPT} /home/john/compile_script.sh +RUN chmod +x /home/john/compile_script.sh +CMD /home/john/compile_script.sh && "/usr/bin/supervisord" diff --git a/benchmark/test/docker_test/tensorrt.Dockerfile b/benchmark/test/docker_test/tensorrt.Dockerfile new file mode 100644 index 000000000..434a3452b --- /dev/null +++ b/benchmark/test/docker_test/tensorrt.Dockerfile @@ -0,0 +1,124 @@ +FROM ubuntu:bionic +ARG ADLIK_DIRECTORY +ARG SERVING_SCRIPT +ARG CLIENT_SCRIPT +ARG TEST_MODEL_PATH +ARG SERVING_JSON +ARG CLIENT_INFERENCE_SCRIPT +ARG IMAGE_FILENAME +ARG TENSORRT_VERSION +ARG TENSORRT_TAR +ARG COMPILE_SCRIPT +ENV COMPILE_SCRIPT=${COMPILE_SCRIPT} +ENV CLIENT_INFERENCE_SCRIPT=${CLIENT_INFERENCE_SCRIPT} +ENV IMAGE_FILENAME=${IMAGE_FILENAME} +ENV TENSORRT_VERSION=${TENSORRT_VERSION} +ENV SERVING_JSON=${SERVING_JSON} +COPY ${ADLIK_DIRECTORY} /home/john/Adlik +COPY ${TEST_MODEL_PATH} /home/john/Adlik/model + +RUN mkdir /home/john/tensorrt +COPY ${TENSORRT_TAR} /home/john/tensorrt + +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-get update && \ + apt-get install -y supervisor + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + "https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64/7fa2af80.pub" \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . 
/etc/os-release && \ + echo "deb https://developer.download.nvidia.com/compute/cuda/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://developer.download.nvidia.com/compute/machine-learning/repos/$ID$(echo $VERSION_ID | tr -d .)/x86_64 /\n\ +deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + cuda-cublas-dev-10-0 \ + cuda-cufft-dev-10-0 \ + cuda-cupti-10-0 \ + cuda-curand-dev-10-0 \ + cuda-cusolver-dev-10-0 \ + cuda-cusparse-dev-10-0 \ + cuda-nvml-dev-10-0 \ + cuda-nvrtc-10-0 \ + git \ + libtbb2 \ + 'libcudnn7=*+cuda10.0' \ + 'libcudnn7-dev=*+cuda10.0' \ + 'libnvinfer7=*+cuda10.0' \ + 'libnvinfer-dev=*+cuda10.0' \ + 'libnvonnxparsers7=*+cuda10.0' \ + 'libnvonnxparsers-dev=*+cuda10.0' \ + libtool \ + python \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-mark hold libcudnn7 libcudnn7-dev libnvinfer7 libnvinfer-dev libnvonnxparsers7 libnvonnxparsers-dev + +WORKDIR /home/john + +RUN bazel version + +ENV NVIDIA_VISIBLE_DEVICES all +ENV NVIDIA_DRIVER_CAPABILITIES compute,utility + +COPY benchmark/src/supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +RUN cd /home/john/tensorrt &&\ + tar xzvf ${TENSORRT_TAR} &&\ + cd TensorRT-${TENSORRT_VERSION}/python &&\ + pip3 install tensorrt-${TENSORRT_VERSION}-cp36-none-linux_x86_64.whl &&\ + cd /home/john/tensorrt/TensorRT-${TENSORRT_VERSION}/uff &&\ + pip3 install uff-*.whl &&\ + cd /home/john/tensorrt/TensorRT-${TENSORRT_VERSION}/graphsurgeon &&\ + pip3 install graphsurgeon-*.whl + +ENV LD_LIBRARY_PATH=/home/john/tensorrt/TensorRT-${TENSORRT_VERSION}/lib + +RUN cd /home/john/Adlik &&\ + bazel build //adlik_serving/clients/python:build_pip_package -c opt --distdir=/home/john/Adlik/archives &&\ + mkdir /tmp/pip-packages && 
bazel-bin/adlik_serving/clients/python/build_pip_package /tmp/pip-packages &&\ + env TF_CUDA_VERSION=10.0 \ + bazel build //adlik_serving \ + --config=tensorrt \ + -c opt \ + --action_env=LIBRARY_PATH=/usr/local/cuda-10.0/lib64/stubs \ + --incompatible_use_specific_tool_files=false \ + --distdir=/home/john/Adlik/archives \ + --jobs=50 &&\ + pip3 install --upgrade pip &&\ + pip3 install /tmp/pip-packages/adlik_serving_api-0.0.0-py2.py3-none-any.whl &&\ + cd /home/john/Adlik/model_compiler &&\ + pip3 install . &&\ + pip3 install -U tensorflow==1.14 pillow + +COPY ${SERVING_SCRIPT} /home/john/serving_script.sh +RUN chmod +x /home/john/serving_script.sh +COPY ${CLIENT_SCRIPT} /home/john/client_script.sh +RUN chmod +x /home/john/client_script.sh +COPY ${COMPILE_SCRIPT} /home/john/compile_script.sh +RUN chmod +x /home/john/compile_script.sh +CMD /home/john/compile_script.sh && "/usr/bin/supervisord" diff --git a/benchmark/test/docker_test/tflite.Dockerfile b/benchmark/test/docker_test/tflite.Dockerfile new file mode 100644 index 000000000..20f468d29 --- /dev/null +++ b/benchmark/test/docker_test/tflite.Dockerfile @@ -0,0 +1,87 @@ +FROM ubuntu:bionic +COPY sources.list /etc/apt/sources.list +COPY pip.conf /root/.pip/pip.conf +ARG ADLIK_DIRECTORY +ARG SERVING_SCRIPT +ARG CLIENT_SCRIPT +ARG TEST_MODEL_PATH +ARG SERVING_JSON +ARG CLIENT_INFERENCE_SCRIPT +ARG IMAGE_FILENAME +ARG COMPILE_SCRIPT +ENV COMPILE_SCRIPT=${COMPILE_SCRIPT} +ENV SERVING_JSON=${SERVING_JSON} +ENV CLIENT_INFERENCE_SCRIPT=${CLIENT_INFERENCE_SCRIPT} +ENV IMAGE_FILENAME=${IMAGE_FILENAME} +COPY ${ADLIK_DIRECTORY} /home/john/Adlik +COPY ${TEST_MODEL_PATH} /home/john/Adlik/model +RUN apt-get update && \ + apt-get install --no-install-recommends -y ca-certificates && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN apt-get update && \ + apt-get install -y supervisor + +RUN . 
/etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y gnupg && \ + apt-key adv --fetch-keys \ + https://storage.googleapis.com/bazel-apt/doc/apt-key.pub.gpg && \ + apt-get autoremove --purge -y gnupg && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +RUN . /etc/os-release && \ + echo "deb https://storage.googleapis.com/bazel-apt stable jdk1.8" >> /etc/apt/sources.list + +RUN . /etc/os-release && \ + apt-get update && \ + apt-get install --no-install-recommends -y \ + automake \ + bazel \ + make \ + patch \ + python \ + git \ + libtbb2 \ + libtool \ + python3-setuptools \ + python3-wheel \ + python3.7-dev \ + python3-six \ + python3-pip && \ + apt-get clean && \ + find /var/lib/apt/lists -delete + +WORKDIR /home/john + +RUN bazel version + +COPY benchmark/src/supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +RUN cd /home/john/Adlik &&\ + bazel build //adlik_serving/clients/python:build_pip_package -c opt --distdir=/home/john/Adlik/archives &&\ + mkdir /tmp/pip-packages && bazel-bin/adlik_serving/clients/python/build_pip_package /tmp/pip-packages &&\ + export INTEL_CVSDK_DIR=/opt/intel/openvino_${OPENVINO_VERSION}/ &&\ + export InferenceEngine_DIR=$INTEL_CVSDK_DIR/deployment_tools/inference_engine/share &&\ + bazel build //adlik_serving \ + --config=tensorflow-lite-cpu \ + -c opt \ + --distdir=/home/john/Adlik/archives \ + --jobs=3 &&\ + pip3 install --upgrade pip &&\ + pip3 install /tmp/pip-packages/adlik_serving_api-0.0.0-py2.py3-none-any.whl &&\ + cd /home/john/Adlik/model_compiler &&\ + pip3 install . 
&&\ + pip3 install -U tensorflow==1.14 defusedxml==0.5.0 networkx==2.3.0 pillow &&\ + cd /home/john/Adlik/benchmark/src &&\ + python3 compile_model.py -t /home/john/Adlik/model -s ${SERVING_JSON} + +COPY ${SERVING_SCRIPT} /home/john/serving_script.sh +RUN chmod +x /home/john/serving_script.sh +COPY ${CLIENT_SCRIPT} /home/john/client_script.sh +RUN chmod +x /home/john/client_script.sh +COPY ${COMPILE_SCRIPT} /home/john/compile_script.sh +RUN chmod +x /home/john/compile_script.sh +CMD /home/john/compile_script.sh && "/usr/bin/supervisord" diff --git a/benchmark/test/serving_script/openvino_serving_script.sh b/benchmark/test/serving_script/openvino_serving_script.sh new file mode 100644 index 000000000..0ba2bb269 --- /dev/null +++ b/benchmark/test/serving_script/openvino_serving_script.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +INSTALL_DIR=/opt/intel/openvino_$OPENVINO_VERSION +source $INSTALL_DIR/bin/setupvars.sh +cd /home/john/Adlik/bazel-bin/adlik_serving && \ +./adlik_serving --model_base_path=/home/john/Adlik/model/model_repos --grpc_port=8500 --http_port=8501 diff --git a/benchmark/test/serving_script/serving_script.sh b/benchmark/test/serving_script/serving_script.sh new file mode 100644 index 000000000..f0b9aa27d --- /dev/null +++ b/benchmark/test/serving_script/serving_script.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash +cd /home/john/Adlik/bazel-bin/adlik_serving && \ +./adlik_serving --model_base_path=/home/john/Adlik/model/model_repos --grpc_port=8500 --http_port=8501 \ No newline at end of file diff --git a/benchmark/test/test_automatic_test.py b/benchmark/test/test_automatic_test.py new file mode 100644 index 000000000..fa70d1640 --- /dev/null +++ b/benchmark/test/test_automatic_test.py @@ -0,0 +1,33 @@ +""" +The test of automatic test. 
+""" + +import unittest +import subprocess +import os + + +class TestAutomaticTest(unittest.TestCase): + """ + The test of automatic test + """ + + @staticmethod + def test_automatic_test(): + base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + model_command = ['python3', 'benchmark/test/test_model/mnist_keras/mnist_keras.py'] + subprocess.run(args=model_command, cwd=base_dir, check=True) + command = ['python3', 'benchmark/src/automatic_test.py', + '-d', 'benchmark/test/docker_test/openvino.Dockerfile', + '-s', 'openvino', + '-b', '/media/A/work/adlik-test/src/Adlik-master', + '-a', '.', + '-m', 'mnist', + '-c', 'benchmark/test/client_script/client_script.sh', + '-ss', 'benchmark/test/serving_script/openvino_serving_script.sh', + '-l', '/media/A/work/adlik-test/src/log', + '-tm', 'benchmark/test/test_model/mnist_keras', + '-cis', 'mnist_client.py', + '-i', 'mnist.png', + '-cs', 'benchmark/test/compile_script/openvino_compile_script.sh'] + subprocess.run(args=command, cwd=base_dir, check=True) diff --git a/benchmark/test/test_model/mnist_keras/mnist_keras.py b/benchmark/test/test_model/mnist_keras/mnist_keras.py new file mode 100644 index 000000000..125e2e15d --- /dev/null +++ b/benchmark/test/test_model/mnist_keras/mnist_keras.py @@ -0,0 +1,50 @@ +""" +This is a script for training mnist model.
+""" +import os + +import keras +import numpy as np + + +def process_dataset(): + # Import the data + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + + x_train, x_test = x_train / 255.0, x_test / 255.0 + + # Reshape the data + x_train = np.reshape(x_train, (60000, 28, 28, 1)) + x_test = np.reshape(x_test, (10000, 28, 28, 1)) + return x_train, y_train, x_test, y_test + + +def create_model(): + model = keras.models.Sequential() + model.add(keras.layers.Conv2D(32, kernel_size=(3, 3), + activation='relu', + input_shape=(28, 28, 1))) + model.add(keras.layers.Conv2D(64, (3, 3), activation='relu')) + model.add(keras.layers.MaxPooling2D(pool_size=(2, 2))) + model.add(keras.layers.Dropout(0.25)) + model.add(keras.layers.Reshape((9216,))) + model.add(keras.layers.Dense(128, activation='relu')) + model.add(keras.layers.Dropout(0.5)) + model.add(keras.layers.Dense(10, activation='softmax')) + model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy']) + return model + + +def _train(): + x_train, y_train, x_test, y_test = process_dataset() + model = create_model() + model.fit(x_train, y_train, epochs=2, verbose=1) + model.evaluate(x_test, y_test) + save_path = os.path.join(os.path.dirname(__file__), 'model', 'mnist.h5') + dir_name = os.path.dirname(save_path) + os.makedirs(dir_name, exist_ok=True) + model.save(save_path) + + +if __name__ == '__main__': + _train() diff --git a/benchmark/test/test_model/mnist_keras/serving_model.json b/benchmark/test/test_model/mnist_keras/serving_model.json new file mode 100644 index 000000000..d492d5ae6 --- /dev/null +++ b/benchmark/test/test_model/mnist_keras/serving_model.json @@ -0,0 +1,22 @@ +{ + "serving_type": "openvino", + "input_model": "model/mnist.h5", + "export_path": "model_repos", + "input_layer_names": [ + "conv2d_1" + ], + "output_layer_names": [ + "dense_2" + ], + "input_formats": [ + "channels_last" + ], + "input_signatures": [ + "image" + ], + "output_signatures": [ + 
"label" + ], + "model_name": "mnist", + "max_batch_size": 128 +} \ No newline at end of file diff --git a/benchmark/test/test_model/mnist_pytorch/mnist_pytorch.py b/benchmark/test/test_model/mnist_pytorch/mnist_pytorch.py new file mode 100644 index 000000000..2a79e251d --- /dev/null +++ b/benchmark/test/test_model/mnist_pytorch/mnist_pytorch.py @@ -0,0 +1,93 @@ +""" +This is a script for training mnist model. +""" + +from __future__ import print_function +import os +import torch.nn as nn +import torch.onnx +import torch.nn.functional as F +import torch.optim as optim +from torchvision import datasets, transforms +import torch.utils.data.distributed +import time +import torch.nn.utils +from torch.autograd import Variable + + +def dataset(): + data_dir = os.path.join(os.path.dirname(__file__), 'data') + train_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=True, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=50, shuffle=True) + test_loader = torch.utils.data.DataLoader( + datasets.MNIST(data_dir, train=False, transform=transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.1307,), (0.3081,)) + ])), + batch_size=50, shuffle=True) + return train_loader, test_loader + + +class Net(nn.Module): + def __init__(self): + super(Net, self).__init__() + self.conv1 = nn.Conv2d(1, 20, 5, 1) + self.conv2 = nn.Conv2d(20, 50, 5, 1) + self.fc1 = nn.Linear(4 * 4 * 50, 500) + self.fc2 = nn.Linear(500, 10) + + def forward(self, x): + x = F.relu(self.conv1(x)) + x = F.max_pool2d(x, 2, 2) + x = F.relu(self.conv2(x)) + x = F.max_pool2d(x, 2, 2) + x = x.view(-1, 4 * 4 * 50) + x = F.relu(self.fc1(x)) + x = self.fc2(x) + return F.softmax(x, dim=-1) + + +def _train(model, loss_fn, optimizer, data_loader, epochs): + total_samples = 0 + total_time = 0 + + for epoch in range(epochs): + for step, (x, target) in enumerate(data_loader, 0): + start_time = time.time() + + 
output = model(x) + loss = loss_fn(output, target) + optimizer.zero_grad() + loss.backward() + # optimizer.step() + + end_time = time.time() + total_time += end_time - start_time + total_samples += len(x) + + print('Epoch {} Step {}: speed = {}.'.format(epoch, step, total_samples / total_time)) + + +def main(): + device = torch.device("cpu") + train_loader, test_loader = dataset() + model = Net().to(device) + loss_fn = torch.nn.CrossEntropyLoss() + optimizer = optim.SGD(model.parameters(), lr=0.01) + + _train(model=model, loss_fn=loss_fn, optimizer=optimizer, data_loader=train_loader, epochs=1) + dummy_input = Variable(torch.randn(1, 1, 28, 28)) + dummy_input = dummy_input.to(device) + save_path = os.path.join(os.path.dirname(__file__), 'model', 'mnist.onnx') + dir_name = os.path.dirname(save_path) + os.makedirs(dir_name, exist_ok=True) + torch.onnx.export(model, dummy_input, save_path, verbose=True, keep_initializers_as_inputs=True) + + +if __name__ == '__main__': + main() diff --git a/benchmark/test/test_model/mnist_pytorch/serving_model.json b/benchmark/test/test_model/mnist_pytorch/serving_model.json new file mode 100644 index 000000000..a5eda3c41 --- /dev/null +++ b/benchmark/test/test_model/mnist_pytorch/serving_model.json @@ -0,0 +1,24 @@ +{ + "serving_type": "openvino", + "input_model": "model/mnist_softmax.onnx", + "export_path": "model_repos", + "input_names": [ + "input.1" + ], + "input_formats": [ + "channels_first" + ], + "output_names": [ + "20" + ], + "input_signatures": [ + "image" + ], + "output_signatures": [ + "label" + ], + "model_name": "mnist", + "job_id": "mnist_pytorch", + "callback": "", + "max_batch_size": 128 +} \ No newline at end of file diff --git a/benchmark/test/test_model/mnist_tensorflow/mnist_tensorflow.py b/benchmark/test/test_model/mnist_tensorflow/mnist_tensorflow.py new file mode 100644 index 000000000..2bc739f18 --- /dev/null +++ b/benchmark/test/test_model/mnist_tensorflow/mnist_tensorflow.py @@ -0,0 +1,134 @@ +""" 
+CNN-MNIST +""" + +from __future__ import absolute_import, division, print_function, unicode_literals +import tensorflow as tf +from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 +from tensorflow.keras.layers import Dense, Flatten, Conv2D +from tensorflow.keras import Model +import os + + +def load_dataset(): + mnist = tf.keras.datasets.mnist + (x_train, y_train), (x_test, y_test) = mnist.load_data() + x_train, x_test = x_train / 255.0, x_test / 255.0 + + x_train = x_train[..., tf.newaxis] + x_test = x_test[..., tf.newaxis] + + train_ds = tf.data.Dataset.from_tensor_slices( + (x_train, y_train)).shuffle(10000).batch(32) + test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32) + return train_ds, test_ds + + +class MyModel(Model): + def __init__(self): + super(MyModel, self).__init__() + self.conv1 = Conv2D(32, 3, activation='relu') + self.flatten = Flatten() + self.d1 = Dense(128, activation='relu') + self.d2 = Dense(10, activation='softmax') + + def call(self, x): + x = self.conv1(x) + x = self.flatten(x) + x = self.d1(x) + return self.d2(x) + + +def get_loss(): + return tf.keras.losses.SparseCategoricalCrossentropy() + + +def get_optimizer(): + return tf.keras.optimizers.Adam() + + +@tf.function +def train_step(images, labels, model, loss_object, optimizer, train_loss, train_accuracy): + with tf.GradientTape() as tape: + predictions = model(images) + print(model.input_names, model.output_names) + loss = loss_object(labels, predictions) + gradients = tape.gradient(loss, model.trainable_variables) + optimizer.apply_gradients(zip(gradients, model.trainable_variables)) + + train_loss(loss) + train_accuracy(labels, predictions) + + +@tf.function +def test_step(images, labels, model, loss_object, test_loss, test_accuracy): + predictions = model(images) + t_loss = loss_object(labels, predictions) + + test_loss(t_loss) + test_accuracy(labels, predictions) + + +def main(): + model = MyModel() + epochs = 2 + 
train_dataset, test_dataset = load_dataset() + loss_object = get_loss() + optimizer = get_optimizer() + train_loss = tf.keras.metrics.Mean(name='train_loss') + train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy') + test_loss = tf.keras.metrics.Mean(name='test_loss') + test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy') + for epoch in range(epochs): + train_loss.reset_states() + train_accuracy.reset_states() + test_loss.reset_states() + test_accuracy.reset_states() + + for images, labels in train_dataset: + train_step(images, labels, model, loss_object, optimizer, train_loss, train_accuracy) + + for test_images, test_labels in test_dataset: + test_step(test_images, test_labels, model, loss_object, test_loss, test_accuracy) + + template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}' + print(template.format(epoch + 1, + train_loss.result(), + train_accuracy.result() * 100, + test_loss.result(), + test_accuracy.result() * 100)) + save_path = os.path.join(os.path.dirname(__file__), 'model') + dir_name = os.path.dirname(save_path) + os.makedirs(dir_name, exist_ok=True) + tf.saved_model.save(model, save_path) + + # Convert Keras model to ConcreteFunction + full_model = tf.function(lambda x: model(x)) + full_model = full_model.get_concrete_function( + tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype)) + + # Get frozen ConcreteFunction + frozen_func = convert_variables_to_constants_v2(full_model) + frozen_func.graph.as_graph_def() + + layers = [op.name for op in frozen_func.graph.get_operations()] + print("-" * 50) + print("Frozen model layers: ") + for layer in layers: + print(layer) + + print("-" * 50) + print("Frozen model inputs: ") + print(frozen_func.inputs) + print("Frozen model outputs: ") + print(frozen_func.outputs) + + # Save frozen graph from frozen ConcreteFunction to hard drive + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, + logdir="./frozen_models", +
name=os.path.join(save_path, "frozen_graph.pb"), + as_text=False) + + +if __name__ == '__main__': + main() diff --git a/benchmark/test/test_model/mnist_tensorflow/serving_model.json b/benchmark/test/test_model/mnist_tensorflow/serving_model.json new file mode 100644 index 000000000..88a594097 --- /dev/null +++ b/benchmark/test/test_model/mnist_tensorflow/serving_model.json @@ -0,0 +1,22 @@ +{ + "serving_type": "tf", + "input_model": "model/frozen_graph.pb", + "export_path": "model_repos_tf2", + "input_names": [ + "x:0" + ], + "output_names": [ + "Identity:0" + ], + "input_formats": [ + "channels_last" + ], + "input_signatures": [ + "image" + ], + "output_signatures": [ + "label" + ], + "model_name": "mnist", + "max_batch_size": 128 +} \ No newline at end of file diff --git a/benchmark/test/test_model/resnet50_keras/resnet50_keras.py b/benchmark/test/test_model/resnet50_keras/resnet50_keras.py new file mode 100644 index 000000000..7303b998d --- /dev/null +++ b/benchmark/test/test_model/resnet50_keras/resnet50_keras.py @@ -0,0 +1,22 @@ +""" +Resnet50 model +""" + +from keras.applications.resnet50 import ResNet50 +import os + + +def get_model(): + return ResNet50(weights='imagenet') + + +def main(): + model = get_model() + save_path = os.path.join(os.path.dirname(__file__), 'model', 'resnet50.h5') + dir_name = os.path.dirname(save_path) + os.makedirs(dir_name, exist_ok=True) + model.save(save_path) + + +if __name__ == '__main__': + main() diff --git a/benchmark/test/test_model/resnet50_keras/serving_model.json b/benchmark/test/test_model/resnet50_keras/serving_model.json new file mode 100644 index 000000000..5e5290fee --- /dev/null +++ b/benchmark/test/test_model/resnet50_keras/serving_model.json @@ -0,0 +1,22 @@ +{ + "serving_type": "tf", + "input_model": "model/resnet50.h5", + "export_path": "model_repos", + "input_layer_names": [ + "input_1" + ], + "output_layer_names": [ + "fc1000" + ], + "input_formats": [ + "channels_last" + ], + "input_signatures": [ + "image" 
"""
Resnet50 model

Downloads the ImageNet-pretrained torchvision ResNet50 and exports it to
ONNX under ./model next to this script, for use by the benchmark compiler.
"""

import os

import torch
import torch.onnx
import torchvision


def get_model():
    """Return a torchvision ResNet50 with ImageNet pretrained weights.

    NOTE: downloads the weights on first use (network access required).
    """
    return torchvision.models.resnet50(pretrained=True)


def main():
    """Trace the pretrained model and export it to <script_dir>/model/resnet50.onnx."""
    device = torch.device('cpu')
    # torch.autograd.Variable is deprecated; a plain tensor is a valid dummy input.
    dummy_input = torch.randn(1, 3, 224, 224).to(device)
    model = get_model()
    # BUG FIX: switch BatchNorm/Dropout to inference behavior before tracing;
    # a freshly constructed model is in train mode, which would bake
    # training-mode BatchNorm semantics into the exported graph.
    model.eval()
    save_path = os.path.join(os.path.dirname(__file__), 'model', 'resnet50.onnx')
    # exist_ok makes reruns idempotent instead of raising FileExistsError.
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    torch.onnx.export(model, dummy_input, save_path, verbose=True,
                      keep_initializers_as_inputs=True)


if __name__ == '__main__':
    main()
"""
Resnet50 model

Builds the ImageNet-pretrained Keras ResNet50 under a TF1 compat session and
saves its variables as a checkpoint under ./model next to this script.
"""

import os

import tensorflow.compat.v1 as tf


def _get_model():
    """Return a Keras ResNet50 with ImageNet pretrained weights.

    NOTE: downloads the weights on first use (network access required).
    """
    return tf.keras.applications.resnet50.ResNet50(weights='imagenet')


def main():
    """Save the model's variables to <script_dir>/model/resnet50.ckpt."""
    with tf.Session() as sess:
        model = _get_model()
        print(model.output_names)
        # NOTE(review): running the global initializer *after* the Keras model
        # loads its pretrained weights may re-randomize them in this session —
        # verify the resulting checkpoint actually contains ImageNet weights.
        sess.run(tf.global_variables_initializer())
        save_path = os.path.join(os.path.dirname(__file__), 'model', 'resnet50.ckpt')
        # exist_ok makes reruns idempotent instead of raising FileExistsError.
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        tf.train.Saver().save(sess, save_path)


if __name__ == '__main__':
    main()
"""
Resnet50 model

Exports the ImageNet-pretrained Keras ResNet50 both as a SavedModel and as a
frozen GraphDef (frozen_graph.pb) under ./model next to this script.
"""

import os

import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2


def _get_model():
    """Return a Keras ResNet50 with ImageNet pretrained weights.

    NOTE: downloads the weights on first use (network access required).
    """
    return tf.keras.applications.resnet50.ResNet50(include_top=True, weights='imagenet',
                                                   input_tensor=None, input_shape=None,
                                                   pooling=None,
                                                   classes=1000)


def main():
    """Save the model as SavedModel and frozen GraphDef under <script_dir>/model."""
    model = _get_model()
    print(model.output_names)
    save_path = os.path.join(os.path.dirname(__file__), 'model')
    # BUG FIX: create the export directory itself; the original created its
    # parent (os.path.dirname(save_path)), leaving 'model/' to be made as a
    # side effect of tf.saved_model.save only.
    os.makedirs(save_path, exist_ok=True)
    # saved model to SavedModel format
    tf.saved_model.save(model, save_path)

    # Convert Keras model to ConcreteFunction so it can be frozen.
    full_model = tf.function(lambda x: model(x))
    full_model = full_model.get_concrete_function(
        tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype))

    # Get frozen ConcreteFunction (all variables inlined as constants).
    frozen_func = convert_variables_to_constants_v2(full_model)

    print("-" * 50)
    print("Frozen model layers: ")
    for layer in (op.name for op in frozen_func.graph.get_operations()):
        print(layer)

    print("-" * 50)
    print("Frozen model inputs: ")
    print(frozen_func.inputs)
    print("Frozen model outputs: ")
    print(frozen_func.outputs)

    # BUG FIX: tf.io.write_graph joins logdir and name, so the original
    # (logdir="./frozen_models", name=<save_path>/frozen_graph.pb) did not
    # write to model/frozen_graph.pb as serving_model_pb.json expects.
    tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                      logdir=save_path,
                      name="frozen_graph.pb",
                      as_text=False)


if __name__ == '__main__':
    main()
000000000..a6f3fea8b --- /dev/null +++ b/benchmark/tox.ini @@ -0,0 +1,9 @@ +[tox] +envlist = py37-dev + +[testenv] +commands = pylint src + bandit -c bandit.yaml -r src + flake8 src + +deps = .[test]