From c17421b29c8d38e29a4be683159427da5e9c5fb9 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Thu, 1 Apr 2021 22:46:52 +0300 Subject: [PATCH 1/9] Add simple model downloader forked from model zoo --- examples/downloader/.gitignore | 5 + examples/downloader/common.py | 675 ++++++++++++++++++++++ examples/downloader/downloader.py | 385 ++++++++++++ examples/models/.gitignore | 2 + examples/models/mobilenet-ssd/model.yml | 33 ++ examples/models/tiny-yolo/model.yml | 29 + examples/models/video-resources/model.yml | 26 + 7 files changed, 1155 insertions(+) create mode 100644 examples/downloader/.gitignore create mode 100644 examples/downloader/common.py create mode 100755 examples/downloader/downloader.py create mode 100644 examples/models/.gitignore create mode 100644 examples/models/mobilenet-ssd/model.yml create mode 100644 examples/models/tiny-yolo/model.yml create mode 100644 examples/models/video-resources/model.yml diff --git a/examples/downloader/.gitignore b/examples/downloader/.gitignore new file mode 100644 index 000000000..87158aee7 --- /dev/null +++ b/examples/downloader/.gitignore @@ -0,0 +1,5 @@ +* +!common.py +!converter.py +!downloader.py +!.gitignore \ No newline at end of file diff --git a/examples/downloader/common.py b/examples/downloader/common.py new file mode 100644 index 000000000..f49a3574b --- /dev/null +++ b/examples/downloader/common.py @@ -0,0 +1,675 @@ +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
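+
+# NOTE: this module is a trimmed-down fork of common.py from the Open Model
+# Zoo downloader tools. It keeps what downloader.py needs (model.yml
+# deserialization, HTTP/Google Drive file sources, parallel-job output
+# plumbing); the IR precision/file-layout checks are commented out further
+# down so that flat lists of .blob/.mp4 files pass validation.
+#
+# Typical invocation (as wired into install_requirements.py later in this
+# series):
+#   python3 examples/downloader/downloader.py --all \
+#       --cache_dir examples/downloader/ -o examples/models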
+ +import collections +import concurrent.futures +import contextlib +import fnmatch +import json +import platform +import queue +import re +import shlex +import shutil +import signal +import subprocess +import sys +import threading +import traceback + +from pathlib import Path + +import requests +import yaml + +DOWNLOAD_TIMEOUT = 5 * 60 +OMZ_ROOT = Path(__file__).resolve().parents[1] +MODEL_ROOT = OMZ_ROOT / 'models' + +# make sure to update the documentation if you modify these +KNOWN_FRAMEWORKS = { + 'caffe': None, + 'caffe2': 'caffe2_to_onnx.py', + 'dldt': None, + 'mxnet': None, + 'onnx': None, + 'pytorch': 'pytorch_to_onnx.py', + 'tf': None, +} +KNOWN_PRECISIONS = { + 'FP16', 'FP16-INT1', 'FP16-INT8', + 'FP32', 'FP32-INT1', 'FP32-INT8', +} +KNOWN_TASK_TYPES = { + 'action_recognition', + 'classification', + 'colorization', + 'detection', + 'face_recognition', + 'feature_extraction', + 'head_pose_estimation', + 'human_pose_estimation', + 'image_inpainting', + 'image_processing', + 'image_translation', + 'instance_segmentation', + 'machine_translation', + 'monocular_depth_estimation', + 'object_attributes', + 'optical_character_recognition', + 'place_recognition', + 'question_answering', + 'semantic_segmentation', + 'sound_classification', + 'speech_recognition', + 'style_transfer', + 'token_recognition', + 'text_to_speech', +} + +KNOWN_QUANTIZED_PRECISIONS = {p + '-INT8': p for p in ['FP16', 'FP32']} +assert KNOWN_QUANTIZED_PRECISIONS.keys() <= KNOWN_PRECISIONS + +RE_MODEL_NAME = re.compile(r'[0-9a-zA-Z._-]+') +RE_SHA256SUM = re.compile(r'[0-9a-fA-F]{64}') + + +class JobContext: + def __init__(self): + self._interrupted = False + + def print(self, value, *, end='\n', file=sys.stdout, flush=False): + raise NotImplementedError + + def printf(self, format, *args, file=sys.stdout, flush=False): + self.print(format.format(*args), file=file, flush=flush) + + def subprocess(self, args, **kwargs): + raise NotImplementedError + + def check_interrupted(self): + if self._interrupted: + raise RuntimeError("job interrupted") + + def interrupt(self): + self._interrupted = True + + @staticmethod + def _signal_message(signal_num): + # once Python 3.8 is the minimum supported version, + # signal.strsignal can be used here + + signals = type(signal.SIGINT) + + try: + signal_str = f'{signals(signal_num).name} ({signal_num})' + except ValueError: + signal_str = f'{signal_num}' + + return f'Terminated by signal {signal_str}' + +class DirectOutputContext(JobContext): + def print(self, value, *, end='\n', file=sys.stdout, flush=False): + print(value, end=end, file=file, flush=flush) + + def subprocess(self, args, **kwargs): + return_code = subprocess.run(args, **kwargs).returncode + + if return_code < 0: + print(self._signal_message(-return_code), file=sys.stderr) + + return return_code == 0 + + +class QueuedOutputContext(JobContext): + def __init__(self, output_queue): + super().__init__() + self._output_queue = output_queue + + def print(self, value, *, end='\n', file=sys.stdout, flush=False): + self._output_queue.put((file, value + end)) + + def subprocess(self, args, **kwargs): + with subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + universal_newlines=True, **kwargs) as p: + for line in p.stdout: + self._output_queue.put((sys.stdout, line)) + return_code = p.wait() + + if return_code < 0: + self._output_queue.put((sys.stderr, self._signal_message(-return_code))) + + return return_code == 0 + +class JobWithQueuedOutput(): + def __init__(self, context, output_queue, future): + 
self._context = context + self._output_queue = output_queue + self._future = future + self._future.add_done_callback(lambda future: self._output_queue.put(None)) + + def complete(self): + for file, fragment in iter(self._output_queue.get, None): + print(fragment, end='', file=file, flush=True) # for simplicity, flush every fragment + + return self._future.result() + + def cancel(self): + self._context.interrupt() + self._future.cancel() + + +def run_in_parallel(num_jobs, f, work_items): + with concurrent.futures.ThreadPoolExecutor(num_jobs) as executor: + def start(work_item): + output_queue = queue.Queue() + context = QueuedOutputContext(output_queue) + return JobWithQueuedOutput( + context, output_queue, executor.submit(f, context, work_item)) + + jobs = list(map(start, work_items)) + + try: + return [job.complete() for job in jobs] + except BaseException: + for job in jobs: job.cancel() + raise + +EVENT_EMISSION_LOCK = threading.Lock() + +class Reporter: + GROUP_DECORATION = '#' * 16 + '||' + SECTION_DECORATION = '=' * 10 + ERROR_DECORATION = '#' * 10 + + def __init__(self, job_context, *, + enable_human_output=True, enable_json_output=False, event_context={}): + self.job_context = job_context + self.enable_human_output = enable_human_output + self.enable_json_output = enable_json_output + self.event_context = event_context + + def print_group_heading(self, format, *args): + if not self.enable_human_output: return + self.job_context.printf('{} {} {}', + self.GROUP_DECORATION, format.format(*args), self.GROUP_DECORATION[::-1]) + self.job_context.print('') + + def print_section_heading(self, format, *args): + if not self.enable_human_output: return + self.job_context.printf('{} {}', self.SECTION_DECORATION, format.format(*args), flush=True) + + def print_progress(self, format, *args): + if not self.enable_human_output: return + self.job_context.print(format.format(*args), end='\r' if sys.stdout.isatty() else '\n', flush=True) + + def end_progress(self): + if not self.enable_human_output: return + if sys.stdout.isatty(): + self.job_context.print('') + + def print(self, format='', *args, flush=False): + if not self.enable_human_output: return + self.job_context.printf(format, *args, flush=flush) + + def log_warning(self, format, *args, exc_info=False): + if exc_info: + self.job_context.print(traceback.format_exc(), file=sys.stderr, end='') + self.job_context.printf("{} Warning: {}", self.ERROR_DECORATION, format.format(*args), file=sys.stderr) + + def log_error(self, format, *args, exc_info=False): + if exc_info: + self.job_context.print(traceback.format_exc(), file=sys.stderr, end='') + self.job_context.printf("{} Error: {}", self.ERROR_DECORATION, format.format(*args), file=sys.stderr) + + def log_details(self, format, *args): + print(self.ERROR_DECORATION, ' ', format.format(*args), file=sys.stderr) + + def emit_event(self, type, **kwargs): + if not self.enable_json_output: return + + # We don't print machine-readable output through the job context, because + # we don't want it to be serialized. If we serialize it, then the consumer + # will lose information about the order of events, and we don't want that to happen. + # Instead, we emit events directly to stdout, but use a lock to ensure that + # JSON texts don't get interleaved. 
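+        # Each event is emitted as a single JSON object per line; keys beyond
+        # "$type" come from event_context and kwargs, e.g.:
+        #   {"$type": "model_file_download_begin", "model": "mobilenet-ssd",
+        #    "model_file": "mobilenet-ssd_openvino_2021.2_6shave.blob", "size": 14510848}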
+ with EVENT_EMISSION_LOCK: + json.dump({'$type': type, **self.event_context, **kwargs}, sys.stdout, indent=None) + print() + + def with_event_context(self, **kwargs): + return Reporter( + self.job_context, + enable_human_output=self.enable_human_output, + enable_json_output=self.enable_json_output, + event_context={**self.event_context, **kwargs}, + ) + +class DeserializationError(Exception): + def __init__(self, problem, contexts=()): + super().__init__(': '.join(contexts + (problem,))) + self.problem = problem + self.contexts = contexts + +@contextlib.contextmanager +def deserialization_context(context): + try: + yield None + except DeserializationError as exc: + raise DeserializationError(exc.problem, (context,) + exc.contexts) from exc + +def validate_string(context, value): + if not isinstance(value, str): + raise DeserializationError('{}: expected a string, got {!r}'.format(context, value)) + return value + +def validate_string_enum(context, value, known_values): + str_value = validate_string(context, value) + if str_value not in known_values: + raise DeserializationError('{}: expected one of {!r}, got {!r}'.format(context, known_values, value)) + return str_value + +def validate_relative_path(context, value): + path = Path(validate_string(context, value)) + + if path.anchor or '..' in path.parts: + raise DeserializationError('{}: disallowed absolute path or parent traversal'.format(context)) + + return path + +def validate_nonnegative_int(context, value): + if not isinstance(value, int) or value < 0: + raise DeserializationError( + '{}: expected a non-negative integer, got {!r}'.format(context, value)) + return value + +class TaggedBase: + @classmethod + def deserialize(cls, value): + try: + return cls.types[value['$type']].deserialize(value) + except KeyError: + raise DeserializationError('Unknown "$type": "{}"'.format(value['$type'])) + +class FileSource(TaggedBase): + RE_CONTENT_RANGE_VALUE = re.compile(r'bytes (\d+)-\d+/(?:\d+|\*)') + + types = {} + + @classmethod + def deserialize(cls, source): + if isinstance(source, str): + source = {'$type': 'http', 'url': source} + return super().deserialize(source) + + @classmethod + def http_range_headers(cls, offset): + if offset == 0: + return {} + + return { + 'Accept-Encoding': 'identity', + 'Range': 'bytes={}-'.format(offset), + } + + @classmethod + def handle_http_response(cls, response, chunk_size): + if response.status_code == requests.codes.partial_content: + match = cls.RE_CONTENT_RANGE_VALUE.fullmatch(response.headers.get('Content-Range', '')) + if not match: + # invalid range reply; return a negative offset to make + # the download logic restart the download. 
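+                # (A well-formed resume reply carries a header such as
+                # "Content-Range: bytes 1048576-14510847/14510848"; group 1 of
+                # RE_CONTENT_RANGE_VALUE is the offset the returned stream
+                # actually starts at, which becomes the continue offset.)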
+ return None, -1 + + return response.iter_content(chunk_size=chunk_size), int(match.group(1)) + + # either we didn't ask for a range, or the server doesn't support ranges + + if 'Content-Range' in response.headers: + # non-partial responses aren't supposed to have range information + return None, -1 + + return response.iter_content(chunk_size=chunk_size), 0 + + +class FileSourceHttp(FileSource): + def __init__(self, url): + self.url = url + + @classmethod + def deserialize(cls, source): + return cls(validate_string('"url"', source['url'])) + + def start_download(self, session, chunk_size, offset): + response = session.get(self.url, stream=True, timeout=DOWNLOAD_TIMEOUT, + headers=self.http_range_headers(offset)) + response.raise_for_status() + + return self.handle_http_response(response, chunk_size) + +FileSource.types['http'] = FileSourceHttp + +class FileSourceGoogleDrive(FileSource): + def __init__(self, id): + self.id = id + + @classmethod + def deserialize(cls, source): + return cls(validate_string('"id"', source['id'])) + + def start_download(self, session, chunk_size, offset): + range_headers = self.http_range_headers(offset) + URL = 'https://docs.google.com/uc?export=download' + response = session.get(URL, params={'id': self.id}, headers=range_headers, + stream=True, timeout=DOWNLOAD_TIMEOUT) + response.raise_for_status() + + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + params = {'id': self.id, 'confirm': value} + response = session.get(URL, params=params, headers=range_headers, + stream=True, timeout=DOWNLOAD_TIMEOUT) + response.raise_for_status() + + return self.handle_http_response(response, chunk_size) + +FileSource.types['google_drive'] = FileSourceGoogleDrive + +class ModelFile: + def __init__(self, name, size, sha256, source): + self.name = name + self.size = size + self.sha256 = sha256 + self.source = source + + @classmethod + def deserialize(cls, file): + name = validate_relative_path('"name"', file['name']) + + with deserialization_context('In file "{}"'.format(name)): + size = validate_nonnegative_int('"size"', file['size']) + + sha256 = validate_string('"sha256"', file['sha256']) + + if not RE_SHA256SUM.fullmatch(sha256): + raise DeserializationError( + '"sha256": got invalid hash {!r}'.format(sha256)) + + with deserialization_context('"source"'): + source = FileSource.deserialize(file['source']) + + return cls(name, size, sha256, source) + +class Postproc(TaggedBase): + types = {} + +class PostprocRegexReplace(Postproc): + def __init__(self, file, pattern, replacement, count): + self.file = file + self.pattern = pattern + self.replacement = replacement + self.count = count + + @classmethod + def deserialize(cls, postproc): + return cls( + validate_relative_path('"file"', postproc['file']), + re.compile(validate_string('"pattern"', postproc['pattern'])), + validate_string('"replacement"', postproc['replacement']), + validate_nonnegative_int('"count"', postproc.get('count', 0)), + ) + + def apply(self, reporter, output_dir): + postproc_file = output_dir / self.file + + reporter.print_section_heading('Replacing text in {}', postproc_file) + + postproc_file_text = postproc_file.read_text(encoding='utf-8') + + orig_file = postproc_file.with_name(postproc_file.name + '.orig') + if not orig_file.exists(): + postproc_file.replace(orig_file) + + postproc_file_text, num_replacements = self.pattern.subn( + self.replacement, postproc_file_text, count=self.count) + + if num_replacements == 0: + raise RuntimeError('Invalid pattern: no 
occurrences found') + + if self.count != 0 and num_replacements != self.count: + raise RuntimeError('Invalid pattern: expected at least {} occurrences, but only {} found'.format( + self.count, num_replacements)) + + postproc_file.write_text(postproc_file_text, encoding='utf-8') + +Postproc.types['regex_replace'] = PostprocRegexReplace + +class PostprocUnpackArchive(Postproc): + def __init__(self, file, format): + self.file = file + self.format = format + + @classmethod + def deserialize(cls, postproc): + return cls( + validate_relative_path('"file"', postproc['file']), + validate_string('"format"', postproc['format']), + ) + + def apply(self, reporter, output_dir): + postproc_file = output_dir / self.file + + reporter.print_section_heading('Unpacking {}', postproc_file) + + shutil.unpack_archive(str(postproc_file), str(postproc_file.parent), self.format) + postproc_file.unlink() # Remove the archive + +Postproc.types['unpack_archive'] = PostprocUnpackArchive + +class Model: + def __init__(self, name, subdirectory, files, postprocessing, mo_args, quantizable, framework, + description, license_url, precisions, task_type, conversion_to_onnx_args): + self.name = name + self.subdirectory = subdirectory + self.files = files + self.postprocessing = postprocessing + self.mo_args = mo_args + self.quantizable = quantizable + self.framework = framework + self.description = description + self.license_url = license_url + self.precisions = precisions + self.task_type = task_type + self.conversion_to_onnx_args = conversion_to_onnx_args + self.converter_to_onnx = KNOWN_FRAMEWORKS[framework] + + @classmethod + def deserialize(cls, model, name, subdirectory): + with deserialization_context('In model "{}"'.format(name)): + if not RE_MODEL_NAME.fullmatch(name): + raise DeserializationError('Invalid name, must consist only of letters, digits or ._-') + + files = [] + file_names = set() + + for file in model['files']: + files.append(ModelFile.deserialize(file)) + + if files[-1].name in file_names: + raise DeserializationError( + 'Duplicate file name "{}"'.format(files[-1].name)) + file_names.add(files[-1].name) + + postprocessing = [] + + for i, postproc in enumerate(model.get('postprocessing', [])): + with deserialization_context('"postprocessing" #{}'.format(i)): + postprocessing.append(Postproc.deserialize(postproc)) + + framework = validate_string_enum('"framework"', model['framework'], KNOWN_FRAMEWORKS.keys()) + + conversion_to_onnx_args = model.get('conversion_to_onnx_args', None) + if KNOWN_FRAMEWORKS[framework]: + if not conversion_to_onnx_args: + raise DeserializationError('"conversion_to_onnx_args" is absent. ' + 'Framework "{}" is supported only by conversion to ONNX.' 
+ .format(framework)) + conversion_to_onnx_args = [validate_string('"conversion_to_onnx_args" #{}'.format(i), arg) + for i, arg in enumerate(model['conversion_to_onnx_args'])] + else: + if conversion_to_onnx_args: + raise DeserializationError('Conversion to ONNX not supported for "{}" framework'.format(framework)) + + quantized = model.get('quantized', None) + if quantized is not None and quantized != 'INT8': + raise DeserializationError('"quantized": expected "INT8", got {!r}'.format(quantized)) + + if 'model_optimizer_args' in model: + mo_args = [validate_string('"model_optimizer_args" #{}'.format(i), arg) + for i, arg in enumerate(model['model_optimizer_args'])] + precisions = {f'FP16-{quantized}', f'FP32-{quantized}'} if quantized is not None else {'FP16', 'FP32'} + else: + if framework != 'dldt': + raise DeserializationError('Model not in IR format, but no conversions defined') + + mo_args = None + + files_per_precision = {} + + # for file in files: + # if len(file.name.parts) != 2: + # raise DeserializationError('Can\'t derive precision from file name {!r}'.format(file.name)) + # p = file.name.parts[0] + # if p not in KNOWN_PRECISIONS: + # raise DeserializationError( + # 'Unknown precision {!r} derived from file name {!r}, expected one of {!r}'.format( + # p, file.name, KNOWN_PRECISIONS)) + # files_per_precision.setdefault(p, set()).add(file.name.parts[1]) + + # for precision, precision_files in files_per_precision.items(): + # for ext in ['xml', 'bin']: + # if (name + '.' + ext) not in precision_files: + # raise DeserializationError('No {} file for precision "{}"'.format(ext.upper(), precision)) + + precisions = set(files_per_precision.keys()) + + quantizable = model.get('quantizable', False) + if not isinstance(quantizable, bool): + raise DeserializationError('"quantizable": expected a boolean, got {!r}'.format(quantizable)) + + description = validate_string('"description"', model['description']) + + license_url = validate_string('"license"', model['license']) + + task_type = validate_string_enum('"task_type"', model['task_type'], KNOWN_TASK_TYPES) + + return cls(name, subdirectory, files, postprocessing, mo_args, quantizable, framework, + description, license_url, precisions, task_type, conversion_to_onnx_args) + +def load_models(args): + models = [] + model_names = set() + + for config_path in sorted(MODEL_ROOT.glob('**/model.yml')): + subdirectory = config_path.parent.relative_to(MODEL_ROOT) + + with config_path.open('rb') as config_file, \ + deserialization_context('In config "{}"'.format(config_path)): + + model = yaml.safe_load(config_file) + + for bad_key in ['name', 'subdirectory']: + if bad_key in model: + raise DeserializationError('Unsupported key "{}"'.format(bad_key)) + + models.append(Model.deserialize(model, subdirectory.name, subdirectory)) + + if models[-1].name in model_names: + raise DeserializationError( + 'Duplicate model name "{}"'.format(models[-1].name)) + model_names.add(models[-1].name) + + return models + +def load_models_or_die(args): + try: + return load_models(args) + except DeserializationError as e: + indent = ' ' + + for i, context in enumerate(e.contexts): + print(indent * i + context + ':', file=sys.stderr) + print(indent * len(e.contexts) + e.problem, file=sys.stderr) + sys.exit(1) + +# requires the --print_all, --all, --name and --list arguments to be in `args` +def load_models_from_args(parser, args): + if args.print_all: + for model in load_models_or_die(args): + print(model.name) + sys.exit() + + filter_args_count = sum([args.all, args.name 
is not None, args.list is not None]) + + if filter_args_count > 1: + parser.error('at most one of "--all", "--name" or "--list" can be specified') + + if filter_args_count == 0: + parser.error('one of "--print_all", "--all", "--name" or "--list" must be specified') + + all_models = load_models_or_die(args) + + if args.all: + return all_models + elif args.name is not None or args.list is not None: + if args.name is not None: + patterns = args.name.split(',') + else: + patterns = [] + with args.list.open() as list_file: + for list_line in list_file: + tokens = shlex.split(list_line, comments=True) + if not tokens: continue + + patterns.append(tokens[0]) + # For now, ignore any other tokens in the line. + # We might use them as additional parameters later. + + models = collections.OrderedDict() # deduplicate models while preserving order + + for pattern in patterns: + matching_models = [model for model in all_models + if fnmatch.fnmatchcase(model.name, pattern)] + + if not matching_models: + sys.exit('No matching models: "{}"'.format(pattern)) + + for model in matching_models: + models[model.name] = model + + return list(models.values()) + +def quote_arg_windows(arg): + if not arg: return '""' + if not re.search(r'\s|"', arg): return arg + # On Windows, only backslashes that precede a quote or the end of the argument must be escaped. + return '"' + re.sub(r'(\\+)$', r'\1\1', re.sub(r'(\\*)"', r'\1\1\\"', arg)) + '"' + +if platform.system() == 'Windows': + quote_arg = quote_arg_windows +else: + quote_arg = shlex.quote + +def command_string(args): + return ' '.join(map(quote_arg, args)) diff --git a/examples/downloader/downloader.py b/examples/downloader/downloader.py new file mode 100755 index 000000000..dd7e84fbe --- /dev/null +++ b/examples/downloader/downloader.py @@ -0,0 +1,385 @@ +#!/usr/bin/env python3 + +""" + Copyright (c) 2018 Intel Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +""" + +import argparse +import contextlib +import functools +import hashlib +import re +import requests +import shutil +import ssl +import sys +import tempfile +import threading +import time +import types + +from pathlib import Path + +import common + +CHUNK_SIZE = 1 << 15 if sys.stdout.isatty() else 1 << 20 + +def process_download(reporter, chunk_iterable, size, progress, file): + start_time = time.monotonic() + start_size = progress.size + + try: + for chunk in chunk_iterable: + reporter.job_context.check_interrupted() + + if chunk: + duration = time.monotonic() - start_time + progress.size += len(chunk) + progress.hasher.update(chunk) + + if duration != 0: + speed = int((progress.size - start_size) / (1024 * duration)) + else: + speed = '?' + + percent = progress.size * 100 // size + + reporter.print_progress('... 
{}%, {} KB, {} KB/s, {} seconds passed', + percent, progress.size // 1024, speed, int(duration)) + reporter.emit_event('model_file_download_progress', size=progress.size) + + file.write(chunk) + + # don't attempt to finish a file if it's bigger than expected + if progress.size > size: + break + finally: + reporter.end_progress() + +def try_download(reporter, file, num_attempts, start_download, size): + progress = types.SimpleNamespace(size=0) + + for attempt in range(num_attempts): + if attempt != 0: + retry_delay = 10 + reporter.print("Will retry in {} seconds...", retry_delay, flush=True) + time.sleep(retry_delay) + + try: + reporter.job_context.check_interrupted() + chunk_iterable, continue_offset = start_download(offset=progress.size) + + if continue_offset not in {0, progress.size}: + # Somehow we neither restarted nor continued from where we left off. + # Try to restart. + chunk_iterable, continue_offset = start_download(offset=0) + if continue_offset != 0: + reporter.log_error("Remote server refuses to send whole file, aborting") + return None + + if continue_offset == 0: + file.seek(0) + file.truncate() + progress.size = 0 + progress.hasher = hashlib.sha256() + + process_download(reporter, chunk_iterable, size, progress, file) + + if progress.size > size: + reporter.log_error("Remote file is longer than expected ({} B), download aborted", size) + # no sense in retrying - if the file is longer, there's no way it'll fix itself + return None + elif progress.size < size: + reporter.log_error("Downloaded file is shorter ({} B) than expected ({} B)", + progress.size, size) + # it's possible that we got disconnected before receiving the full file, + # so try again + else: + return progress.hasher.digest() + except (requests.exceptions.RequestException, ssl.SSLError): + reporter.log_error("Download failed", exc_info=True) + + return None + +def verify_hash(reporter, actual_hash, expected_hash, path): + if actual_hash != bytes.fromhex(expected_hash): + reporter.log_error('Hash mismatch for "{}"', path) + reporter.log_details('Expected: {}', expected_hash) + reporter.log_details('Actual: {}', actual_hash.hex()) + return False + return True + +class NullCache: + def has(self, hash): return False + def get(self, model_file, path, reporter): return False + def put(self, hash, path): pass + +class DirCache: + _FORMAT = 1 # increment if backwards-incompatible changes to the format are made + _HASH_LEN = hashlib.sha256().digest_size * 2 + + def __init__(self, cache_dir): + self._cache_dir = cache_dir / str(self._FORMAT) + self._cache_dir.mkdir(parents=True, exist_ok=True) + + self._staging_dir = self._cache_dir / 'staging' + self._staging_dir.mkdir(exist_ok=True) + + def _hash_path(self, hash): + hash = hash.lower() + assert len(hash) == self._HASH_LEN + assert re.fullmatch('[0-9a-f]+', hash) + return self._cache_dir / hash[:2] / hash[2:] + + def has(self, hash): + return self._hash_path(hash).exists() + + def get(self, model_file, path, reporter): + cache_path = self._hash_path(model_file.sha256) + cache_sha256 = hashlib.sha256() + cache_size = 0 + + with open(cache_path, 'rb') as cache_file, open(path, 'wb') as destination_file: + while True: + data = cache_file.read(CHUNK_SIZE) + if not data: + break + cache_size += len(data) + if cache_size > model_file.size: + reporter.log_error("Cached file is longer than expected ({} B), copying aborted", model_file.size) + return False + cache_sha256.update(data) + destination_file.write(data) + if cache_size < model_file.size: + reporter.log_error("Cached 
file is shorter ({} B) than expected ({} B)", cache_size, model_file.size) + return False + return verify_hash(reporter, cache_sha256.digest(), model_file.sha256, path) + + def put(self, hash, path): + staging_path = None + + try: + # A file in the cache must have the hash implied by its name. So when we upload a file, + # we first copy it to a temporary file and then atomically move it to the desired name. + # This prevents interrupted runs from corrupting the cache. + with path.open('rb') as src_file: + with tempfile.NamedTemporaryFile(dir=str(self._staging_dir), delete=False) as staging_file: + staging_path = Path(staging_file.name) + shutil.copyfileobj(src_file, staging_file) + + hash_path = self._hash_path(hash) + hash_path.parent.mkdir(parents=True, exist_ok=True) + staging_path.replace(self._hash_path(hash)) + staging_path = None + finally: + # If we failed to complete our temporary file or to move it into place, + # get rid of it. + if staging_path: + staging_path.unlink() + +def try_retrieve_from_cache(reporter, cache, model_file, destination): + try: + if cache.has(model_file.sha256): + reporter.job_context.check_interrupted() + + reporter.print_section_heading('Retrieving {} from the cache', destination) + if not cache.get(model_file, destination, reporter): + reporter.print('Will retry from the original source.') + reporter.print() + return False + reporter.print() + return True + except Exception: + reporter.log_warning('Cache retrieval failed; falling back to downloading', exc_info=True) + reporter.print() + + return False + +def try_update_cache(reporter, cache, hash, source): + try: + cache.put(hash, source) + except Exception: + reporter.log_warning('Failed to update the cache', exc_info=True) + +def try_retrieve(reporter, destination, model_file, cache, num_attempts, start_download): + destination.parent.mkdir(parents=True, exist_ok=True) + + if try_retrieve_from_cache(reporter, cache, model_file, destination): + return True + + reporter.print_section_heading('Downloading {}', destination) + + success = False + + with destination.open('w+b') as f: + actual_hash = try_download(reporter, f, num_attempts, start_download, model_file.size) + + if actual_hash and verify_hash(reporter, actual_hash, model_file.sha256, destination): + try_update_cache(reporter, cache, model_file.sha256, destination) + success = True + + reporter.print() + return success + +def download_model(reporter, args, cache, session_factory, requested_precisions, model): + session = session_factory() + + reporter.print_group_heading('Downloading {}', model.name) + + reporter.emit_event('model_download_begin', model=model.name, num_files=len(model.files)) + + output = args.output_dir #/ model.subdirectory + output.mkdir(parents=True, exist_ok=True) + + for model_file in model.files: + # if len(model_file.name.parts) == 2: + # p = model_file.name.parts[0] + # if p in common.KNOWN_PRECISIONS and p not in requested_precisions: + # continue + + model_file_reporter = reporter.with_event_context(model=model.name, model_file=model_file.name.as_posix()) + model_file_reporter.emit_event('model_file_download_begin', size=model_file.size) + + destination = output / model_file.name + + if not try_retrieve(model_file_reporter, destination, model_file, cache, args.num_attempts, + functools.partial(model_file.source.start_download, session, CHUNK_SIZE)): + try: + destination.unlink() + except FileNotFoundError: + pass + + model_file_reporter.emit_event('model_file_download_end', successful=False) + 
reporter.emit_event('model_download_end', model=model.name, successful=False) + return False + + model_file_reporter.emit_event('model_file_download_end', successful=True) + + reporter.emit_event('model_download_end', model=model.name, successful=True) + + if model.postprocessing: + reporter.emit_event('model_postprocessing_begin', model=model.name) + + for postproc in model.postprocessing: + postproc.apply(reporter, output) + + reporter.emit_event('model_postprocessing_end', model=model.name) + + reporter.print() + + return True + + +class DownloaderArgumentParser(argparse.ArgumentParser): + def error(self, message): + sys.stderr.write('error: %s\n' % message) + self.print_help() + sys.exit(2) + +def positive_int_arg(value_str): + try: + value = int(value_str) + if value > 0: return value + except ValueError: + pass + + raise argparse.ArgumentTypeError('must be a positive integer (got {!r})'.format(value_str)) + + +# There is no evidence that the requests.Session class is thread-safe, +# so for safety, we use one Session per thread. This class ensures that +# each thread gets its own Session. +class ThreadSessionFactory: + def __init__(self, exit_stack): + self._lock = threading.Lock() + self._thread_local = threading.local() + self._exit_stack = exit_stack + + def __call__(self): + try: + session = self._thread_local.session + except AttributeError: + with self._lock: # ExitStack might not be thread-safe either + session = self._exit_stack.enter_context(requests.Session()) + self._thread_local.session = session + return session + + +def main(): + parser = DownloaderArgumentParser() + parser.add_argument('--name', metavar='PAT[,PAT...]', + help='download only models whose names match at least one of the specified patterns') + parser.add_argument('--list', type=Path, metavar='FILE.LST', + help='download only models whose names match at least one of the patterns in the specified file') + parser.add_argument('--all', action='store_true', help='download all available models') + parser.add_argument('--print_all', action='store_true', help='print all available models') + parser.add_argument('--precisions', metavar='PREC[,PREC...]', + help='download only models with the specified precisions (actual for DLDT networks)') + parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR', + default=Path.cwd(), help='path where to save models') + parser.add_argument('--cache_dir', type=Path, metavar='DIR', + help='directory to use as a cache for downloaded files') + parser.add_argument('--num_attempts', type=positive_int_arg, metavar='N', default=1, + help='attempt each download up to N times') + parser.add_argument('--progress_format', choices=('text', 'json'), default='text', + help='which format to use for progress reporting') + # unlike Model Converter, -jauto is not supported here, because CPU count has no + # relation to the optimal number of concurrent downloads + parser.add_argument('-j', '--jobs', type=positive_int_arg, metavar='N', default=1, + help='how many downloads to perform concurrently') + + args = parser.parse_args() + + def make_reporter(context): + return common.Reporter(context, + enable_human_output=args.progress_format == 'text', + enable_json_output=args.progress_format == 'json') + + reporter = make_reporter(common.DirectOutputContext()) + + cache = NullCache() if args.cache_dir is None else DirCache(args.cache_dir) + models = common.load_models_from_args(parser, args) + + failed_models = set() + + if args.precisions is None: + requested_precisions = 
common.KNOWN_PRECISIONS + else: + requested_precisions = set(args.precisions.split(',')) + unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS + if unknown_precisions: + sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions)))) + + with contextlib.ExitStack() as exit_stack: + session_factory = ThreadSessionFactory(exit_stack) + if args.jobs == 1: + results = [download_model(reporter, args, cache, session_factory, requested_precisions, model) + for model in models] + else: + results = common.run_in_parallel(args.jobs, + lambda context, model: download_model( + make_reporter(context), args, cache, session_factory, requested_precisions, model), + models) + + failed_models = {model.name for model, successful in zip(models, results) if not successful} + + if failed_models: + reporter.print('FAILED:') + for failed_model_name in failed_models: + reporter.print(failed_model_name) + sys.exit(1) + +if __name__ == '__main__': + main() diff --git a/examples/models/.gitignore b/examples/models/.gitignore new file mode 100644 index 000000000..2d85c3ce1 --- /dev/null +++ b/examples/models/.gitignore @@ -0,0 +1,2 @@ +*.blob +*.mp4 \ No newline at end of file diff --git a/examples/models/mobilenet-ssd/model.yml b/examples/models/mobilenet-ssd/model.yml new file mode 100644 index 000000000..bbc89411d --- /dev/null +++ b/examples/models/mobilenet-ssd/model.yml @@ -0,0 +1,33 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +description: >- + mobilenet-ssd +task_type: object_attributes +files: + - name: mobilenet-ssd_openvino_2021.2_6shave.blob + size: 14510848 + sha256: 5150d0e5d18abd0ecb21c8280e09870977358c04a7d2cfa539e1e0f6c2a93e71 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_6shave.blob + - name: mobilenet-ssd_openvino_2021.2_5shave.blob + size: 14531840 + sha256: c682a0f9be33ce601ce460abc580e3488ced413a7c597dfab4b74ea407d7c6d6 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_5shave.blob + - name: mobilenet-ssd_openvino_2021.2_8shave.blob + size: 14505024 + sha256: e0c60156ee97b01ac115ad838d13c8d90559064fec04c6d423bb03fdc40524eb + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_8shave.blob + +framework: dldt +license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE diff --git a/examples/models/tiny-yolo/model.yml b/examples/models/tiny-yolo/model.yml new file mode 100644 index 000000000..6f8ddb454 --- /dev/null +++ b/examples/models/tiny-yolo/model.yml @@ -0,0 +1,29 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +description: >- + tiny-yolo +task_type: object_attributes +files: + - name: tiny-yolo-v3_openvino_2021.2_6shave.blob + size: 17752512 + sha256: 274540d3010765fbe505e2ba6bb5e380c021c2c0c13d7f9d1672fd4af38b8d15 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v3_openvino_2021.2_6shave.blob + - name: tiny-yolo-v4_openvino_2021.2_6shave.blob + size: 12172416 + sha256: 984c50e229652be9c25092df487185eae29e80e4ad7964ff3632f477dbf5e851 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v4_openvino_2021.2_6shave.blob + +framework: dldt +license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE diff --git a/examples/models/video-resources/model.yml b/examples/models/video-resources/model.yml new file mode 100644 index 000000000..224b3c616 --- /dev/null +++ b/examples/models/video-resources/model.yml @@ -0,0 +1,26 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
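+
+# Not an actual network: this entry reuses the model.yml schema so that the
+# downloader also fetches the sample video used by 17_video_mobilenet.py.
+# The "framework" and "task_type" keys below are present only to satisfy
+# the schema validation in downloader/common.py.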
+ +description: >- + video-resources +task_type: object_attributes +files: + - name: construction_vest.mp4 + size: 5423604 + sha256: 2f35ea35a41e98ee17dc9136c495ed0ff3aa7ba6774d5eedc2b9935350c6084f + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/construction_vest.mp4 + + +framework: dldt +license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE From 80fc0e05ac7715b193dd6cc41d1676deb1792308 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Thu, 1 Apr 2021 22:57:27 +0300 Subject: [PATCH 2/9] Download models on install_requirements.py --- examples/install_requirements.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/examples/install_requirements.py b/examples/install_requirements.py index f118dda48..d8f53072b 100755 --- a/examples/install_requirements.py +++ b/examples/install_requirements.py @@ -6,7 +6,7 @@ import find_version # 3rdparty dependencies to install -DEPENDENCIES = ['opencv-python'] +DEPENDENCIES = ['opencv-python', 'pyyaml', 'requests'] # Constants ARTIFACTORY_URL = 'https://artifacts.luxonis.com/artifactory/luxonis-python-snapshot-local' @@ -61,3 +61,6 @@ if not success: print("Couldn't install dependencies as wheels and trying to compile from sources failed") print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") + +# current dir was changed to root of depthai-python +subprocess.check_call([sys.executable, "examples/downloader/downloader.py", "--all", "--cache_dir", "examples/downloader/", "-o", "examples/models"]) From 26f0db593b81b1d5b918a2731735fbf67df96332 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Thu, 1 Apr 2021 23:02:31 +0300 Subject: [PATCH 3/9] Change to absolute examples path --- examples/install_requirements.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/examples/install_requirements.py b/examples/install_requirements.py index d8f53072b..cd0bcf1f3 100755 --- a/examples/install_requirements.py +++ b/examples/install_requirements.py @@ -1,5 +1,7 @@ #!/usr/bin/env python3 import sys, os, subprocess +examples_dir = os.path.dirname(os.path.abspath(__file__)) + parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) os.chdir(parent_dir) sys.path.insert(1, parent_dir) @@ -62,5 +64,4 @@ print("Couldn't install dependencies as wheels and trying to compile from sources failed") print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") -# current dir was changed to root of depthai-python -subprocess.check_call([sys.executable, "examples/downloader/downloader.py", "--all", "--cache_dir", "examples/downloader/", "-o", "examples/models"]) +subprocess.check_call([sys.executable, f"{examples_dir}/downloader/downloader.py", "--all", "--cache_dir", f"{examples_dir}/downloader/", "--num_attempts", "5", "-o", f"{examples_dir}/models"]) From b8538c877f85ee416f7c09df47094f62cca525f5 Mon Sep 17 00:00:00 2001 From: Erol444 Date: Sat, 3 Apr 2021 12:44:04 +0200 Subject: [PATCH 4/9] added notification when required file/s doesn't exist and prompt user to run install_requirements.py. 
Also changed the path to the video in demo 17 --- examples/08_rgb_mobilenet.py | 3 +++ examples/09_mono_mobilenet.py | 2 ++ examples/10_mono_depth_mobilenetssd.py | 2 ++ examples/11_rgb_encoding_mono_mobilenet.py | 2 ++ examples/12_rgb_encoding_mono_mobilenet_depth.py | 2 ++ examples/15_rgb_mobilenet_4k.py | 3 +++ examples/17_video_mobilenet.py | 8 ++++++-- examples/18_rgb_encoding_mobilenet.py | 2 ++ examples/22_1_tiny_yolo_v3_device_side_decoding.py | 3 +++ examples/22_2_tiny_yolo_v4_device_side_decoding.py | 3 +++ examples/23_autoexposure_roi.py | 3 +++ examples/26_1_spatial_mobilenet.py | 3 +++ examples/26_2_spatial_mobilenet_mono.py | 3 +++ examples/26_3_spatial_tiny_yolo.py | 3 +++ 14 files changed, 40 insertions(+), 2 deletions(-) diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index d17104260..e8f0bf9cf 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -13,6 +13,9 @@ parser.add_argument('-s', '--sync', action="store_true", help="Sync RGB output with NN output", default=False) args = parser.parse_args() +if not Path(nnPathDefault).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index a98bec289..37d099d35 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index eac21f9fb..3b9b676e1 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index 98d75b31d..565a8fac5 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py +++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index b1cff44f1..9a6e554ad 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index 4459339db..a32a826f1 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -11,6 +11,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/17_video_mobilenet.py 
b/examples/17_video_mobilenet.py index dca49608c..e90c7c065 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -8,12 +8,16 @@ from time import monotonic # Get argument first -nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_8shave.blob')).resolve().absolute()) -videoPath = str(Path("./construction_vest.mp4").resolve().absolute()) +parentDir = Path(__file__).parent +nnPath = str((parentDir / Path('models/mobilenet-ssd_openvino_2021.2_8shave.blob')).resolve().absolute()) +videoPath = str((parentDir / Path('models/construction_vest.mp4')).resolve().absolute()) if len(sys.argv) > 2: nnPath = sys.argv[1] videoPath = sys.argv[2] +if not Path(nnPath).exists() or not Path(videoPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/18_rgb_encoding_mobilenet.py b/examples/18_rgb_encoding_mobilenet.py index edf3be161..df5579eca 100755 --- a/examples/18_rgb_encoding_mobilenet.py +++ b/examples/18_rgb_encoding_mobilenet.py @@ -11,6 +11,8 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index 36b005225..4ac58b1a6 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -38,6 +38,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index 4f638ee55..3bb2d2b79 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -37,6 +37,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index 8ce0fa11a..bacad35d2 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -14,6 +14,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + previewSize = (300, 300) # Start defining a pipeline diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index e0f522b11..ed59c1e9a 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -23,6 +23,9 @@ if len(sys.argv) > 1: nnBlobPath = sys.argv[1] +if not Path(nnBlobPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index 7e17bc2a0..5e5a3a2f8 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -26,6 +26,9 @@ if len(sys.argv) > 1: nnPath = sys.argv[1] +if not Path(nnPath).exists(): 
+ raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index e7316d85c..6c1faf9c1 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -36,6 +36,9 @@ if len(sys.argv) > 1: nnBlobPath = sys.argv[1] +if not Path(nnBlobPath).exists(): + raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + # Start defining a pipeline pipeline = dai.Pipeline() From 968264031f2ab450ff81655b5bd53f34ab900d7b Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 21:40:32 +0300 Subject: [PATCH 5/9] Refactor tests; use install_requirements.py script --- examples/CMakeLists.txt | 121 +++++++++++++------------------ examples/install_requirements.py | 75 ++++++++++--------- 2 files changed, 91 insertions(+), 105 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index 93ff2e6b7..4308e43ed 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -12,6 +12,15 @@ if(UNIX) set(SYS_PATH_SEPARATOR ":") endif() +add_custom_target(install_requirements + # Python path (to find compiled module) + "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" + # Example + COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/install_requirements.py" "--skip_depthai" + DEPENDS ${TARGET_NAME} + VERBATIM + COMMAND_EXPAND_LISTS +) # Macro for adding new python test macro(add_python_example example_name python_script_path) @@ -20,89 +29,59 @@ macro(add_python_example example_name python_script_path) list(REMOVE_AT arguments 0 1) # Creates a target (python my_test [args]) - add_custom_target(${example_name} - ${CMAKE_COMMAND} -E env + add_custom_target(${example_name} + ${CMAKE_COMMAND} -E env # Environment variables # Python path (to find compiled module) "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" # ASAN in case of sanitizers - "${ASAN_ENVIRONMENT_VARS}" + "${ASAN_ENVIRONMENT_VARS}" # Example ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_LIST_DIR}/${python_script_path} ${ARGN} - DEPENDS ${TARGET_NAME} + DEPENDS ${TARGET_NAME} install_requirements VERBATIM COMMAND_EXPAND_LISTS ) if(DEPTHAI_PYTHON_TEST_EXAMPLES) - + # Adds test with 5 seconds timeout and bumps all python warnings to errors - add_test(NAME ${example_name} COMMAND - ${CMAKE_COMMAND} -E env + add_test(NAME ${example_name} COMMAND + ${CMAKE_COMMAND} -E env # Python path (to find compiled module) "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" # ASAN in case of sanitizers ${ASAN_ENVIRONMENT_VARS} - ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=5 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake + ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=5 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake # Actual script to run ${PYTHON_EXECUTABLE} -Werror "${CMAKE_CURRENT_LIST_DIR}/${python_script_path}" ${arguments} ) # Sets a regex catching any logged warnings, errors or critical (coming either from device or host) - set_tests_properties (${example_name} PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") + set_tests_properties (${example_name} PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") endif() -endmacro() - -# Mobilenet resource -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_6shave.blob" - SHA1 
"f0e14978b3f77a4f93b9f969cd39e58bb7aef490" - FILE "mobilenet-ssd_openvino_2021.2_6shave.blob" - LOCATION mobilenet_blob -) - -# Mobilenet resource -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_5shave.blob" - SHA1 "d715f85e474609cf3f696d7a2e3750804ed6c726" - FILE "mobilenet-ssd_openvino_2021.2_5shave.blob" - LOCATION mobilenet_5shave_blob -) - -# Construction vest video resource -hunter_private_data( - URL "http://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/construction_vest.mp4" - SHA1 "271d8d0b702e683ce02957db7c100843de5ceaec" - FILE "construction_vest.mp4" - LOCATION construction_vest -) +endmacro() -# tiny-YoloV3 neural network resource -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v3_openvino_2021.2_6shave.blob" - SHA1 "f0ac263a0d55c374e1892eea21c9b7d1170bde46" - FILE "tiny-yolo-v3_openvino_2021.2_6shave.blob" - LOCATION tiny_yolo_v3_blob -) +if(DEPTHAI_PYTHON_TEST_EXAMPLES) -# tiny-YoloV4 neural network resource -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/tiny-yolo-v4_openvino_2021.2_6shave.blob" - SHA1 "219d949610a5760e62a8458941e1300b81c3fe4a" - FILE "tiny-yolo-v4_openvino_2021.2_6shave.blob" - LOCATION tiny_yolo_v4_blob -) + # Adds install requirements test with 5 minute timeout + add_test(NAME install_requirements COMMAND + ${CMAKE_COMMAND} -E env + # Python path (to find compiled module) + "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" + # ASAN in case of sanitizers + ${ASAN_ENVIRONMENT_VARS} + ${CMAKE_COMMAND} -DTIMEOUT_SECONDS=300 -P ${CMAKE_CURRENT_LIST_DIR}/cmake/ExecuteTestTimeout.cmake + # Actual script to run + ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/install_requirements.py" "--skip_depthai" + ) -# NeuralNetwork node, mobilenet example, 8 shaves -hunter_private_data( - URL "https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/mobilenet-ssd_openvino_2021.2_8shave.blob" - SHA1 "3329bb8f3a9c881ef9756d232055f9d6f38aa07b" - FILE "mobilenet-ssd_openvino_2021.2_8shave.blob" - LOCATION mobilenet_8shaves_blob -) + # Sets a regex catching any logged warnings, errors or critical (coming either from device or host) + set_tests_properties (install_requirements PROPERTIES FAIL_REGULAR_EXPRESSION "\\[warning\\];\\[error\\];\\[critical\\]") +endif() # Add examples add_python_example(01_rgb_preview 01_rgb_preview.py) @@ -112,28 +91,28 @@ add_python_example(04_rgb_encoding 04_rgb_encoding.py) add_python_example(05_rgb_mono_encoding 05_rgb_mono_encoding.py) add_python_example(06_rgb_full_resolution_saver 06_rgb_full_resolution_saver.py) add_python_example(07_mono_full_resolution_saver 07_mono_full_resolution_saver.py) -add_python_example(08_rgb_mobilenet 08_rgb_mobilenet.py "${mobilenet_blob}") -add_python_example(09_mono_mobilenet 09_mono_mobilenet.py "${mobilenet_blob}") -add_python_example(10_mono_depth_mobilenetssd 10_mono_depth_mobilenetssd.py "${mobilenet_blob}") -add_python_example(11_rgb_encoding_mono_mobilenet 11_rgb_encoding_mono_mobilenet.py "${mobilenet_blob}") -add_python_example(12_rgb_encoding_mono_mobilenet_depth 12_rgb_encoding_mono_mobilenet_depth.py "${mobilenet_blob}") +add_python_example(08_rgb_mobilenet 08_rgb_mobilenet.py) +add_python_example(09_mono_mobilenet 09_mono_mobilenet.py) +add_python_example(10_mono_depth_mobilenetssd 10_mono_depth_mobilenetssd.py) 
+add_python_example(11_rgb_encoding_mono_mobilenet 11_rgb_encoding_mono_mobilenet.py) +add_python_example(12_rgb_encoding_mono_mobilenet_depth 12_rgb_encoding_mono_mobilenet_depth.py) add_python_example(13_encoding_max_limit 13_encoding_max_limit.py) add_python_example(14_color_camera_control 14_color_camera_control.py) -add_python_example(15_rgb_mobilenet_4k 15_rgb_mobilenet_4k.py "${mobilenet_5shave_blob}") +add_python_example(15_rgb_mobilenet_4k 15_rgb_mobilenet_4k.py) add_python_example(16_device_queue_event 16_device_queue_event.py) -add_python_example(17_video_mobilenet 17_video_mobilenet.py "${mobilenet_8shaves_blob}" "${construction_vest}") -add_python_example(18_rgb_encoding_mobilenet 18_rgb_encoding_mobilenet.py "${mobilenet_blob}") +add_python_example(17_video_mobilenet 17_video_mobilenet.py) +add_python_example(18_rgb_encoding_mobilenet 18_rgb_encoding_mobilenet.py) add_python_example(19_mono_camera_control 19_mono_camera_control.py) add_python_example(20_color_rotate_warp 20_color_rotate_warp.py) -add_python_example(21_mobilenet_device_side_decoding 21_mobilenet_device_side_decoding.py "${mobilenet_blob}") -add_python_example(22_1_tiny_yolo_v3_device_side_decoding 22_1_tiny_yolo_v3_device_side_decoding.py "${tiny_yolo_v3_blob}") -add_python_example(22_2_tiny_yolo_v4_device_side_decoding 22_2_tiny_yolo_v4_device_side_decoding.py "${tiny_yolo_v4_blob}") -add_python_example(23_autoexposure_roi 23_autoexposure_roi.py "${mobilenet_blob}") +add_python_example(21_mobilenet_device_side_decoding 21_mobilenet_device_side_decoding.py) +add_python_example(22_1_tiny_yolo_v3_device_side_decoding 22_1_tiny_yolo_v3_device_side_decoding.py) +add_python_example(22_2_tiny_yolo_v4_device_side_decoding 22_2_tiny_yolo_v4_device_side_decoding.py) +add_python_example(23_autoexposure_roi 23_autoexposure_roi.py) add_python_example(24_opencv_support 24_opencv_support.py) add_python_example(25_system_information 25_system_information.py) -add_python_example(26_1_spatial_mobilenet 26_1_spatial_mobilenet.py "${mobilenet_blob}") -add_python_example(26_2_spatial_mobilenet_mono 26_2_spatial_mobilenet_mono.py "${mobilenet_blob}") -add_python_example(26_3_spatial_tiny_yolo_v3 26_3_spatial_tiny_yolo.py "${tiny_yolo_v3_blob}") -add_python_example(26_3_spatial_tiny_yolo_v4 26_3_spatial_tiny_yolo.py "${tiny_yolo_v4_blob}") +add_python_example(26_1_spatial_mobilenet 26_1_spatial_mobilenet.py) +add_python_example(26_2_spatial_mobilenet_mono 26_2_spatial_mobilenet_mono.py) +add_python_example(26_3_spatial_tiny_yolo_v3 26_3_spatial_tiny_yolo.py) +add_python_example(26_3_spatial_tiny_yolo_v4 26_3_spatial_tiny_yolo.py) add_python_example(27_spatial_location_calculator 27_spatial_location_calculator.py) add_python_example(28_camera_video_example 28_camera_video_example.py) diff --git a/examples/install_requirements.py b/examples/install_requirements.py index cd0bcf1f3..8d01bd3c4 100755 --- a/examples/install_requirements.py +++ b/examples/install_requirements.py @@ -1,5 +1,11 @@ #!/usr/bin/env python3 import sys, os, subprocess +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-sdai', "--skip_depthai", action="store_true", help="Skip installation of depthai library.") +args = parser.parse_args() + examples_dir = os.path.dirname(os.path.abspath(__file__)) parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) @@ -25,43 +31,44 @@ # Install opencv-python subprocess.check_call([*pip_install, *DEPENDENCIES]) -# Check if in git context and retrieve some information -git_context = 
True -git_commit = "" -git_branch = "" -try: - git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('UTF-8').strip() - git_branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('UTF-8').strip() -except (OSError, subprocess.CalledProcessError) as e: - git_context = False - -# Install depthai depending on context -if not git_context or git_branch == 'main': - # Install latest pypi depthai release - subprocess.check_call([*pip_install, '-U', '--force-reinstall', 'depthai']) -elif git_context: +if not args.skip_depthai: + # Check if in git context and retrieve some information + git_context = True + git_commit = "" + git_branch = "" try: - subprocess.check_output(['git', 'submodule', 'update', '--init', '--recursive']) + git_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode('UTF-8').strip() + git_branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('UTF-8').strip() except (OSError, subprocess.CalledProcessError) as e: - print("git submodule update failed!") - raise - # Get package version if in git context - final_version = find_version.get_package_dev_version(git_commit) - # Install latest built wheels from artifactory (0.0.0.0+[hash] or [version]+[hash]) - commands = [[*pip_install, "--extra-index-url", ARTIFACTORY_URL, "depthai=="+final_version], - [*pip_install, "."]] - success = False - for command in commands: + git_context = False + + # Install depthai depending on context + if not git_context or git_branch == 'main': + # Install latest pypi depthai release + subprocess.check_call([*pip_install, '-U', '--force-reinstall', 'depthai']) + elif git_context: try: - success = subprocess.call(command) == 0 - except (OSError, subprocess.CalledProcessError) as e: - success = False - if success: - break + subprocess.check_output(['git', 'submodule', 'update', '--init', '--recursive']) + except (OSError, subprocess.CalledProcessError) as e: + print("git submodule update failed!") + raise + # Get package version if in git context + final_version = find_version.get_package_dev_version(git_commit) + # Install latest built wheels from artifactory (0.0.0.0+[hash] or [version]+[hash]) + commands = [[*pip_install, "--extra-index-url", ARTIFACTORY_URL, "depthai=="+final_version], + [*pip_install, "."]] + success = False + for command in commands: + try: + success = subprocess.call(command) == 0 + except (OSError, subprocess.CalledProcessError) as e: + success = False + if success: + break - # If all commands failed - if not success: - print("Couldn't install dependencies as wheels and trying to compile from sources failed") - print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") + # If all commands failed + if not success: + print("Couldn't install dependencies as wheels and trying to compile from sources failed") + print("Check https://github.com/luxonis/depthai-python#dependencies on retrieving dependencies for compiling from sources") subprocess.check_call([sys.executable, f"{examples_dir}/downloader/downloader.py", "--all", "--cache_dir", f"{examples_dir}/downloader/", "--num_attempts", "5", "-o", f"{examples_dir}/models"]) From 1905e0b3e8a8e5a5dd9f3b1c5c0f24b5cf4a4911 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 21:43:57 +0300 Subject: [PATCH 6/9] Change RuntimeError to FileNotFoundError --- examples/08_rgb_mobilenet.py | 2 +- examples/09_mono_mobilenet.py | 2 +- examples/10_mono_depth_mobilenetssd.py | 
2 +- examples/11_rgb_encoding_mono_mobilenet.py | 2 +- examples/12_rgb_encoding_mono_mobilenet_depth.py | 2 +- examples/15_rgb_mobilenet_4k.py | 2 +- examples/17_video_mobilenet.py | 2 +- examples/18_rgb_encoding_mobilenet.py | 2 +- examples/22_1_tiny_yolo_v3_device_side_decoding.py | 2 +- examples/22_2_tiny_yolo_v4_device_side_decoding.py | 2 +- examples/23_autoexposure_roi.py | 2 +- examples/26_1_spatial_mobilenet.py | 2 +- examples/26_2_spatial_mobilenet_mono.py | 2 +- examples/26_3_spatial_tiny_yolo.py | 2 +- 14 files changed, 14 insertions(+), 14 deletions(-) diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index e8f0bf9cf..62c4dc30c 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -14,7 +14,7 @@ args = parser.parse_args() if not Path(nnPathDefault).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index 37d099d35..01d1a616d 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index 3b9b676e1..b149811a2 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/11_rgb_encoding_mono_mobilenet.py b/examples/11_rgb_encoding_mono_mobilenet.py index 565a8fac5..c0096f5d9 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py +++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index 9a6e554ad..e68e47a7b 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index a32a826f1..635dc2b50 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise 
FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index e90c7c065..0499dd0dc 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -16,7 +16,7 @@ videoPath = sys.argv[2] if not Path(nnPath).exists() or not Path(videoPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/18_rgb_encoding_mobilenet.py b/examples/18_rgb_encoding_mobilenet.py index df5579eca..3581bd3a8 100755 --- a/examples/18_rgb_encoding_mobilenet.py +++ b/examples/18_rgb_encoding_mobilenet.py @@ -12,7 +12,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") pipeline = dai.Pipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index 4ac58b1a6..da6dbeb98 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -39,7 +39,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index 3bb2d2b79..a0791d4a7 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -38,7 +38,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index bacad35d2..c18027a18 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -15,7 +15,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") previewSize = (300, 300) diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index ed59c1e9a..9e3d3e7c1 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -24,7 +24,7 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index 5e5a3a2f8..d335cc633 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ 
b/examples/26_2_spatial_mobilenet_mono.py @@ -27,7 +27,7 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index 005b86ff8..6a9c35f1a 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -37,7 +37,7 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise RuntimeError("Required file/s not found, please run 'python3 install_requirements.py'") + raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") # Start defining a pipeline pipeline = dai.Pipeline() From 1a40674ed6ba643b430efb41dbc7067effc25608 Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 21:52:17 +0300 Subject: [PATCH 7/9] Use sys.executable instead of python3 so the suggested install command targets the current interpreter --- examples/08_rgb_mobilenet.py | 3 ++- examples/09_mono_mobilenet.py | 3 ++- examples/10_mono_depth_mobilenetssd.py | 3 ++- examples/11_rgb_encoding_mono_mobilenet.py | 3 ++- examples/12_rgb_encoding_mono_mobilenet_depth.py | 3 ++- examples/15_rgb_mobilenet_4k.py | 3 ++- examples/17_video_mobilenet.py | 3 ++- examples/18_rgb_encoding_mobilenet.py | 3 ++- examples/22_1_tiny_yolo_v3_device_side_decoding.py | 3 ++- examples/22_2_tiny_yolo_v4_device_side_decoding.py | 3 ++- examples/23_autoexposure_roi.py | 3 ++- examples/26_1_spatial_mobilenet.py | 3 ++- examples/26_2_spatial_mobilenet_mono.py | 3 ++- examples/26_3_spatial_tiny_yolo.py | 3 ++- 14 files changed, 28 insertions(+), 14 deletions(-) diff --git a/examples/08_rgb_mobilenet.py b/examples/08_rgb_mobilenet.py index 62c4dc30c..02f73fcc0 100755 --- a/examples/08_rgb_mobilenet.py +++ b/examples/08_rgb_mobilenet.py @@ -14,7 +14,8 @@ args = parser.parse_args() if not Path(nnPathDefault).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/09_mono_mobilenet.py b/examples/09_mono_mobilenet.py index 01d1a616d..1b8d43a5d 100755 --- a/examples/09_mono_mobilenet.py +++ b/examples/09_mono_mobilenet.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/10_mono_depth_mobilenetssd.py b/examples/10_mono_depth_mobilenetssd.py index b149811a2..84bb48515 100755 --- a/examples/10_mono_depth_mobilenetssd.py +++ b/examples/10_mono_depth_mobilenetssd.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/11_rgb_encoding_mono_mobilenet.py 
b/examples/11_rgb_encoding_mono_mobilenet.py index c0096f5d9..56745eb2c 100755 --- a/examples/11_rgb_encoding_mono_mobilenet.py +++ b/examples/11_rgb_encoding_mono_mobilenet.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') pipeline = dai.Pipeline() diff --git a/examples/12_rgb_encoding_mono_mobilenet_depth.py b/examples/12_rgb_encoding_mono_mobilenet_depth.py index e68e47a7b..fb2e21674 100755 --- a/examples/12_rgb_encoding_mono_mobilenet_depth.py +++ b/examples/12_rgb_encoding_mono_mobilenet_depth.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') pipeline = dai.Pipeline() diff --git a/examples/15_rgb_mobilenet_4k.py b/examples/15_rgb_mobilenet_4k.py index 635dc2b50..23f27633c 100755 --- a/examples/15_rgb_mobilenet_4k.py +++ b/examples/15_rgb_mobilenet_4k.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/17_video_mobilenet.py b/examples/17_video_mobilenet.py index 0499dd0dc..3e4369e7e 100755 --- a/examples/17_video_mobilenet.py +++ b/examples/17_video_mobilenet.py @@ -16,7 +16,8 @@ videoPath = sys.argv[2] if not Path(nnPath).exists() or not Path(videoPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/18_rgb_encoding_mobilenet.py b/examples/18_rgb_encoding_mobilenet.py index 3581bd3a8..387434887 100755 --- a/examples/18_rgb_encoding_mobilenet.py +++ b/examples/18_rgb_encoding_mobilenet.py @@ -12,7 +12,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') pipeline = dai.Pipeline() diff --git a/examples/22_1_tiny_yolo_v3_device_side_decoding.py b/examples/22_1_tiny_yolo_v3_device_side_decoding.py index da6dbeb98..2f22660cf 100755 --- a/examples/22_1_tiny_yolo_v3_device_side_decoding.py +++ b/examples/22_1_tiny_yolo_v3_device_side_decoding.py @@ -39,7 +39,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/22_2_tiny_yolo_v4_device_side_decoding.py b/examples/22_2_tiny_yolo_v4_device_side_decoding.py index a0791d4a7..4af45b65d 100755 --- a/examples/22_2_tiny_yolo_v4_device_side_decoding.py +++ 
b/examples/22_2_tiny_yolo_v4_device_side_decoding.py @@ -38,7 +38,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index c18027a18..cc34878bd 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -15,7 +15,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') previewSize = (300, 300) diff --git a/examples/26_1_spatial_mobilenet.py b/examples/26_1_spatial_mobilenet.py index 9e3d3e7c1..63fdcd88f 100755 --- a/examples/26_1_spatial_mobilenet.py +++ b/examples/26_1_spatial_mobilenet.py @@ -24,7 +24,8 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_2_spatial_mobilenet_mono.py b/examples/26_2_spatial_mobilenet_mono.py index d335cc633..f15260847 100755 --- a/examples/26_2_spatial_mobilenet_mono.py +++ b/examples/26_2_spatial_mobilenet_mono.py @@ -27,7 +27,8 @@ nnPath = sys.argv[1] if not Path(nnPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() diff --git a/examples/26_3_spatial_tiny_yolo.py b/examples/26_3_spatial_tiny_yolo.py index 005b86ff8..6a9c35f1a 100755 --- a/examples/26_3_spatial_tiny_yolo.py +++ b/examples/26_3_spatial_tiny_yolo.py @@ -37,7 +37,8 @@ nnBlobPath = sys.argv[1] if not Path(nnBlobPath).exists(): - raise FileNotFoundError("Required file/s not found, please run 'python3 install_requirements.py'") + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') # Start defining a pipeline pipeline = dai.Pipeline() From b9df3964c85f163f8c2f6016e0d332b1e56c4f8f Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Sat, 3 Apr 2021 22:05:54 +0300 Subject: [PATCH 8/9] Fix failing example due to warning --- examples/23_autoexposure_roi.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/23_autoexposure_roi.py b/examples/23_autoexposure_roi.py index cc34878bd..1bd53e66c 100755 --- a/examples/23_autoexposure_roi.py +++ b/examples/23_autoexposure_roi.py @@ -10,7 +10,7 @@ # Press N to go back to the region controlled by the NN detections. 
# Get argument first -nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_5shave.blob')).resolve().absolute()) +nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute()) if len(sys.argv) > 1: nnPath = sys.argv[1] From 72eefcd2ea859e78fb22a22762d52eaed60315fa Mon Sep 17 00:00:00 2001 From: SzabolcsGergely Date: Tue, 6 Apr 2021 22:11:07 +0300 Subject: [PATCH 9/9] Remove PYTHONPATH from install_requirements target --- examples/CMakeLists.txt | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index e7c0abf3a..9cace48a4 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -13,9 +13,6 @@ if(UNIX) endif() add_custom_target(install_requirements - # Python path (to find compiled module) - "PYTHONPATH=$${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}" - # Example COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/install_requirements.py" "--skip_depthai" DEPENDS ${TARGET_NAME} VERBATIM
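
Taken together, patches 5 through 9 leave the examples in a consistent state: install_requirements.py fetches the model blobs into examples/models through the bundled downloader, CMake drives that script via an install_requirements target, and each example fails fast when its blob is missing, with a hint that quotes the running interpreter rather than a hard-coded python3. A minimal sketch of that guard pattern follows; the helper name require_blob is illustrative only (the patches inline the check in each example), and the default path mirrors the 6-shave blob used by 23_autoexposure_roi.py after patch 8:

#!/usr/bin/env python3
import sys
from pathlib import Path

def require_blob(path):
    # Fail early with the interpreter-specific hint introduced in patches 6 and 7:
    # FileNotFoundError instead of RuntimeError, and sys.executable instead of python3.
    if not Path(path).exists():
        raise FileNotFoundError(
            f'Required file/s not found, please run "{sys.executable} install_requirements.py"')

# Default blob location, overridable from the command line as in the examples.
nnPath = str((Path(__file__).parent / Path('models/mobilenet-ssd_openvino_2021.2_6shave.blob')).resolve().absolute())
if len(sys.argv) > 1:
    nnPath = sys.argv[1]
require_blob(nnPath)

Quoting sys.executable matters because the CMake targets launch the examples with ${PYTHON_EXECUTABLE}, which need not be the python3 on the user's PATH; the suggested command therefore installs into the same interpreter that failed to find the blob.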