diff --git a/.github/condarc.yml b/.github/condarc.yml
index 6c32d74..85e3c8e 100644
--- a/.github/condarc.yml
+++ b/.github/condarc.yml
@@ -1,6 +1,5 @@
 anaconda_upload: false
 channels:
   - zeroae
-  - defaults
   - conda-forge
-show_channel_urls: true
\ No newline at end of file
+show_channel_urls: true
diff --git a/.github/workflows/pypa-conda.yml b/.github/workflows/pypa-conda.yml
index 38c4bb6..0442a14 100644
--- a/.github/workflows/pypa-conda.yml
+++ b/.github/workflows/pypa-conda.yml
@@ -22,13 +22,13 @@ jobs:
     strategy:
       matrix:
         os: [ubuntu-latest]
-        python-version: [3.6]
+        python-version: [3.7]
         black-version: [19.10b]
         flake8-version: [3.7.9]
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/checkout@v1
-      - uses: actions/setup-python@v1.1.1
+      - uses: actions/setup-python@v2.2.2
         with:
           python-version: ${{ matrix.python-version }}
       - uses: actions/cache@v1
@@ -75,7 +75,7 @@ jobs:
           rm -f *.tar.gz

       # Create the bdist wheel file
-      - uses: actions/setup-python@v1.1.1
+      - uses: actions/setup-python@v2.2.2
       - uses: actions/cache@v1
         id: cache
         with:
@@ -152,7 +152,7 @@ jobs:

       #
       # Setup the test environment, python + .whl + .whl[test]
-      - uses: actions/setup-python@v1.1.1
+      - uses: actions/setup-python@v2.2.2
         with:
           python-version: ${{ matrix.python-version }}
       - name: Get pip cache
@@ -211,13 +211,16 @@ jobs:
           tar -xvf *.tar.gz --strip 1
           rm *.tar.gz

-      - uses: goanpeca/setup-miniconda@v1
+      - uses: conda-incubator/setup-miniconda@v2
         with:
           activate-environment: ''
           auto-activate-base: true
-          conda-build-version: 3.18
+          miniforge-variant: Mambaforge
+          use-mamba: true
+          conda-build-version: 3.21.4
           condarc-file: .github/condarc.yml
-      - run: conda install setuptools_scm conda-verify
+      - run: |
+          mamba install setuptools_scm conda-verify boa
       - uses: actions/cache@v1
         id: conda-pkgs-cache
         with:
@@ -228,7 +231,7 @@ jobs:
       - name: Run conda build
         run: |
           mkdir conda-bld
-          conda build --output-folder conda-bld .
+          conda mambabuild --output-folder conda-bld .
         env:
           ANACONDA_API_TOKEN: ${{ secrets.ANACONDA_API_TOKEN }}
       - name: Create conda-bld/manifest
@@ -338,7 +341,7 @@ jobs:
         with:
           path: /usr/share/miniconda/pkgs
           key: ${{ runner.os }}-conda-ac1.7.2
-      - uses: goanpeca/setup-miniconda@v1
+      - uses: conda-incubator/setup-miniconda@v2
         with:
           activate-environment: ''
           auto-activate-base: true
diff --git a/.gitignore b/.gitignore
index df17089..c1fbd07 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,9 @@
+# MacOS
+.DS_Store
+
+# Vagrant
+.vagrant
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[cod]
diff --git a/Makefile b/Makefile
index c0919fa..d49f965 100644
--- a/Makefile
+++ b/Makefile
@@ -93,10 +93,10 @@ wheels: dist ## downloads wheel dependencies
 	ls -l wheels

 dist-conda: ## builds conda-package
-	conda build --no-anaconda-upload --output-folder conda-bld \
+	conda mambabuild --no-anaconda-upload --output-folder conda-bld \
 		-c zeroae \
 		-c conda-forge \
-		-c anaconda .
+		.

 install: clean ## install the package to the active Python's site-packages
-	python setup.py install
\ No newline at end of file
+	python setup.py install
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..9e13fe7
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,94 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+# All Vagrant configuration is done below. The "2" in Vagrant.configure
+# configures the configuration version (we support older styles for
+# backwards compatibility). Please don't change it unless you know what
+# you're doing.
+
+Vagrant.configure("2") do |config|
+  # The most common configuration options are documented and commented below.
+  # For a complete reference, please see the online documentation at
+  # https://docs.vagrantup.com.
+
+  # Every Vagrant development environment requires a box. You can search for
+  # boxes at https://vagrantcloud.com/search.
+  config.vm.box = "hashicorp/bionic64"
+
+  # Disable automatic box update checking. If you disable this, then
+  # boxes will only be checked for updates when the user runs
+  # `vagrant box outdated`. This is not recommended.
+  # config.vm.box_check_update = false
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine. In the example below,
+  # accessing "localhost:8080" will access port 80 on the guest machine.
+  # NOTE: This will enable public access to the opened port
+  config.vm.network "forwarded_port", guest: 8888, host: 8888
+
+  # Create a forwarded port mapping which allows access to a specific port
+  # within the machine from a port on the host machine and only allow access
+  # via 127.0.0.1 to disable public access
+  # config.vm.network "forwarded_port", guest: 80, host: 8080, host_ip: "127.0.0.1"
+
+  # Create a private network, which allows host-only access to the machine
+  # using a specific IP.
+  # config.vm.network "private_network", ip: "192.168.33.10"
+
+  # Create a public network, which generally maps to a bridged network.
+  # Bridged networks make the machine appear as another physical device on
+  # your network.
+  # config.vm.network "public_network"
+
+  # Share an additional folder to the guest VM. The first argument is
+  # the path on the host to the actual folder. The second argument is
+  # the path on the guest to mount the folder. And the optional third
+  # argument is a set of non-required options.
+  # config.vm.synced_folder "../data", "/vagrant_data"
+
+  # Provider-specific configuration so you can fine-tune various
+  # backing providers for Vagrant. These expose provider-specific options.
+  # Example for VirtualBox:
+  #
+  # config.vm.provider "virtualbox" do |vb|
+  #   # Display the VirtualBox GUI when booting the machine
+  #   vb.gui = true
+  #
+  #   # Customize the amount of memory on the VM:
+  #   vb.memory = "1024"
+  # end
+  #
+  # View the documentation for the provider you are using for more
+  # information on available options.
+  config.vm.provider "vmware_desktop" do |v|
+    v.vmx["memsize"] = "2048"
+  end
+
+  # Enable provisioning with a shell script. Additional provisioners such as
+  # Ansible, Chef, Docker, Puppet and Salt are also available. Please see the
+  # documentation for more information about their specific syntax and use.
+  config.vm.provision "shell", name: "Install Mambaforge", privileged: false, reset: true, inline: <<-SHELL
+    MAMBA_FORGE_FILE=Mambaforge-$(uname)-$(uname -m).sh
+    if ! [ -d ~/mambaforge ]; then
+      if ! [ -f $MAMBA_FORGE_FILE ]; then
+        wget -q https://github.com/conda-forge/miniforge/releases/latest/download/$MAMBA_FORGE_FILE
+      fi
+      bash $MAMBA_FORGE_FILE -b -u
+      rm -f $MAMBA_FORGE_FILE
+
+      mambaforge/bin/conda init --all
+    fi
+  SHELL
+
+  config.vm.provision "shell", name: "Install OS Packages", inline: <<-SHELL
+    ### Add OpenJDK 8
+    apt-get update
+    apt-get --yes install openjdk-8-jre-headless
+  SHELL
+
+  config.vm.provision "shell", name: "Create Development Environment", privileged: false, inline: <<-SHELL
+    ### Create the DarkNet Environment
+    source mambaforge/etc/profile.d/conda.sh
+    mamba env update --name darknet-cpu -f /vagrant/environment.yml
+    echo 'conda activate darknet-cpu' >> ~/.bashrc
+  SHELL
+end
diff --git a/environment.yml b/environment.yml
index cd34fd5..1176b58 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,4 +1,3 @@
-
 name: darknet.py-dev
 channels:
   - zeroae
@@ -9,6 +8,8 @@ dependencies:
   - pip
   - pip:
     - -e .
+    - multi-model-server
+    - sagemaker-inference

   # Setup Requirements (setup.py:setup_requirements)
   - compilers
@@ -21,7 +22,7 @@ dependencies:
   # Install Requirements (setup.py:requirements)
   - click >=7.0
   - click-plugins
-  - darknet
+  - darknet-cpu
   - entrypoints
   - fsspec <=0.7.5
   - numpy
@@ -30,6 +31,12 @@ dependencies:
   # Zoo Optional Requirements
   - intake

+  # MMS Requirements
+  - enum-compat
+  - future
+  - retrying
+  - scipy
+
   # Test Requirements (setup.py:test_requirements)
   - pytest >=3
   - pytest-cov
diff --git a/setup.py b/setup.py
index 254725d..ddf9f67 100755
--- a/setup.py
+++ b/setup.py
@@ -47,6 +47,14 @@
     # fmt: on
 ]

+mms_requirements = [
+    # fmt: off
+    "future",
+    "multi-model-server",
+    "retrying",
+    "sagemaker-inference",
+    # fmt: on
+]
 zoo_requirements = [
     # fmt: off
     "intake",
@@ -130,7 +138,7 @@
     long_description_content_type="text/x-rst",
     include_package_data=True,
     keywords="py darknet",
-    name="darknet-py",
+    name="darknet.py",
     package_dir={"": "src"},
     packages=find_namespace_packages(where="./src"),
     setup_requires=setup_requirements,
@@ -141,6 +149,7 @@
         "test": test_requirements,
         "doc": doc_requirements,
         "zoo": zoo_requirements,
+        "mms": mms_requirements,
         # fmt: on
     },
     url="https://github.com/zeroae/darknet.py",
diff --git a/src/darknet/sagemaker/__init__.py b/src/darknet/sagemaker/__init__.py
new file mode 100644
index 0000000..383b92b
--- /dev/null
+++ b/src/darknet/sagemaker/__init__.py
@@ -0,0 +1,4 @@
+from .default_inference_handler import DefaultDarknetInferenceHandler, Network
+from ..py.util import image_to_3darray
+
+__all__ = ["DefaultDarknetInferenceHandler", "Network", "image_to_3darray"]
diff --git a/src/darknet/sagemaker/__main__.py b/src/darknet/sagemaker/__main__.py
new file mode 100644
index 0000000..9ea1b9c
--- /dev/null
+++ b/src/darknet/sagemaker/__main__.py
@@ -0,0 +1,26 @@
+from retrying import retry
+from subprocess import CalledProcessError
+from sagemaker_inference import model_server
+
+# TODO: from .classifier import handler_service as classifier_service
+from .detector import handler_service as detector_service
+
+
+def _retry_if_error(exception):
+    return isinstance(exception, (CalledProcessError, OSError))
+
+
+@retry(stop_max_delay=1000 * 50, retry_on_exception=_retry_if_error)
+def _start_mms():
+    # by default the number of workers per model is 1, but we can configure it through the
+    # environment variable below if desired.
+    # os.environ['SAGEMAKER_MODEL_SERVER_WORKERS'] = '2'
+    # TODO: Start Classifier *or* Detector Service
+    model_server.start_model_server(handler_service=detector_service.__name__)
+
+
+def main():
+    _start_mms()
+
+
+main()
diff --git a/src/darknet/sagemaker/classifier/__init__.py b/src/darknet/sagemaker/classifier/__init__.py
new file mode 100644
index 0000000..ddee0ce
--- /dev/null
+++ b/src/darknet/sagemaker/classifier/__init__.py
@@ -0,0 +1,3 @@
+from .handler_service import HandlerService
+
+__all__ = ["HandlerService"]
diff --git a/src/darknet/sagemaker/classifier/__main__.py b/src/darknet/sagemaker/classifier/__main__.py
new file mode 100644
index 0000000..afb949a
--- /dev/null
+++ b/src/darknet/sagemaker/classifier/__main__.py
@@ -0,0 +1,24 @@
+from retrying import retry
+from subprocess import CalledProcessError
+from sagemaker_inference import model_server
+
+from . import handler_service as classifier_service
+
+
+def _retry_if_error(exception):
+    return isinstance(exception, (CalledProcessError, OSError))
+
+
+@retry(stop_max_delay=1000 * 50, retry_on_exception=_retry_if_error)
+def _start_mms():
+    # by default the number of workers per model is 1, but we can configure it through the
+    # environment variable below if desired.
+    # os.environ['SAGEMAKER_MODEL_SERVER_WORKERS'] = '2'
+    model_server.start_model_server(handler_service=classifier_service.__name__)
+
+
+def main():
+    _start_mms()
+
+
+main()
diff --git a/src/darknet/sagemaker/classifier/default_inference_handler.py b/src/darknet/sagemaker/classifier/default_inference_handler.py
new file mode 100644
index 0000000..8b6f7b7
--- /dev/null
+++ b/src/darknet/sagemaker/classifier/default_inference_handler.py
@@ -0,0 +1,36 @@
+from typing import Tuple, List
+
+from sagemaker_inference.errors import UnsupportedFormatError
+
+from .. import DefaultDarknetInferenceHandler, image_to_3darray, Network
+
+
+class DefaultDarknetClassifierInferenceHandler(DefaultDarknetInferenceHandler):
+    def default_predict_fn(self, data, model: Tuple[Network, List[str]]):
+        """A default predict_fn for DarkNet. Calls a model on data deserialized in input_fn.
+        Args:
+            data: input data (PIL.Image) for prediction deserialized by input_fn
+            model: DarkNet model loaded in memory by model_fn
+
+        Returns: a prediction
+        """
+        network, labels = model
+        max_labels = data.get("MaxLabels", 5)
+        # TODO: min_confidence = data.get("MinConfidence", 55)
+
+        if "NDArray" in data:
+            probabilities = network.predict(data["NDArray"])
+        elif "Image" in data:
+            image, _ = image_to_3darray(data["Image"], network.shape)
+            probabilities = network.predict_image(image)
+        else:
+            raise UnsupportedFormatError("Expected an NDArray or an Image")
+
+        rv = [
+            {
+                "Name": label,
+                "Confidence": prob * 100,
+            }
+            for label, prob in sorted(zip(labels, probabilities), key=lambda x: x[1], reverse=True)
+        ]
+        return {"Labels": rv[0:max_labels] if max_labels else rv}
diff --git a/src/darknet/sagemaker/classifier/handler_service.py b/src/darknet/sagemaker/classifier/handler_service.py
new file mode 100644
index 0000000..cacbf72
--- /dev/null
+++ b/src/darknet/sagemaker/classifier/handler_service.py
@@ -0,0 +1,22 @@
+from sagemaker_inference.default_handler_service import DefaultHandlerService
+from sagemaker_inference.transformer import Transformer
+
+from .default_inference_handler import DefaultDarknetClassifierInferenceHandler
+
+
+class HandlerService(DefaultHandlerService):
+    """Handler service that is executed by the model server.
+    Determines the specific default inference handler to use based on the type of DarkNet model being served.
+    This class extends ``DefaultHandlerService``, which defines the following:
+        - The ``handle`` method is invoked for all incoming inference requests to the model server.
+        - The ``initialize`` method is invoked at model server start up.
+    Based on: https://github.com/awslabs/mxnet-model-server/blob/master/docs/custom_service.md
+    """
+
+    def __init__(self):
+        self._initialized = False
+
+        transformer = Transformer(
+            default_inference_handler=DefaultDarknetClassifierInferenceHandler()
+        )
+        super(HandlerService, self).__init__(transformer=transformer)
diff --git a/src/darknet/sagemaker/default_inference_handler.py b/src/darknet/sagemaker/default_inference_handler.py
new file mode 100644
index 0000000..94bb866
--- /dev/null
+++ b/src/darknet/sagemaker/default_inference_handler.py
@@ -0,0 +1,55 @@
+import io
+
+import PIL.Image as Image
+
+from abc import ABC
+from glob import glob
+from typing import Tuple, List
+
+from sagemaker_inference.encoder import encode
+from sagemaker_inference.decoder import decode
+from sagemaker_inference.default_inference_handler import DefaultInferenceHandler
+
+from darknet.py.network import Network
+
+
+class DefaultDarknetInferenceHandler(DefaultInferenceHandler, ABC):
+    def default_model_fn(self, model_dir) -> Tuple[Network, List[str]]:
+        """Loads a DarkNet model and its class labels from model_dir.
+
+        Returns: a DarkNet Network and the list of label names.
+        """
+        labels_file = glob(f"{model_dir}/*.labels")[0]
+        with open(labels_file) as f:
+            labels = [line.rstrip() for line in f.readlines()]
+
+        cfg_file = glob(f"{model_dir}/*.cfg")[0]
+        weights_file = glob(f"{model_dir}/*.weights")[0]
+
+        return Network(cfg_file, weights_file, batch_size=1), labels
+
+    def default_input_fn(self, input_data, content_type) -> dict:
+        """A default input_fn that handles image and tensor content types.
+
+        Args:
+            input_data: the request payload serialized in the content_type format
+            content_type: the request content_type
+
+        Returns: a dict with either an "Image" (PIL.Image) or an "NDArray" entry
+        """
+        if content_type.startswith("image/"):
+            image = Image.open(io.BytesIO(input_data))
+            return {"Image": image}
+        else:
+            return {"NDArray": decode(input_data, content_type)}
+
+    def default_output_fn(self, prediction, accept):
+        """A default output_fn for DarkNet. Serializes predictions from predict_fn to JSON, CSV or NPY format.
+
+        Args:
+            prediction: a prediction result from predict_fn
+            accept: the content type the output data should be serialized to
+
+        Returns: the serialized prediction
+        """
+        return encode(prediction, accept)
diff --git a/src/darknet/sagemaker/detector/__init__.py b/src/darknet/sagemaker/detector/__init__.py
new file mode 100644
index 0000000..ddee0ce
--- /dev/null
+++ b/src/darknet/sagemaker/detector/__init__.py
@@ -0,0 +1,3 @@
+from .handler_service import HandlerService
+
+__all__ = ["HandlerService"]
diff --git a/src/darknet/sagemaker/detector/__main__.py b/src/darknet/sagemaker/detector/__main__.py
new file mode 100644
index 0000000..76e5b2a
--- /dev/null
+++ b/src/darknet/sagemaker/detector/__main__.py
@@ -0,0 +1,24 @@
+from retrying import retry
+from subprocess import CalledProcessError
+from sagemaker_inference import model_server
+
+from . import handler_service
+
+
+def _retry_if_error(exception):
+    return isinstance(exception, (CalledProcessError, OSError))
+
+
+@retry(stop_max_delay=1000 * 50, retry_on_exception=_retry_if_error)
+def _start_mms():
+    # by default the number of workers per model is 1, but we can configure it through the
+    # environment variable below if desired.
+    # os.environ['SAGEMAKER_MODEL_SERVER_WORKERS'] = '2'
+    model_server.start_model_server(handler_service=handler_service.__name__)
+
+
+def main():
+    _start_mms()
+
+
+main()
diff --git a/src/darknet/sagemaker/detector/default_inference_handler.py b/src/darknet/sagemaker/detector/default_inference_handler.py
new file mode 100644
index 0000000..2d1d110
--- /dev/null
+++ b/src/darknet/sagemaker/detector/default_inference_handler.py
@@ -0,0 +1,55 @@
+from itertools import groupby
+
+from typing import Tuple, List
+from sagemaker_inference.errors import UnsupportedFormatError
+
+from .. import DefaultDarknetInferenceHandler, Network, image_to_3darray
+
+
+class DefaultDarknetDetectorInferenceHandler(DefaultDarknetInferenceHandler):
+    def default_predict_fn(self, data, model: Tuple[Network, List[str]]):
+        """A default predict_fn for DarkNet. Calls a model on data deserialized in input_fn.
+        Args:
+            data: input data (PIL.Image) for prediction deserialized by input_fn
+            model: DarkNet model loaded in memory by model_fn
+
+        Returns: detected labels
+        """
+        if "Image" not in data:
+            raise UnsupportedFormatError("Detector model expects an Image.")
+
+        network, labels = model
+
+        max_labels = data.get("MaxLabels", None)
+        min_confidence = data.get("MinConfidence", 55)
+
+        image = data["Image"]
+        image, frame_size = image_to_3darray(image, network.shape)
+        _ = network.predict_image(image)
+
+        detections = network.detect(
+            frame_size=frame_size,
+            threshold=min_confidence / 100.0,
+            hierarchical_threshold=min_confidence / 100.0,
+        )
+        detections = sorted(detections, key=lambda x: x[0])
+
+        def bbox_to_sm_map(x_0, y_0, w, h):
+            # (x_0, y_0) is the box center; convert to the top-left corner convention
+            return {"Width": w, "Height": h, "Left": x_0 - w / 2, "Top": y_0 - h / 2}
+
+        rv = []
+        for label_idx, instances in groupby(detections, key=lambda x: x[0]):
+            instances = [
+                {"Confidence": prob * 100, "BoundingBox": bbox_to_sm_map(*bbox)}
+                for _, prob, bbox in sorted(instances, key=lambda x: x[1], reverse=True)
+            ]
+            rv.append(
+                {
+                    "Name": labels[label_idx],
+                    "Confidence": instances[0]["Confidence"],
+                    "Instances": instances,
+                    "Parents": [],
+                }
+            )
+        return {"Labels": rv[0:max_labels] if max_labels else rv}
diff --git a/src/darknet/sagemaker/detector/handler_service.py b/src/darknet/sagemaker/detector/handler_service.py
new file mode 100644
index 0000000..41a16bc
--- /dev/null
+++ b/src/darknet/sagemaker/detector/handler_service.py
@@ -0,0 +1,22 @@
+from sagemaker_inference.default_handler_service import DefaultHandlerService
+from sagemaker_inference.transformer import Transformer
+
+from .default_inference_handler import DefaultDarknetDetectorInferenceHandler
+
+
+class HandlerService(DefaultHandlerService):
+    """Handler service that is executed by the model server.
+    Determines the specific default inference handler to use based on the type of DarkNet model being served.
+    This class extends ``DefaultHandlerService``, which defines the following:
+        - The ``handle`` method is invoked for all incoming inference requests to the model server.
+        - The ``initialize`` method is invoked at model server start up.
+    Based on: https://github.com/awslabs/mxnet-model-server/blob/master/docs/custom_service.md
+    """
+
+    def __init__(self):
+        self._initialized = False
+
+        transformer = Transformer(
+            default_inference_handler=DefaultDarknetDetectorInferenceHandler()
+        )
+        super(HandlerService, self).__init__(transformer=transformer)
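
A quick way to sanity-check the new handlers without launching the full multi-model-server is to drive one handler directly. The sketch below is hypothetical (the model directory, image file name, and output are placeholders, not part of this patch); it assumes only what the patch adds: a model directory containing the *.cfg, *.weights, and *.labels files that default_model_fn globs for.

    # smoke_test_detector.py -- hypothetical local check for the detector handler
    from darknet.sagemaker.detector.default_inference_handler import (
        DefaultDarknetDetectorInferenceHandler,
    )

    handler = DefaultDarknetDetectorInferenceHandler()

    # default_model_fn returns a (Network, labels) tuple
    model = handler.default_model_fn("/opt/ml/model")

    # default_input_fn wraps image payloads as {"Image": PIL.Image}
    with open("dog.jpg", "rb") as f:
        data = handler.default_input_fn(f.read(), "image/jpeg")

    # default_predict_fn returns {"Labels": [{"Name", "Confidence", "Instances", ...}]}
    prediction = handler.default_predict_fn(data, model)
    print(handler.default_output_fn(prediction, "application/json"))

The same pattern applies to the classifier handler; in deployment both are reached through python -m darknet.sagemaker.detector (or .classifier), which starts the model server with the matching HandlerService.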