From 5f3df042e18a866f5774062e9c49423c08b21c90 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Tue, 1 Dec 2020 16:34:03 -0800 Subject: [PATCH 01/36] Update examples --- .gitbook.yaml | 6 +- docs/aws/install.md | 2 +- docs/deployments/batch-api.md | 2 +- docs/deployments/batch-api/deployment.md | 2 +- docs/deployments/batch-api/predictors.md | 2 +- docs/deployments/realtime-api.md | 2 +- docs/deployments/realtime-api/deployment.md | 2 +- docs/deployments/realtime-api/models.md | 3 - docs/deployments/realtime-api/predictors.md | 58 - .../realtime-api/traffic-splitter.md | 4 +- docs/guides/multi-model.md | 5 +- docs/guides/single-node-deployment.md | 4 +- docs/summary.md | 4 +- examples/README.md | 4 - .../{image-classifier => python}/README.md | 0 .../{image-classifier => python}/cortex.yaml | 0 .../{image-classifier => python}/predictor.py | 0 .../requirements.txt | 0 .../{image-classifier => python}/sample.json | 0 .../python}/README.md | 0 .../python}/cortex.yaml | 0 .../python}/cortex_gpu.yaml | 0 .../python}/cortex_inf.yaml | 0 .../python}/generate_resnet50_models.ipynb | 0 .../python}/predictor.py | 0 .../python}/sample.json | 0 .../tensorflow}/README.md | 0 .../tensorflow}/cortex.yaml | 0 .../tensorflow}/cortex_gpu.yaml | 0 .../cortex_gpu_server_side_batching.yaml | 0 .../tensorflow}/cortex_inf.yaml | 0 .../cortex_inf_server_side_batching.yaml | 0 .../generate_gpu_resnet50_model.ipynb | 0 .../generate_resnet50_models.ipynb | 0 .../tensorflow}/predictor.py | 0 .../tensorflow}/requirements.txt | 0 .../tensorflow}/sample.bin | Bin .../tensorflow}/sample.json | 0 .../onnx}/README.md | 2 +- .../onnx}/cortex.yaml | 0 .../onnx}/predictor.py | 0 .../onnx}/sample.json | 0 .../onnx}/xgboost.ipynb | 0 .../python}/README.md | 0 .../python}/deploy.ipynb | 0 .../python}/predictor.py | 0 .../python}/requirements.txt | 0 .../tensorflow}/README.md | 2 +- .../tensorflow}/cortex.yaml | 0 .../tensorflow}/predictor.py | 0 .../tensorflow}/sample.json | 0 .../tensorflow}/tensorflow.ipynb | 0 examples/keras/document-denoiser/README.md | 46 - examples/keras/document-denoiser/cortex.yaml | 12 - examples/keras/document-denoiser/predictor.py | 86 -- .../keras/document-denoiser/requirements.txt | 5 - examples/keras/document-denoiser/sample.json | 3 - .../keras/document-denoiser/trainer.ipynb | 620 ---------- examples/live-reloading/onnx/README.md | 4 +- examples/live-reloading/tensorflow/README.md | 8 +- .../onnx}/README.md | 0 .../onnx}/cortex.yaml | 0 .../onnx}/predictor.py | 0 .../onnx}/requirements.txt | 0 .../onnx}/sample.json | 0 .../python}/README.md | 0 .../python}/cortex.yaml | 0 .../python}/predictor.py | 0 .../python}/requirements.txt | 0 .../python}/sample-sentiment.json | 0 .../python}/sample-summarizer.json | 0 .../tensorflow}/README.md | 0 .../tensorflow}/cortex.yaml | 4 - .../tensorflow}/predictor.py | 0 .../tensorflow}/requirements.txt | 0 .../tensorflow}/sample-image.json | 0 examples/onnx/iris-classifier/README.md | 3 - examples/onnx/yolov5-youtube/README.md | 61 - .../onnx/yolov5-youtube/conda-packages.txt | 3 - examples/onnx/yolov5-youtube/cortex.yaml | 13 - examples/onnx/yolov5-youtube/labels.json | 82 -- examples/onnx/yolov5-youtube/predictor.py | 65 -- examples/onnx/yolov5-youtube/requirements.txt | 3 - examples/onnx/yolov5-youtube/sample.json | 3 - examples/onnx/yolov5-youtube/utils.py | 130 --- examples/pytorch/answer-generator/README.md | 3 - examples/pytorch/answer-generator/cortex.yaml | 11 - .../pytorch/answer-generator/generator.py | 44 - 
.../pytorch/answer-generator/predictor.py | 36 - .../pytorch/answer-generator/requirements.txt | 3 - examples/pytorch/answer-generator/sample.json | 3 - .../image-classifier-alexnet/cortex.yaml | 11 - .../image-classifier-alexnet/predictor.py | 39 - .../image-classifier-alexnet/requirements.txt | 2 - .../image-classifier-alexnet/sample.json | 3 - examples/pytorch/iris-classifier/cortex.yaml | 11 - examples/pytorch/iris-classifier/predictor.py | 50 - .../pytorch/iris-classifier/requirements.txt | 2 - examples/pytorch/iris-classifier/sample.json | 6 - .../pytorch/language-identifier/README.md | 3 - .../pytorch/language-identifier/cortex.yaml | 9 - .../pytorch/language-identifier/predictor.py | 18 - .../language-identifier/requirements.txt | 2 - .../pytorch/language-identifier/sample.json | 3 - examples/pytorch/object-detector/README.md | 3 - .../pytorch/object-detector/coco_labels.txt | 91 -- examples/pytorch/object-detector/cortex.yaml | 11 - examples/pytorch/object-detector/predictor.py | 49 - .../pytorch/object-detector/requirements.txt | 2 - examples/pytorch/object-detector/sample.json | 4 - .../pytorch/question-generator/cortex.yaml | 10 - .../question-generator/dependencies.sh | 4 - .../pytorch/question-generator/predictor.py | 36 - .../question-generator/requirements.txt | 4 - .../pytorch/question-generator/sample.json | 4 - .../pytorch/reading-comprehender/README.md | 3 - .../pytorch/reading-comprehender/cortex.yaml | 11 - .../pytorch/reading-comprehender/predictor.py | 25 - .../reading-comprehender/requirements.txt | 1 - .../pytorch/reading-comprehender/sample.json | 4 - examples/pytorch/search-completer/README.md | 3 - examples/pytorch/search-completer/cortex.yaml | 11 - .../pytorch/search-completer/predictor.py | 20 - .../pytorch/search-completer/requirements.txt | 5 - examples/pytorch/search-completer/sample.json | 3 - examples/pytorch/sentiment-analyzer/README.md | 3 - .../pytorch/sentiment-analyzer/cortex.yaml | 10 - .../pytorch/sentiment-analyzer/predictor.py | 15 - .../sentiment-analyzer/requirements.txt | 2 - .../pytorch/sentiment-analyzer/sample.json | 3 - examples/pytorch/text-summarizer/README.md | 5 - examples/pytorch/text-summarizer/cortex.yaml | 11 - examples/pytorch/text-summarizer/predictor.py | 18 - .../pytorch/text-summarizer/requirements.txt | 2 - examples/pytorch/text-summarizer/sample.json | 3 - examples/sklearn/iris-classifier/README.md | 3 - examples/sklearn/iris-classifier/cortex.yaml | 15 - examples/sklearn/iris-classifier/predictor.py | 31 - .../sklearn/iris-classifier/requirements.txt | 2 - examples/sklearn/iris-classifier/sample.json | 6 - examples/sklearn/iris-classifier/trainer.py | 25 - examples/sklearn/mpg-estimator/README.md | 3 - examples/sklearn/mpg-estimator/cortex.yaml | 11 - examples/sklearn/mpg-estimator/predictor.py | 41 - .../sklearn/mpg-estimator/requirements.txt | 4 - examples/sklearn/mpg-estimator/sample.json | 7 - examples/sklearn/mpg-estimator/trainer.py | 25 - examples/spacy/entity-recognizer/README.md | 3 - examples/spacy/entity-recognizer/cortex.yaml | 10 - examples/spacy/entity-recognizer/predictor.py | 22 - .../spacy/entity-recognizer/requirements.txt | 1 - examples/spacy/entity-recognizer/sample.json | 3 - .../image-classifier-inception/README.md | 3 - .../image-classifier-inception/cortex.yaml | 13 - .../cortex_server_side_batching.yaml | 17 - .../inception.ipynb | 211 ---- .../image-classifier-inception/predictor.py | 21 - .../image-classifier-inception/sample.json | 3 - examples/tensorflow/iris-classifier/README.md | 3 - 
.../tensorflow/license-plate-reader/README.md | 175 --- .../license-plate-reader/config.json | 8 - .../license-plate-reader/cortex_full.yaml | 35 - .../license-plate-reader/cortex_lite.yaml | 14 - .../license-plate-reader/predictor_crnn.py | 44 - .../license-plate-reader/predictor_lite.py | 120 -- .../license-plate-reader/predictor_yolo.py | 46 - .../license-plate-reader/requirements.txt | 5 - .../license-plate-reader/sample_inference.py | 100 -- .../license-plate-reader/utils/__init__.py | 1 - .../license-plate-reader/utils/bbox.py | 111 -- .../license-plate-reader/utils/colors.py | 100 -- .../license-plate-reader/utils/preprocess.py | 59 - .../license-plate-reader/utils/utils.py | 160 --- .../multi-model-classifier/requirements.txt | 1 - .../multi-model-classifier/sample-iris.json | 8 - .../tensorflow/sentiment-analyzer/README.md | 3 - .../tensorflow/sentiment-analyzer/bert.ipynb | 1007 ----------------- .../tensorflow/sentiment-analyzer/cortex.yaml | 13 - .../sentiment-analyzer/predictor.py | 29 - .../sentiment-analyzer/requirements.txt | 5 - .../tensorflow/sentiment-analyzer/sample.json | 3 - examples/tensorflow/text-generator/README.md | 3 - .../tensorflow/text-generator/cortex.yaml | 11 - examples/tensorflow/text-generator/encoder.py | 118 -- .../tensorflow/text-generator/gpt-2.ipynb | 383 ------- .../tensorflow/text-generator/predictor.py | 24 - .../text-generator/requirements.txt | 2 - .../tensorflow/text-generator/sample.json | 3 - examples/traffic-splitter/model.py | 59 - .../README.md | 0 .../cortex.yaml | 0 .../model.py | 0 .../onnx_predictor.py | 0 .../pytorch_predictor.py | 0 .../sample.json | 0 195 files changed, 20 insertions(+), 5003 deletions(-) rename examples/batch/{image-classifier => python}/README.md (100%) rename examples/batch/{image-classifier => python}/cortex.yaml (100%) rename examples/batch/{image-classifier => python}/predictor.py (100%) rename examples/batch/{image-classifier => python}/requirements.txt (100%) rename examples/batch/{image-classifier => python}/sample.json (100%) rename examples/{pytorch/image-classifier-resnet50 => compute/python}/README.md (100%) rename examples/{pytorch/image-classifier-resnet50 => compute/python}/cortex.yaml (100%) rename examples/{pytorch/image-classifier-resnet50 => compute/python}/cortex_gpu.yaml (100%) rename examples/{pytorch/image-classifier-resnet50 => compute/python}/cortex_inf.yaml (100%) rename examples/{pytorch/image-classifier-resnet50 => compute/python}/generate_resnet50_models.ipynb (100%) rename examples/{pytorch/image-classifier-resnet50 => compute/python}/predictor.py (100%) rename examples/{onnx/multi-model-classifier => compute/python}/sample.json (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/README.md (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/cortex.yaml (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/cortex_gpu.yaml (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/cortex_gpu_server_side_batching.yaml (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/cortex_inf.yaml (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/cortex_inf_server_side_batching.yaml (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/generate_gpu_resnet50_model.ipynb (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/generate_resnet50_models.ipynb (100%) 
rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/predictor.py (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/requirements.txt (100%) rename examples/{tensorflow/image-classifier-resnet50 => compute/tensorflow}/sample.bin (100%) rename examples/{pytorch/image-classifier-resnet50 => compute/tensorflow}/sample.json (100%) rename examples/{pytorch/iris-classifier => hello-world/onnx}/README.md (64%) rename examples/{onnx/iris-classifier => hello-world/onnx}/cortex.yaml (100%) rename examples/{onnx/iris-classifier => hello-world/onnx}/predictor.py (100%) rename examples/{onnx/iris-classifier => hello-world/onnx}/sample.json (100%) rename examples/{onnx/iris-classifier => hello-world/onnx}/xgboost.ipynb (100%) rename examples/{pytorch/text-generator => hello-world/python}/README.md (100%) rename examples/{pytorch/text-generator => hello-world/python}/deploy.ipynb (100%) rename examples/{pytorch/text-generator => hello-world/python}/predictor.py (100%) rename examples/{pytorch/text-generator => hello-world/python}/requirements.txt (100%) rename examples/{pytorch/image-classifier-alexnet => hello-world/tensorflow}/README.md (64%) rename examples/{tensorflow/iris-classifier => hello-world/tensorflow}/cortex.yaml (100%) rename examples/{tensorflow/iris-classifier => hello-world/tensorflow}/predictor.py (100%) rename examples/{tensorflow/iris-classifier => hello-world/tensorflow}/sample.json (100%) rename examples/{tensorflow/iris-classifier => hello-world/tensorflow}/tensorflow.ipynb (100%) delete mode 100644 examples/keras/document-denoiser/README.md delete mode 100644 examples/keras/document-denoiser/cortex.yaml delete mode 100644 examples/keras/document-denoiser/predictor.py delete mode 100644 examples/keras/document-denoiser/requirements.txt delete mode 100644 examples/keras/document-denoiser/sample.json delete mode 100644 examples/keras/document-denoiser/trainer.ipynb rename examples/{onnx/multi-model-classifier => multi-model/onnx}/README.md (100%) rename examples/{onnx/multi-model-classifier => multi-model/onnx}/cortex.yaml (100%) rename examples/{onnx/multi-model-classifier => multi-model/onnx}/predictor.py (100%) rename examples/{onnx/multi-model-classifier => multi-model/onnx}/requirements.txt (100%) rename examples/{tensorflow/image-classifier-resnet50 => multi-model/onnx}/sample.json (100%) rename examples/{pytorch/multi-model-text-analyzer => multi-model/python}/README.md (100%) rename examples/{pytorch/multi-model-text-analyzer => multi-model/python}/cortex.yaml (100%) rename examples/{pytorch/multi-model-text-analyzer => multi-model/python}/predictor.py (100%) rename examples/{pytorch/multi-model-text-analyzer => multi-model/python}/requirements.txt (100%) rename examples/{pytorch/multi-model-text-analyzer => multi-model/python}/sample-sentiment.json (100%) rename examples/{pytorch/multi-model-text-analyzer => multi-model/python}/sample-summarizer.json (100%) rename examples/{tensorflow/multi-model-classifier => multi-model/tensorflow}/README.md (100%) rename examples/{tensorflow/multi-model-classifier => multi-model/tensorflow}/cortex.yaml (84%) rename examples/{tensorflow/multi-model-classifier => multi-model/tensorflow}/predictor.py (100%) rename examples/{tensorflow/image-classifier-inception => multi-model/tensorflow}/requirements.txt (100%) rename examples/{tensorflow/multi-model-classifier => multi-model/tensorflow}/sample-image.json (100%) delete mode 100644 examples/onnx/iris-classifier/README.md delete mode 
100644 examples/onnx/yolov5-youtube/README.md delete mode 100644 examples/onnx/yolov5-youtube/conda-packages.txt delete mode 100644 examples/onnx/yolov5-youtube/cortex.yaml delete mode 100644 examples/onnx/yolov5-youtube/labels.json delete mode 100644 examples/onnx/yolov5-youtube/predictor.py delete mode 100644 examples/onnx/yolov5-youtube/requirements.txt delete mode 100644 examples/onnx/yolov5-youtube/sample.json delete mode 100644 examples/onnx/yolov5-youtube/utils.py delete mode 100644 examples/pytorch/answer-generator/README.md delete mode 100644 examples/pytorch/answer-generator/cortex.yaml delete mode 100644 examples/pytorch/answer-generator/generator.py delete mode 100644 examples/pytorch/answer-generator/predictor.py delete mode 100644 examples/pytorch/answer-generator/requirements.txt delete mode 100644 examples/pytorch/answer-generator/sample.json delete mode 100644 examples/pytorch/image-classifier-alexnet/cortex.yaml delete mode 100644 examples/pytorch/image-classifier-alexnet/predictor.py delete mode 100644 examples/pytorch/image-classifier-alexnet/requirements.txt delete mode 100644 examples/pytorch/image-classifier-alexnet/sample.json delete mode 100644 examples/pytorch/iris-classifier/cortex.yaml delete mode 100644 examples/pytorch/iris-classifier/predictor.py delete mode 100644 examples/pytorch/iris-classifier/requirements.txt delete mode 100644 examples/pytorch/iris-classifier/sample.json delete mode 100644 examples/pytorch/language-identifier/README.md delete mode 100644 examples/pytorch/language-identifier/cortex.yaml delete mode 100644 examples/pytorch/language-identifier/predictor.py delete mode 100644 examples/pytorch/language-identifier/requirements.txt delete mode 100644 examples/pytorch/language-identifier/sample.json delete mode 100644 examples/pytorch/object-detector/README.md delete mode 100644 examples/pytorch/object-detector/coco_labels.txt delete mode 100644 examples/pytorch/object-detector/cortex.yaml delete mode 100644 examples/pytorch/object-detector/predictor.py delete mode 100644 examples/pytorch/object-detector/requirements.txt delete mode 100644 examples/pytorch/object-detector/sample.json delete mode 100644 examples/pytorch/question-generator/cortex.yaml delete mode 100644 examples/pytorch/question-generator/dependencies.sh delete mode 100644 examples/pytorch/question-generator/predictor.py delete mode 100644 examples/pytorch/question-generator/requirements.txt delete mode 100644 examples/pytorch/question-generator/sample.json delete mode 100644 examples/pytorch/reading-comprehender/README.md delete mode 100644 examples/pytorch/reading-comprehender/cortex.yaml delete mode 100644 examples/pytorch/reading-comprehender/predictor.py delete mode 100644 examples/pytorch/reading-comprehender/requirements.txt delete mode 100644 examples/pytorch/reading-comprehender/sample.json delete mode 100644 examples/pytorch/search-completer/README.md delete mode 100644 examples/pytorch/search-completer/cortex.yaml delete mode 100644 examples/pytorch/search-completer/predictor.py delete mode 100644 examples/pytorch/search-completer/requirements.txt delete mode 100644 examples/pytorch/search-completer/sample.json delete mode 100644 examples/pytorch/sentiment-analyzer/README.md delete mode 100644 examples/pytorch/sentiment-analyzer/cortex.yaml delete mode 100644 examples/pytorch/sentiment-analyzer/predictor.py delete mode 100644 examples/pytorch/sentiment-analyzer/requirements.txt delete mode 100644 examples/pytorch/sentiment-analyzer/sample.json delete mode 100644 
examples/pytorch/text-summarizer/README.md delete mode 100644 examples/pytorch/text-summarizer/cortex.yaml delete mode 100644 examples/pytorch/text-summarizer/predictor.py delete mode 100644 examples/pytorch/text-summarizer/requirements.txt delete mode 100644 examples/pytorch/text-summarizer/sample.json delete mode 100644 examples/sklearn/iris-classifier/README.md delete mode 100644 examples/sklearn/iris-classifier/cortex.yaml delete mode 100644 examples/sklearn/iris-classifier/predictor.py delete mode 100644 examples/sklearn/iris-classifier/requirements.txt delete mode 100644 examples/sklearn/iris-classifier/sample.json delete mode 100644 examples/sklearn/iris-classifier/trainer.py delete mode 100644 examples/sklearn/mpg-estimator/README.md delete mode 100644 examples/sklearn/mpg-estimator/cortex.yaml delete mode 100644 examples/sklearn/mpg-estimator/predictor.py delete mode 100644 examples/sklearn/mpg-estimator/requirements.txt delete mode 100644 examples/sklearn/mpg-estimator/sample.json delete mode 100644 examples/sklearn/mpg-estimator/trainer.py delete mode 100644 examples/spacy/entity-recognizer/README.md delete mode 100644 examples/spacy/entity-recognizer/cortex.yaml delete mode 100644 examples/spacy/entity-recognizer/predictor.py delete mode 100644 examples/spacy/entity-recognizer/requirements.txt delete mode 100644 examples/spacy/entity-recognizer/sample.json delete mode 100644 examples/tensorflow/image-classifier-inception/README.md delete mode 100644 examples/tensorflow/image-classifier-inception/cortex.yaml delete mode 100644 examples/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml delete mode 100644 examples/tensorflow/image-classifier-inception/inception.ipynb delete mode 100644 examples/tensorflow/image-classifier-inception/predictor.py delete mode 100644 examples/tensorflow/image-classifier-inception/sample.json delete mode 100644 examples/tensorflow/iris-classifier/README.md delete mode 100644 examples/tensorflow/license-plate-reader/README.md delete mode 100644 examples/tensorflow/license-plate-reader/config.json delete mode 100644 examples/tensorflow/license-plate-reader/cortex_full.yaml delete mode 100644 examples/tensorflow/license-plate-reader/cortex_lite.yaml delete mode 100644 examples/tensorflow/license-plate-reader/predictor_crnn.py delete mode 100644 examples/tensorflow/license-plate-reader/predictor_lite.py delete mode 100644 examples/tensorflow/license-plate-reader/predictor_yolo.py delete mode 100644 examples/tensorflow/license-plate-reader/requirements.txt delete mode 100644 examples/tensorflow/license-plate-reader/sample_inference.py delete mode 100644 examples/tensorflow/license-plate-reader/utils/__init__.py delete mode 100644 examples/tensorflow/license-plate-reader/utils/bbox.py delete mode 100644 examples/tensorflow/license-plate-reader/utils/colors.py delete mode 100644 examples/tensorflow/license-plate-reader/utils/preprocess.py delete mode 100644 examples/tensorflow/license-plate-reader/utils/utils.py delete mode 100644 examples/tensorflow/multi-model-classifier/requirements.txt delete mode 100644 examples/tensorflow/multi-model-classifier/sample-iris.json delete mode 100644 examples/tensorflow/sentiment-analyzer/README.md delete mode 100644 examples/tensorflow/sentiment-analyzer/bert.ipynb delete mode 100644 examples/tensorflow/sentiment-analyzer/cortex.yaml delete mode 100644 examples/tensorflow/sentiment-analyzer/predictor.py delete mode 100644 examples/tensorflow/sentiment-analyzer/requirements.txt delete mode 100644 
examples/tensorflow/sentiment-analyzer/sample.json delete mode 100644 examples/tensorflow/text-generator/README.md delete mode 100644 examples/tensorflow/text-generator/cortex.yaml delete mode 100644 examples/tensorflow/text-generator/encoder.py delete mode 100644 examples/tensorflow/text-generator/gpt-2.ipynb delete mode 100644 examples/tensorflow/text-generator/predictor.py delete mode 100644 examples/tensorflow/text-generator/requirements.txt delete mode 100644 examples/tensorflow/text-generator/sample.json delete mode 100644 examples/traffic-splitter/model.py rename examples/{traffic-splitter => traffic-splitting}/README.md (100%) rename examples/{traffic-splitter => traffic-splitting}/cortex.yaml (100%) rename examples/{pytorch/iris-classifier => traffic-splitting}/model.py (100%) rename examples/{traffic-splitter => traffic-splitting}/onnx_predictor.py (100%) rename examples/{traffic-splitter => traffic-splitting}/pytorch_predictor.py (100%) rename examples/{traffic-splitter => traffic-splitting}/sample.json (100%) diff --git a/.gitbook.yaml b/.gitbook.yaml index 8e0ed9a10b..09f320911a 100644 --- a/.gitbook.yaml +++ b/.gitbook.yaml @@ -5,9 +5,9 @@ structure: summary: summary.md redirects: - tutorial: ../examples/pytorch/text-generator/README.md - tutorial/realtime: ../examples/pytorch/text-generator/README.md - tutorial/batch: ../examples/batch/image-classifier/README.md + tutorial: ../examples/hello-world/python/README.md + tutorial/realtime: ../examples/hello-world/python/README.md + tutorial/batch: ../examples/batch/python/README.md install: ./aws/install.md uninstall: ./aws/uninstall.md update: ./aws/update.md diff --git a/docs/aws/install.md b/docs/aws/install.md index 67bda7563b..44a40b4aba 100644 --- a/docs/aws/install.md +++ b/docs/aws/install.md @@ -20,7 +20,7 @@ cortex env default aws ``` -Try the [tutorial](../../examples/pytorch/text-generator/README.md) or deploy one of our [examples](https://github.com/cortexlabs/cortex/tree/master/examples). +Try the [tutorial](../../examples/hello-world/python/README.md) or deploy one of our [examples](https://github.com/cortexlabs/cortex/tree/master/examples). ## Configure Cortex diff --git a/docs/deployments/batch-api.md b/docs/deployments/batch-api.md index a9b368be15..9710290a6c 100644 --- a/docs/deployments/batch-api.md +++ b/docs/deployments/batch-api.md @@ -37,7 +37,7 @@ At any point, you can use the Job ID that was provided upon job submission to ma ## Next steps -* Try the [tutorial](../../examples/batch/image-classifier/README.md) to deploy a Batch API on your Cortex cluster. +* Try the [tutorial](../../examples/batch/python/README.md) to deploy a Batch API on your Cortex cluster. * See our [exporting guide](../guides/exporting.md) for how to export your model to use in a Batch API. * See the [Predictor docs](batch-api/predictors.md) for how to implement a Predictor class. * See the [API configuration docs](batch-api/api-configuration.md) for a full list of features that can be used to deploy your Batch API. 
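For readers following the relocated batch tutorial referenced above, here is a rough sketch of the CLI flow it walks through — assuming the API name `image-classifier` defined in `examples/batch/python/cortex.yaml`, a hypothetical `job.json` containing a job submission body (see the Batch API endpoints docs for the exact schema), and that job status can be checked with `cortex get <api_name> <job_id>` as described in the job statuses docs:

```bash
# deploy the Batch API from the renamed example directory
cd examples/batch/python
cortex deploy

# list the deployed APIs and note the Batch API's endpoint
cortex get

# submit a job by POSTing a job specification to the endpoint;
# the response includes a job ID
curl "${BATCH_ENDPOINT}" -X POST -H "Content-Type: application/json" -d @job.json

# check the status of the submitted job by its ID
cortex get image-classifier <job_id>
```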
diff --git a/docs/deployments/batch-api/deployment.md b/docs/deployments/batch-api/deployment.md index 81168fac42..9608e927cb 100644 --- a/docs/deployments/batch-api/deployment.md +++ b/docs/deployments/batch-api/deployment.md @@ -122,6 +122,6 @@ deleting my-api ## Additional resources -* [Tutorial](../../../examples/batch/image-classifier/README.md) provides a step-by-step walkthrough of deploying an image classification batch API +* [Tutorial](../../../examples/batch/python/README.md) provides a step-by-step walkthrough of deploying an image classification batch API * [CLI documentation](../../miscellaneous/cli.md) lists all CLI commands * [Examples](https://github.com/cortexlabs/cortex/tree/master/examples/batch) demonstrate how to deploy models from common ML libraries diff --git a/docs/deployments/batch-api/predictors.md b/docs/deployments/batch-api/predictors.md index 426a64ddd2..e66681b6a8 100644 --- a/docs/deployments/batch-api/predictors.md +++ b/docs/deployments/batch-api/predictors.md @@ -97,7 +97,7 @@ For proper separation of concerns, it is recommended to use the constructor's `c ### Examples -You can find an example of a BatchAPI using a PythonPredictor in [examples/batch/image-classifier](https://github.com/cortexlabs/cortex/tree/master/examples/batch/image-classifier). +You can find an example of a BatchAPI using a PythonPredictor in [examples/batch/python](https://github.com/cortexlabs/cortex/tree/master/examples/batch/python). ### Pre-installed packages diff --git a/docs/deployments/realtime-api.md b/docs/deployments/realtime-api.md index f90110690f..3bdba221ea 100644 --- a/docs/deployments/realtime-api.md +++ b/docs/deployments/realtime-api.md @@ -40,7 +40,7 @@ The Cortex Cluster will automatically scale based on the incoming traffic and th ## Next steps -* Try the [tutorial](../../examples/pytorch/text-generator/README.md) to deploy a Realtime API locally or on AWS. +* Try the [tutorial](../../examples/hello-world/python/README.md) to deploy a Realtime API locally or on AWS. * See our [exporting guide](../guides/exporting.md) for how to export your model to use in a Realtime API. * See the [Predictor docs](realtime-api/predictors.md) for how to implement a Predictor class. * See the [API configuration docs](realtime-api/api-configuration.md) for a full list of features that can be used to deploy your Realtime API. diff --git a/docs/deployments/realtime-api/deployment.md b/docs/deployments/realtime-api/deployment.md index a7c0a09a4c..b2bf5dccc1 100644 --- a/docs/deployments/realtime-api/deployment.md +++ b/docs/deployments/realtime-api/deployment.md @@ -63,6 +63,6 @@ deleting my-api ## Additional resources -* [Tutorial](../../../examples/pytorch/text-generator/README.md) provides a step-by-step walkthrough of deploying a text generation API +* [Tutorial](../../../examples/hello-world/python/README.md) provides a step-by-step walkthrough of deploying a text generation API * [CLI documentation](../../miscellaneous/cli.md) lists all CLI commands * [Examples](https://github.com/cortexlabs/cortex/tree/master/examples) demonstrate how to deploy models from common ML libraries diff --git a/docs/deployments/realtime-api/models.md b/docs/deployments/realtime-api/models.md index 07fdc2ce1a..114d30236e 100644 --- a/docs/deployments/realtime-api/models.md +++ b/docs/deployments/realtime-api/models.md @@ -182,9 +182,6 @@ The following is a list of events that will trigger the API to update its model( * A model changes its directory structure. 
* A file in the model directory is updated in-place. - -Examples can be seen in [examples/live-reloading](https://github.com/cortexlabs/cortex/tree/master/examples/live-reloading). - Usage varies based on the predictor type: ### Python diff --git a/docs/deployments/realtime-api/predictors.md b/docs/deployments/realtime-api/predictors.md index 9bc05823b5..0ff5b9951d 100644 --- a/docs/deployments/realtime-api/predictors.md +++ b/docs/deployments/realtime-api/predictors.md @@ -134,64 +134,6 @@ Your API can accept requests with different types of payloads such as `JSON`-par Your `predictor` method can return different types of objects such as `JSON`-parseable, `string`, and `bytes` objects. Navigate to the [API responses](#api-responses) section to learn about how to configure your `predictor` method to respond with different response codes and content-types. -### Examples - - -Many of the [examples](https://github.com/cortexlabs/cortex/tree/master/examples) use the Python Predictor, including all of the PyTorch examples. - - -Here is the Predictor for [examples/pytorch/text-generator](https://github.com/cortexlabs/cortex/tree/master/examples/pytorch/text-generator): - -```python -import torch -from transformers import GPT2Tokenizer, GPT2LMHeadModel - - -class PythonPredictor: - def __init__(self, config): - self.device = "cuda" if torch.cuda.is_available() else "cpu" - print(f"using device: {self.device}") - self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2") - self.model = GPT2LMHeadModel.from_pretrained("gpt2").to(self.device) - - def predict(self, payload): - input_length = len(payload["text"].split()) - tokens = self.tokenizer.encode(payload["text"], return_tensors="pt").to(self.device) - prediction = self.model.generate(tokens, max_length=input_length + 20, do_sample=True) - return self.tokenizer.decode(prediction[0]) -``` - - -Here is the Predictor for [examples/live-reloading/python/mpg-estimator](https://github.com/cortexlabs/cortex/tree/feature/master/examples/live-reloading/python/mpg-estimator): - -```python -import mlflow.sklearn -import numpy as np - - -class PythonPredictor: - def __init__(self, config, python_client): - self.client = python_client - - def load_model(self, model_path): - return mlflow.sklearn.load_model(model_path) - - def predict(self, payload, query_params): - model_version = query_params.get("version") - - model = self.client.get_model(model_version=model_version) - model_input = [ - payload["cylinders"], - payload["displacement"], - payload["horsepower"], - payload["weight"], - payload["acceleration"], - ] - result = model.predict([model_input]).item() - - return {"prediction": result, "model": {"version": model_version}} -``` - ### Pre-installed packages The following Python packages are pre-installed in Python Predictors and can be used in your implementations: diff --git a/docs/deployments/realtime-api/traffic-splitter.md b/docs/deployments/realtime-api/traffic-splitter.md index 3a8a004da1..90726aa173 100644 --- a/docs/deployments/realtime-api/traffic-splitter.md +++ b/docs/deployments/realtime-api/traffic-splitter.md @@ -76,6 +76,6 @@ Note that this will not delete the Realtime APIs targeted by the Traffic Splitte ## Additional resources -* [Traffic Splitter Tutorial](../../../examples/traffic-splitter/README.md) provides a step-by-step walkthrough for deploying an Traffic Splitter -* [Realtime API Tutorial](../../../examples/pytorch/text-generator/README.md) provides a step-by-step walkthrough of deploying a realtime API for text generation +* 
[Traffic Splitter Tutorial](../../../examples/traffic-splitting/README.md) provides a step-by-step walkthrough for deploying a Traffic Splitter +* [Realtime API Tutorial](../../../examples/hello-world/python/README.md) provides a step-by-step walkthrough of deploying a realtime API for text generation * [CLI documentation](../../miscellaneous/cli.md) lists all CLI commands diff --git a/docs/guides/multi-model.md b/docs/guides/multi-model.md index 79ed0507ab..e8380d7631 100644 --- a/docs/guides/multi-model.md +++ b/docs/guides/multi-model.md @@ -9,9 +9,6 @@ It is possible to serve multiple models in the same Cortex API using any type of ### Specifying models in API config - -The following template is based on the [live-reloading/python/mpg-estimator](https://github.com/cortexlabs/cortex/tree/master/examples/live-reloading/python/mpg-estimator) example. - #### `cortex.yaml` Even though it looks as if there's only a single model served, there are actually 4 different versions saved in `s3://cortex-examples/sklearn/mpg-estimator/linreg/`. @@ -158,7 +155,7 @@ Machine learning is the study of algorithms and statistical models that computer For the TensorFlow Predictor, a multi-model API is configured by placing the list of models in the Predictor's `models` field (each model will specify its own unique name). The `predict()` method of the `tensorflow_client` object expects a second argument that represents the name of the model that will be used for inference. -The following template is based on the [tensorflow/multi-model-classifier](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/multi-model-classifier) example. +The following template is based on the [multi-model/tensorflow](https://github.com/cortexlabs/cortex/tree/master/examples/multi-model/tensorflow) example. ### `cortex.yaml` diff --git a/docs/guides/single-node-deployment.md b/docs/guides/single-node-deployment.md index 7a21e560fe..1ec54a0003 100644 --- a/docs/guides/single-node-deployment.md +++ b/docs/guides/single-node-deployment.md @@ -101,7 +101,7 @@ $ sudo groupadd docker; sudo gpasswd -a $USER docker $ logout ``` -If you have installed Docker correctly, you should be able to run docker commands such as `docker run hello-world` without running into permission issues or needing `sudo`. +If you have installed Docker correctly, you should be able to run docker commands such as `docker run hello-world` without running into permission issues or needing `sudo`.
### Step 12 @@ -120,7 +120,7 @@ You can now use Cortex to deploy your model: ```bash $ git clone -b master https://github.com/cortexlabs/cortex.git -$ cd cortex/examples/pytorch/text-generator +$ cd cortex/examples/hello-world/python $ cortex deploy diff --git a/docs/summary.md b/docs/summary.md index e92d69a771..d1891fc600 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -32,14 +32,14 @@ * [Autoscaling](deployments/realtime-api/autoscaling.md) * [Prediction monitoring](deployments/realtime-api/prediction-monitoring.md) * [Traffic Splitter](deployments/realtime-api/traffic-splitter.md) - * [Realtime API tutorial](../examples/pytorch/text-generator/README.md) + * [Realtime API tutorial](../examples/hello-world/python/README.md) * [Batch API](deployments/batch-api.md) * [Predictor implementation](deployments/batch-api/predictors.md) * [API configuration](deployments/batch-api/api-configuration.md) * [API deployment](deployments/batch-api/deployment.md) * [Endpoints](deployments/batch-api/endpoints.md) * [Job statuses](deployments/batch-api/statuses.md) - * [Batch API tutorial](../examples/batch/image-classifier/README.md) + * [Batch API tutorial](../examples/batch/python/README.md) ## Advanced diff --git a/examples/README.md b/examples/README.md index 1eb711f57d..a9b4f3ed15 100644 --- a/examples/README.md +++ b/examples/README.md @@ -2,8 +2,6 @@ ## TensorFlow -- [Iris classification](tensorflow/iris-classifier): deploy a model to classify iris flowers. - - [Text generation](tensorflow/text-generator): deploy OpenAI's GPT-2 to generate text. - [Sentiment analysis](tensorflow/sentiment-analyzer): deploy a BERT model for sentiment analysis. @@ -50,8 +48,6 @@ ## ONNX -- [Iris classification](onnx/iris-classifier): deploy an XGBoost model (exported in ONNX) to classify iris flowers. - - [YOLOv5 YouTube detection](onnx/yolov5-youtube): deploy a YOLOv5 model trained on COCO val2017 dataset. - [Multi-model classification](onnx/multi-model-classifier): deploy 3 models (ResNet50, MobileNet, ShuffleNet) in a single API. 
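A comparable sketch for the realtime path, using the relocated `examples/hello-world/python` example deployed in the guide above — assuming its `cortex.yaml` names the API `text-generator`, and recalling that its GPT-2 predictor (shown in the predictors.md section removed earlier in this patch) reads `payload["text"]`:

```bash
# deploy from the renamed hello-world example
cd cortex/examples/hello-world/python
cortex deploy

# get the API's status and endpoint once it is live
cortex get text-generator

# send a prediction request; the predictor expects a "text" field
curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d '{"text": "machine learning is"}'
```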
diff --git a/examples/batch/image-classifier/README.md b/examples/batch/python/README.md similarity index 100% rename from examples/batch/image-classifier/README.md rename to examples/batch/python/README.md diff --git a/examples/batch/image-classifier/cortex.yaml b/examples/batch/python/cortex.yaml similarity index 100% rename from examples/batch/image-classifier/cortex.yaml rename to examples/batch/python/cortex.yaml diff --git a/examples/batch/image-classifier/predictor.py b/examples/batch/python/predictor.py similarity index 100% rename from examples/batch/image-classifier/predictor.py rename to examples/batch/python/predictor.py diff --git a/examples/batch/image-classifier/requirements.txt b/examples/batch/python/requirements.txt similarity index 100% rename from examples/batch/image-classifier/requirements.txt rename to examples/batch/python/requirements.txt diff --git a/examples/batch/image-classifier/sample.json b/examples/batch/python/sample.json similarity index 100% rename from examples/batch/image-classifier/sample.json rename to examples/batch/python/sample.json diff --git a/examples/pytorch/image-classifier-resnet50/README.md b/examples/compute/python/README.md similarity index 100% rename from examples/pytorch/image-classifier-resnet50/README.md rename to examples/compute/python/README.md diff --git a/examples/pytorch/image-classifier-resnet50/cortex.yaml b/examples/compute/python/cortex.yaml similarity index 100% rename from examples/pytorch/image-classifier-resnet50/cortex.yaml rename to examples/compute/python/cortex.yaml diff --git a/examples/pytorch/image-classifier-resnet50/cortex_gpu.yaml b/examples/compute/python/cortex_gpu.yaml similarity index 100% rename from examples/pytorch/image-classifier-resnet50/cortex_gpu.yaml rename to examples/compute/python/cortex_gpu.yaml diff --git a/examples/pytorch/image-classifier-resnet50/cortex_inf.yaml b/examples/compute/python/cortex_inf.yaml similarity index 100% rename from examples/pytorch/image-classifier-resnet50/cortex_inf.yaml rename to examples/compute/python/cortex_inf.yaml diff --git a/examples/pytorch/image-classifier-resnet50/generate_resnet50_models.ipynb b/examples/compute/python/generate_resnet50_models.ipynb similarity index 100% rename from examples/pytorch/image-classifier-resnet50/generate_resnet50_models.ipynb rename to examples/compute/python/generate_resnet50_models.ipynb diff --git a/examples/pytorch/image-classifier-resnet50/predictor.py b/examples/compute/python/predictor.py similarity index 100% rename from examples/pytorch/image-classifier-resnet50/predictor.py rename to examples/compute/python/predictor.py diff --git a/examples/onnx/multi-model-classifier/sample.json b/examples/compute/python/sample.json similarity index 100% rename from examples/onnx/multi-model-classifier/sample.json rename to examples/compute/python/sample.json diff --git a/examples/tensorflow/image-classifier-resnet50/README.md b/examples/compute/tensorflow/README.md similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/README.md rename to examples/compute/tensorflow/README.md diff --git a/examples/tensorflow/image-classifier-resnet50/cortex.yaml b/examples/compute/tensorflow/cortex.yaml similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/cortex.yaml rename to examples/compute/tensorflow/cortex.yaml diff --git a/examples/tensorflow/image-classifier-resnet50/cortex_gpu.yaml b/examples/compute/tensorflow/cortex_gpu.yaml similarity index 100% rename from 
examples/tensorflow/image-classifier-resnet50/cortex_gpu.yaml rename to examples/compute/tensorflow/cortex_gpu.yaml diff --git a/examples/tensorflow/image-classifier-resnet50/cortex_gpu_server_side_batching.yaml b/examples/compute/tensorflow/cortex_gpu_server_side_batching.yaml similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/cortex_gpu_server_side_batching.yaml rename to examples/compute/tensorflow/cortex_gpu_server_side_batching.yaml diff --git a/examples/tensorflow/image-classifier-resnet50/cortex_inf.yaml b/examples/compute/tensorflow/cortex_inf.yaml similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/cortex_inf.yaml rename to examples/compute/tensorflow/cortex_inf.yaml diff --git a/examples/tensorflow/image-classifier-resnet50/cortex_inf_server_side_batching.yaml b/examples/compute/tensorflow/cortex_inf_server_side_batching.yaml similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/cortex_inf_server_side_batching.yaml rename to examples/compute/tensorflow/cortex_inf_server_side_batching.yaml diff --git a/examples/tensorflow/image-classifier-resnet50/generate_gpu_resnet50_model.ipynb b/examples/compute/tensorflow/generate_gpu_resnet50_model.ipynb similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/generate_gpu_resnet50_model.ipynb rename to examples/compute/tensorflow/generate_gpu_resnet50_model.ipynb diff --git a/examples/tensorflow/image-classifier-resnet50/generate_resnet50_models.ipynb b/examples/compute/tensorflow/generate_resnet50_models.ipynb similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/generate_resnet50_models.ipynb rename to examples/compute/tensorflow/generate_resnet50_models.ipynb diff --git a/examples/tensorflow/image-classifier-resnet50/predictor.py b/examples/compute/tensorflow/predictor.py similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/predictor.py rename to examples/compute/tensorflow/predictor.py diff --git a/examples/tensorflow/image-classifier-resnet50/requirements.txt b/examples/compute/tensorflow/requirements.txt similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/requirements.txt rename to examples/compute/tensorflow/requirements.txt diff --git a/examples/tensorflow/image-classifier-resnet50/sample.bin b/examples/compute/tensorflow/sample.bin similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/sample.bin rename to examples/compute/tensorflow/sample.bin diff --git a/examples/pytorch/image-classifier-resnet50/sample.json b/examples/compute/tensorflow/sample.json similarity index 100% rename from examples/pytorch/image-classifier-resnet50/sample.json rename to examples/compute/tensorflow/sample.json diff --git a/examples/pytorch/iris-classifier/README.md b/examples/hello-world/onnx/README.md similarity index 64% rename from examples/pytorch/iris-classifier/README.md rename to examples/hello-world/onnx/README.md index 41a04891b3..a45b69db8f 100644 --- a/examples/pytorch/iris-classifier/README.md +++ b/examples/hello-world/onnx/README.md @@ -1,3 +1,3 @@ _WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. 
+Please refer to the [tutorial](https://docs.cortex.dev/tutorial) to see how to deploy an example with Cortex. diff --git a/examples/onnx/iris-classifier/cortex.yaml b/examples/hello-world/onnx/cortex.yaml similarity index 100% rename from examples/onnx/iris-classifier/cortex.yaml rename to examples/hello-world/onnx/cortex.yaml diff --git a/examples/onnx/iris-classifier/predictor.py b/examples/hello-world/onnx/predictor.py similarity index 100% rename from examples/onnx/iris-classifier/predictor.py rename to examples/hello-world/onnx/predictor.py diff --git a/examples/onnx/iris-classifier/sample.json b/examples/hello-world/onnx/sample.json similarity index 100% rename from examples/onnx/iris-classifier/sample.json rename to examples/hello-world/onnx/sample.json diff --git a/examples/onnx/iris-classifier/xgboost.ipynb b/examples/hello-world/onnx/xgboost.ipynb similarity index 100% rename from examples/onnx/iris-classifier/xgboost.ipynb rename to examples/hello-world/onnx/xgboost.ipynb diff --git a/examples/pytorch/text-generator/README.md b/examples/hello-world/python/README.md similarity index 100% rename from examples/pytorch/text-generator/README.md rename to examples/hello-world/python/README.md diff --git a/examples/pytorch/text-generator/deploy.ipynb b/examples/hello-world/python/deploy.ipynb similarity index 100% rename from examples/pytorch/text-generator/deploy.ipynb rename to examples/hello-world/python/deploy.ipynb diff --git a/examples/pytorch/text-generator/predictor.py b/examples/hello-world/python/predictor.py similarity index 100% rename from examples/pytorch/text-generator/predictor.py rename to examples/hello-world/python/predictor.py diff --git a/examples/pytorch/text-generator/requirements.txt b/examples/hello-world/python/requirements.txt similarity index 100% rename from examples/pytorch/text-generator/requirements.txt rename to examples/hello-world/python/requirements.txt diff --git a/examples/pytorch/image-classifier-alexnet/README.md b/examples/hello-world/tensorflow/README.md similarity index 64% rename from examples/pytorch/image-classifier-alexnet/README.md rename to examples/hello-world/tensorflow/README.md index 41a04891b3..a45b69db8f 100644 --- a/examples/pytorch/image-classifier-alexnet/README.md +++ b/examples/hello-world/tensorflow/README.md @@ -1,3 +1,3 @@ _WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. +Please refer to the [tutorial](https://docs.cortex.dev/tutorial) to see how to deploy an example with Cortex. 
diff --git a/examples/tensorflow/iris-classifier/cortex.yaml b/examples/hello-world/tensorflow/cortex.yaml similarity index 100% rename from examples/tensorflow/iris-classifier/cortex.yaml rename to examples/hello-world/tensorflow/cortex.yaml diff --git a/examples/tensorflow/iris-classifier/predictor.py b/examples/hello-world/tensorflow/predictor.py similarity index 100% rename from examples/tensorflow/iris-classifier/predictor.py rename to examples/hello-world/tensorflow/predictor.py diff --git a/examples/tensorflow/iris-classifier/sample.json b/examples/hello-world/tensorflow/sample.json similarity index 100% rename from examples/tensorflow/iris-classifier/sample.json rename to examples/hello-world/tensorflow/sample.json diff --git a/examples/tensorflow/iris-classifier/tensorflow.ipynb b/examples/hello-world/tensorflow/tensorflow.ipynb similarity index 100% rename from examples/tensorflow/iris-classifier/tensorflow.ipynb rename to examples/hello-world/tensorflow/tensorflow.ipynb diff --git a/examples/keras/document-denoiser/README.md b/examples/keras/document-denoiser/README.md deleted file mode 100644 index 05f90b9bef..0000000000 --- a/examples/keras/document-denoiser/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Clean Dirty Documents w/ Autoencoders - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example model cleans text documents of anything that isn't text (aka noise): coffee stains, old wear artifacts, etc. You can inspect the notebook that has been used to train the model [here](trainer.ipynb). - -Here's a collage of input texts and predictions. - -![Imgur](https://i.imgur.com/M4Mjz2l.jpg) - -*Figure 1 - The dirty documents are on the left side and the cleaned ones are on the right* - -## Sample Prediction - -Once this model is deployed, get the API endpoint by running `cortex get document-denoiser`. - -Now let's take a sample image like this one. - -![Imgur](https://i.imgur.com/JJLfFxB.png) - -Export the endpoint & the image's URL by running -```bash -export ENDPOINT= -export IMAGE_URL=https://i.imgur.com/JJLfFxB.png -``` - -Then run the following piped commands -```bash -curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d '{"url":"'${IMAGE_URL}'"}' | -sed 's/"//g' | -base64 -d > prediction.png -``` - -Once this has run, we'll see a `prediction.png` file saved to the disk. This is the result. - -![Imgur](https://i.imgur.com/PRB2oS8.png) - -As it can be seen, the text document has been cleaned of any noise. Success! - ---- - -Here's a short list of URLs of other text documents in image format that can be cleaned using this model. Export these links to `IMAGE_URL` variable: - -* https://i.imgur.com/6COQ46f.png -* https://i.imgur.com/alLI83b.png -* https://i.imgur.com/QVoSTuu.png diff --git a/examples/keras/document-denoiser/cortex.yaml b/examples/keras/document-denoiser/cortex.yaml deleted file mode 100644 index b616a0ff0f..0000000000 --- a/examples/keras/document-denoiser/cortex.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: document-denoiser - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - config: - model: s3://cortex-examples/keras/document-denoiser/model.h5 - resize_shape: [540, 260] - compute: - cpu: 1 diff --git a/examples/keras/document-denoiser/predictor.py b/examples/keras/document-denoiser/predictor.py deleted file mode 100644 index 2554560388..0000000000 --- a/examples/keras/document-denoiser/predictor.py +++ /dev/null @@ -1,86 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import boto3, base64, cv2, re, os, requests -from botocore import UNSIGNED -from botocore.client import Config -import numpy as np -from tensorflow.keras.models import load_model - - -def get_url_image(url_image): - """ - Get numpy image from URL image. - """ - resp = requests.get(url_image, stream=True).raw - image = np.asarray(bytearray(resp.read()), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE) - return image - - -def image_to_png_nparray(image): - """ - Convert numpy image to jpeg numpy vector. - """ - is_success, im_buf_arr = cv2.imencode(".png", image) - return im_buf_arr - - -def image_to_png_bytes(image): - """ - Convert numpy image to bytes-encoded png image. - """ - buf = image_to_png_nparray(image) - byte_im = buf.tobytes() - return byte_im - - -class PythonPredictor: - def __init__(self, config): - # download the model - bucket, key = re.match("s3://(.+?)/(.+)", config["model"]).groups() - - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - model_path = os.path.join("/tmp/model.h5") - s3.download_file(bucket, key, model_path) - - # load the model - self.model = load_model(model_path) - - # resize shape (width, height) - self.resize_shape = tuple(config["resize_shape"]) - - def predict(self, payload): - # download image - img_url = payload["url"] - image = get_url_image(img_url) - resized = cv2.resize(image, self.resize_shape) - - # prediction - pred = self.make_prediction(resized) - - # image represented in bytes - byte_im = image_to_png_bytes(pred) - - # encode image - image_enc = base64.b64encode(byte_im).decode("utf-8") - - return image_enc - - def make_prediction(self, img): - """ - Make prediction on image. 
- """ - processed = img / 255.0 - processed = np.expand_dims(processed, 0) - processed = np.expand_dims(processed, 3) - pred = self.model.predict(processed) - pred = np.squeeze(pred, 3) - pred = np.squeeze(pred, 0) - out_img = pred * 255 - out_img[out_img > 255.0] = 255.0 - out_img = out_img.astype(np.uint8) - return out_img diff --git a/examples/keras/document-denoiser/requirements.txt b/examples/keras/document-denoiser/requirements.txt deleted file mode 100644 index 77eb59dc52..0000000000 --- a/examples/keras/document-denoiser/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -numpy==1.18.0 -requests==2.22.0 -opencv-python==4.1.2.30 -keras==2.3.1 -h5py==2.10.0 diff --git a/examples/keras/document-denoiser/sample.json b/examples/keras/document-denoiser/sample.json deleted file mode 100644 index 651595f4fb..0000000000 --- a/examples/keras/document-denoiser/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://i.imgur.com/JJLfFxB.png" -} diff --git a/examples/keras/document-denoiser/trainer.ipynb b/examples/keras/document-denoiser/trainer.ipynb deleted file mode 100644 index c8b0799b1b..0000000000 --- a/examples/keras/document-denoiser/trainer.ipynb +++ /dev/null @@ -1,620 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Training a Document Denoiser Model with AutoEncoders" - ] - }, - { - "cell_type": "code", - "execution_count": 69, - "metadata": {}, - "outputs": [], - "source": [ - "# _WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", - "\n", - "\n", - "import keras\n", - "import cv2\n", - "import numpy as np\n", - "import pandas as pd\n", - "import seaborn as sns\n", - "import os\n", - "import ntpath\n", - "from glob import glob\n", - "from matplotlib.pyplot import imshow\n", - "from sklearn.model_selection import train_test_split\n", - "from keras.preprocessing.image import ImageDataGenerator\n", - "from keras.models import Sequential, Model, load_model\n", - "from keras.layers import Activation, Flatten, Dropout, SpatialDropout2D, Conv2D, UpSampling2D, MaxPooling2D, add, concatenate, Input, BatchNormalization\n", - "from keras.backend import set_image_data_format\n", - "from keras.utils import plot_model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Download Dataset\n", - "\n", - "Download the dataset from [kaggle (denoising dirty documents)](https://www.kaggle.com/c/denoising-dirty-documents/data). You will need to be logged in to be able to download the data.\n", - "\n", - "Once downloaded run the following commands" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!unzip denoising-dirty-documents.zip && rm denoising-dirty-documents.zip\n", - "!mv denoising-dirty-documents/*.zip . && rm -rf denoising-dirty-documents\n", - "!unzip '*.zip' > /dev/null && rm *.zip" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Define the Data Generator\n", - "\n", - "Include data augmentation because the dataset is rather small." 
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "x_dirty = sorted(glob(\"train/*.png\"))\n", - "x_cleaned = sorted(glob(\"train_cleaned/*.png\"))\n", - "x_test = sorted(glob(\"test/*.png\"))\n", - "input_shape = (260, 540)\n", - "height = input_shape[0]\n", - "width = input_shape[1]" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "x_train, x_valid, y_train, y_valid = train_test_split(x_dirty, x_cleaned, test_size=0.20)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "set_image_data_format(\"channels_last\")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "def model_train_generator(x_train, y_train, epochs, batch_size, resize_shape):\n", - " white_fill = 1.0\n", - " datagen = ImageDataGenerator(\n", - " rotation_range=180,\n", - " width_shift_range=0.2,\n", - " height_shift_range=0.2,\n", - " zoom_range=0.3,\n", - " fill_mode=\"constant\",\n", - " cval=white_fill,\n", - " horizontal_flip=True,\n", - " vertical_flip=True,\n", - " )\n", - " \n", - " for _ in range(epochs):\n", - " for x_file, y_file in zip(x_train, y_train):\n", - " x_img = cv2.imread(x_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", - " y_img = cv2.imread(y_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", - " \n", - " xs = []\n", - " ys = []\n", - " for i in range(batch_size):\n", - " if i == 0:\n", - " x = x_img\n", - " y = y_img\n", - " else:\n", - " params = datagen.get_random_transform(img_shape=x_img.shape)\n", - " x = datagen.apply_transform(np.expand_dims(x_img, 2), params)\n", - " y = datagen.apply_transform(np.expand_dims(y_img, 2), params)\n", - " x = cv2.resize(x, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", - " y = cv2.resize(y, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", - " x = np.expand_dims(x, 2)\n", - " y = np.expand_dims(y, 2)\n", - " xs.append(x)\n", - " ys.append(y)\n", - " xs_imgs = np.array(xs)\n", - " ys_imgs = np.array(ys)\n", - " yield (xs_imgs, ys_imgs)\n", - "\n", - "def model_valid_generator(x_valid, y_valid, epochs, resize_shape):\n", - " xs = []\n", - " ys = []\n", - " for x_file, y_file in zip(x_valid, y_valid):\n", - " x_img = cv2.imread(x_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", - " y_img = cv2.imread(y_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", - " x = cv2.resize(x_img, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", - " y = cv2.resize(y_img, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", - " x = np.expand_dims(x, 2)\n", - " x = np.expand_dims(x, 0)\n", - " y = np.expand_dims(y, 2)\n", - " y = np.expand_dims(y, 0)\n", - " xs.append(x)\n", - " ys.append(y)\n", - " \n", - " for _ in range(epochs):\n", - " for xs_img, ys_img in zip(xs, ys):\n", - " yield (xs_img, ys_img)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Create the Model" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "def create_encoder(input_shape):\n", - " inp = Input(shape=input_shape)\n", - " x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), \n", - " input_shape=input_shape, activation=\"relu\", padding=\"same\")(inp)\n", - " x = BatchNormalization()(x)\n", - " x = MaxPooling2D(pool_size=(2,2))(x)\n", - " \n", - " x = Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), \n", - " activation=\"relu\", padding=\"same\")(x)\n", - " x = BatchNormalization()(x)\n", - 
"\n", - " return inp, x" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "def create_decoder(inp):\n", - " x = Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), activation=\"relu\",\n", - " padding=\"same\")(inp)\n", - " x = BatchNormalization()(x)\n", - " x = UpSampling2D(size=(2,2))(x)\n", - " \n", - " x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), \n", - " activation=\"relu\", padding=\"same\")(x)\n", - " x = BatchNormalization()(x)\n", - " \n", - " x = Conv2D(filters=1, kernel_size=(1,1), strides=(1,1), \n", - " activation=\"sigmoid\", padding=\"same\")(x)\n", - " x = BatchNormalization()(x)\n", - " \n", - " return inp, x" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "def create_autoencoder(input_shape):\n", - " enc_inp, encoder = create_encoder(input_shape)\n", - " dec_inp, autoencoder = create_decoder(encoder)\n", - " model = Model(inputs=[enc_inp], outputs=[autoencoder], name='AutoEncoder')\n", - " \n", - " return model" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From C:\\Users\\OboTh\\Anaconda3\\envs\\lightweight-gpu-python\\lib\\site-packages\\tensorflow_core\\python\\ops\\resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "If using Keras pass *_constraint arguments to layers.\n", - "WARNING:tensorflow:From C:\\Users\\OboTh\\Anaconda3\\envs\\lightweight-gpu-python\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. 
Please use tf.nn.max_pool2d instead.\n", - "\n", - "Model: \"AutoEncoder\"\n", - "_________________________________________________________________\n", - "Layer (type) Output Shape Param # \n", - "=================================================================\n", - "input_1 (InputLayer) (None, 260, 540, 1) 0 \n", - "_________________________________________________________________\n", - "conv2d_1 (Conv2D) (None, 260, 540, 64) 640 \n", - "_________________________________________________________________\n", - "batch_normalization_1 (Batch (None, 260, 540, 64) 256 \n", - "_________________________________________________________________\n", - "max_pooling2d_1 (MaxPooling2 (None, 130, 270, 64) 0 \n", - "_________________________________________________________________\n", - "conv2d_2 (Conv2D) (None, 130, 270, 32) 18464 \n", - "_________________________________________________________________\n", - "batch_normalization_2 (Batch (None, 130, 270, 32) 128 \n", - "_________________________________________________________________\n", - "conv2d_3 (Conv2D) (None, 130, 270, 32) 9248 \n", - "_________________________________________________________________\n", - "batch_normalization_3 (Batch (None, 130, 270, 32) 128 \n", - "_________________________________________________________________\n", - "up_sampling2d_1 (UpSampling2 (None, 260, 540, 32) 0 \n", - "_________________________________________________________________\n", - "conv2d_4 (Conv2D) (None, 260, 540, 64) 18496 \n", - "_________________________________________________________________\n", - "batch_normalization_4 (Batch (None, 260, 540, 64) 256 \n", - "_________________________________________________________________\n", - "conv2d_5 (Conv2D) (None, 260, 540, 1) 65 \n", - "_________________________________________________________________\n", - "batch_normalization_5 (Batch (None, 260, 540, 1) 4 \n", - "=================================================================\n", - "Total params: 47,685\n", - "Trainable params: 47,299\n", - "Non-trainable params: 386\n", - "_________________________________________________________________\n" - ] - } - ], - "source": [ - "model = create_autoencoder((height, width, 1))\n", - "model.summary()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "model.compile(optimizer='adam', loss='mse')\n", - "epochs = 20\n", - "batch_size = 8\n", - "samples = len(x_train)\n", - "validation_samples = len(x_valid)\n", - "train_generator = model_train_generator(x_train, y_train, epochs=epochs, batch_size=batch_size, resize_shape=(height, width))\n", - "valid_generator = model_valid_generator(x_valid, y_valid, epochs=epochs, resize_shape=(height, width))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Train the AutoEncoder Model" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From C:\\Users\\OboTh\\Anaconda3\\envs\\lightweight-gpu-python\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:422: The name tf.global_variables is deprecated. 
Please use tf.compat.v1.global_variables instead.\n", - "\n", - "Epoch 1/20\n", - "115/115 [==============================] - 49s 429ms/step - loss: 1.2062 - val_loss: 0.1817\n", - "Epoch 2/20\n", - "115/115 [==============================] - 43s 373ms/step - loss: 0.5792 - val_loss: 0.1720\n", - "Epoch 3/20\n", - "115/115 [==============================] - 43s 373ms/step - loss: 0.4297 - val_loss: 0.1399\n", - "Epoch 4/20\n", - "115/115 [==============================] - 43s 375ms/step - loss: 0.3160 - val_loss: 0.1023\n", - "Epoch 5/20\n", - "115/115 [==============================] - 44s 385ms/step - loss: 0.2276 - val_loss: 0.0609\n", - "Epoch 6/20\n", - "115/115 [==============================] - 44s 379ms/step - loss: 0.1599 - val_loss: 0.0292\n", - "Epoch 7/20\n", - "115/115 [==============================] - 43s 376ms/step - loss: 0.1091 - val_loss: 0.0112\n", - "Epoch 8/20\n", - "115/115 [==============================] - 43s 376ms/step - loss: 0.0730 - val_loss: 0.0074\n", - "Epoch 9/20\n", - "115/115 [==============================] - 44s 381ms/step - loss: 0.0473 - val_loss: 0.0055\n", - "Epoch 10/20\n", - "115/115 [==============================] - 45s 393ms/step - loss: 0.0301 - val_loss: 0.0047\n", - "Epoch 11/20\n", - "115/115 [==============================] - 45s 387ms/step - loss: 0.0189 - val_loss: 0.0041\n", - "Epoch 12/20\n", - "115/115 [==============================] - 43s 376ms/step - loss: 0.0118 - val_loss: 0.0042\n", - "Epoch 13/20\n", - "115/115 [==============================] - 44s 380ms/step - loss: 0.0075 - val_loss: 0.0061\n", - "Epoch 14/20\n", - "115/115 [==============================] - 43s 377ms/step - loss: 0.0051 - val_loss: 0.0048\n", - "Epoch 15/20\n", - "115/115 [==============================] - 43s 378ms/step - loss: 0.0037 - val_loss: 0.0045\n", - "Epoch 16/20\n", - "115/115 [==============================] - 43s 373ms/step - loss: 0.0029 - val_loss: 0.0045\n", - "Epoch 17/20\n", - "115/115 [==============================] - 44s 378ms/step - loss: 0.0025 - val_loss: 0.0048\n", - "Epoch 18/20\n", - "115/115 [==============================] - 43s 375ms/step - loss: 0.0023 - val_loss: 0.0047\n", - "Epoch 19/20\n", - "115/115 [==============================] - 43s 376ms/step - loss: 0.0022 - val_loss: 0.0043\n", - "Epoch 20/20\n", - "115/115 [==============================] - 44s 380ms/step - loss: 0.0021 - val_loss: 0.0042\n" - ] - } - ], - "source": [ - "hist_obj = model.fit_generator(train_generator, validation_data=valid_generator, validation_steps=validation_samples, steps_per_epoch=samples, epochs=epochs, shuffle=True) " - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3deXxU5d338c9vJpMESMIWlrAvgrIpanCtqLRlq2u1iqJWa+WhLtW+Krf6tFXv2t59bG9tbWu11lK1dQGXupRNa1txl4DsICKyhDWsYQtJZq7njzOBELJMYCYnM/N9v17zmplzrpn5zWH45sw117mOOecQEZHkF/C7ABERiQ8FuohIilCgi4ikCAW6iEiKUKCLiKSIDL9eOD8/3/Xq1cuvlxcRSUpz587d6pzrUNs63wK9V69eFBUV+fXyIiJJyczW1LVOXS4iIilCgS4ikiIU6CIiKcK3PnQRSU8VFRUUFxdTVlbmdynNWnZ2Nt26dSMUCsX8GAW6iDSp4uJicnNz6dWrF2bmdznNknOObdu2UVxcTO/evWN+nLpcRKRJlZWV0b59e4V5PcyM9u3bN/pbTIOBbmaTzWyLmS2uY/14M1sYvXxgZic1qgIRSTsK84YdzTaKZQ/9KWB0Peu/BM51zp0IPAA80egqRETkmDUY6M652cD2etZ/4JzbEb37EdAtTrXVbuNC+PNI2PBpQl9GRCTZxLsP/UZgRl0rzWyCmRWZWVFJScnRvUKoJaz7GLYsO8oSRURil5OTU+e61atXM3jw4Caspn5xC3QzOx8v0O+qq41z7gnnXKFzrrBDh1qnImhY254QCMHWFUf3eBGRFBWXYYtmdiLwJDDGObctHs9Zp2AI2vWBrZ8n9GVEJPH++40lLN1QGtfnHNglj/suHFTn+rvuuouePXty8803A3D//fdjZsyePZsdO3ZQUVHBz372My6++OJGvW5ZWRnf+973KCoqIiMjg4cffpjzzz+fJUuWcMMNN1BeXk4kEuHll1+mS5cuXHHFFRQXFxMOh/nJT37ClVdeeUzvG+IQ6GbWA3gFuNY51zS7zfn9tIcuIkdl3Lhx3HHHHQcDferUqcycOZMf/OAH5OXlsXXrVs444wwuuuiiRo00efTRRwFYtGgRy5cvZ+TIkaxYsYLHH3+c22+/nfHjx1NeXk44HGb69Ol06dKFadOmAbBr1664vLcGA93MngfOA/LNrBi4DwgBOOceB+4F2gN/iL75SudcYVyqq0t+f1gxC8IV3h67iCSl+vakE+Xkk09my5YtbNiwgZKSEtq2bUtBQQE/+MEPmD17NoFAgPXr17N582Y6d+4c8/O+99573HbbbQCccMIJ9OzZkxUrVnDmmWfy85//nOLiYr75zW/Sr18/hgwZwp133sldd93FBRdcwDnnnBOX99ZgoDvnrmpg/XeB78almlgNuxGGjgcLNunLikhquPzyy3nppZfYtGkT48aN49lnn6WkpIS5c+cSCoXo1atXow/qcc7Vuvzqq6/m9NNPZ9q0aYwaNYonn3ySESNGMHfuXKZPn84999zDyJEjuffee4/5fSXnof+tEzsyUkRS27hx47jpppvYunUr77zzDlOnTqVjx46EQiH+/e9/s2ZNnVOO12n48OE8++yzjBgxghUrVrB27VqOP/54Vq1aRZ8+ffj+97/PqlWrWLhwISeccALt2rXjmmuuIScnh6eeeiou7ys5Az0Shhl3Qc+zYPA3/a5GRJLMoEGD2L17N127dqWgoIDx48dz4YUXUlhYyNChQznhhBMa/Zw333wzEydOZMiQIWRkZPDUU0+RlZXFlClT+Nvf/kYoFKJz587ce++9zJkzh0mTJhEIBAiFQjz22GNxeV9W19eERCssLHTHdMai/+0Px30dLnk0fkWJSMItW7aMAQMG+F1GUqhtW5nZ3Lp+p0zeybny+2uki4hINcnZ5QLQ/jhY8ndwDjTRj4gk0KJFi7j22msPW5aVlcXHH3/sU0W1S95Az+8PZTth71bIOcqjTkVEYjBkyBDmz5/vdxkNSu4uF4BtOmJURASSOdC7ngKXTz4U7CIiaS55u1xatoPBl/ldhYhIs5G8e+gAy6fDvL/6XYWIJJn6psRNZskd6ItehHcf8rsKEZFmIbkDPb8/7FwDFY2bc0FEBLz5VyZNmsTgwYMZMmQIU6ZMAWDjxo0MHz6coUOHMnjwYN59913C4TDXX3/9wba//vWvfa7+SMnbhw7eNLouAttXQaeBflcjIkfjL9+offkN3tSyzLgbNi06cv3oX0DBifDpszD/uSMfF4NXXnmF+fPns2DBArZu3cqwYcMYPnw4zz33HKNGjeJHP/oR4XCYffv2MX/+fNavX8/ixYsB2LlzZ8yv01SSfA+9n3etI0ZF5Ci89957XHXVVQSDQTp16sS5557LnDlzGDZsGH/5y1+4//77WbRoEbm5ufTp04dVq1Zx2223MXPmTPLy8vwu/wjJvYfe/jjvWmcvEkleDe1Rj/l/9a8/ebx3OQp1zWU1fPhwZs+ezbRp07j22muZNGkS1113HQsWLGDWrFk8+uijTJ06lcmTJx/V6yZKcu+hZ7aCET/2Zl0UEWmk4cOHM2XKFMLhMCUlJcyePZvTTjuNNWvW0LFjR2666SZuvPFG5s2bx9atW4lEIlx22WU88MADzJs3z+/yj5Dce+gAwyf5XYGIJKlLL72UDz/8kJNOOgkz45e//CWdO3fm6aef5le/+hWhUIicnByeeeYZ1q9fzw033EAkEgHgF7/4hc/VHyl5p8+tsnMdrPvYO8hIk3SJNHuaPjd26TN9bpXPZ8HLN8LujX5XIiLiq+QP9PYa6SIiAqkQ6FWTc2mki0jS8KurN5kczTZK/kDP7QyZudpDF0kS2dnZbNu2TaFeD+cc27ZtIzs7u1GPS/5RLmbeAUbaQxdJCt26daO4uJiSkhK/S2nWsrOz6datW6Mek/yBDjDoEqjY73cVIhKDUChE7969/S4jJTUY6GY2GbgA2OKcG1zLegMeAcYC+4DrnXNNO+L+7Nub9OVERJqjWPrQnwJG17N+DNAvepkAPHbsZTVSuNLrctm/o8lfWkSkuWgw0J1zs4Ht9TS5GHjGeT4C2phZQbwKjMm2z+H3hfD5P5v0ZUVEmpN4jHLpCqyrdr84uuwIZjbBzIrMrCiuP4i06wMW0EgXEUlr8Qj02o63r3U8knPuCedcoXOusEOHDnF46aiMLGjbS4EuImktHoFeDHSvdr8bsCEOz9s4+f1h28omf1kRkeYiHoH+OnCdec4Adjnnmn5ilfx+XqBHwk3+0iIizUEswxafB84D8s2sGLgPCAE45x4HpuMNWVyJN2zxhkQVW6+Cod6lbBe0bOdLCSIifkr+6XNFRNJIak+fW10kAuX7/K5CRMQXqRXoj5wIs+7xuwoREV+kVqDnddEkXSKStlIr0PP7aSy6iKStFAv0/rC3RHO6iEhaSr1AB9iqA4xEJP2kXqCHWsLeLX5XIiLS5FLjBBdV2vWBe9ZDILX+To
 ...[truncated base64 image payload removed: corrupted/duplicated blob data from the deleted notebook's embedded plot output]...
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "hist_pd = pd.DataFrame(hist_obj.history, index=np.arange(1, len(hist_obj.history['loss'])+1))\n", - "hist_pd.index.name = 'epoch'\n", - "sns.lineplot(data=hist_pd)" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [], - "source": [ - "model_name = \"model.h5\"" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "model.save(model_name)" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "# model = load_model(model_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Testing Accuracy" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [], - "source": [ - "def test_generator(x_test, resize_shape):\n", - " for sample in x_test:\n", - " img = cv2.imread(sample, cv2.IMREAD_GRAYSCALE) / 255.0\n", - " res_img = cv2.resize(img, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", - " res_img = np.expand_dims(res_img, 0)\n", - " res_img = np.expand_dims(res_img, 3)\n", - " np_img = np.array(res_img)\n", - " yield (np_img, np_img)" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "MSE Loss: 0.07084273546934128\n" - ] - } - ], - "source": [ - "steps = len(x_test)\n", - "test_gen = test_generator(x_test, input_shape)\n", - "loss = model.evaluate_generator(test_gen, steps=steps)\n", - "print(\"MSE Loss:\", loss)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Sample Prediction" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [], - "source": [ - "img = cv2.imread(x_test[0], cv2.IMREAD_GRAYSCALE)\n", - "img = cv2.resize(img, input_shape[::-1], interpolation=cv2.INTER_AREA)" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 36, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXcAAADECAYAAABk6WGRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOx9eXSUVbbvr+aqpCqpzANhSAgkkDCEQEBEBkHupZkntVEc2qG7tbu1u7Ubve29IKJt215akBlU2kYBEZlRoEEBAYFAICSBEDJVUpVUKjXP03l/5O7DqTR631vv+p6rV85aWZm++r5z9tnDbw9nfxLGGHpGz+gZPaNn/HMN6f/vCfSMntEzekbP+J8fPcq9Z/SMntEz/glHj3LvGT2jZ/SMf8LRo9x7Rs/oGT3jn3D0KPee0TN6Rs/4Jxw9yr1n9Iye0TP+Ccf3ptwlEsm/SiSSGxKJpE4ikSz5vp7TM3pGz+gZPeMfh+T7qHOXSCQyALUA7gPQAuACgB8zxqr/xx/WM3pGz+gZPeMfxveF3MsA1DHG6hljQQDbAcz+np7VM3pGz+gZPaPb+L6Uey8ABuH3lv/6W8/oGT2jZ/SM/wdD/j3dV3KHv8XEfyQSydMAngaA+Pj40gEDBkAqlUIikSAajd7+EGOQSCT8ezQa5T/TF11Pfxc/CwAymYxfQz/T58Tv/zUv/rw7hawYY5BKpfx/9DlxTuKzu3+W7i0+606D5kvPutN9u9OFrhc/Lz5PnCP9TVzLtz3nTv8T70N/F/dNpHn3ffk2GtM6xDndadAau6+Tfqf/S6VSRCKRmOvF/bvT3tP1d+KlO/Em3U98Xvc9FXm1O63pHvQzPVtcf3d63Gmt3elP13wXr9LP3e9D14rP7L4v3flYHOKe3ImG4prp793XKtL122Sx+1q779l3XSd+0XPEPerOV3Qd8QDxDf3tTrQQ6Squwe12IxwOx9xLKpUiGo1CLpcjEolAo9FArVaDMQa5XH7HvWWM4cqVKxbGWNqd1vt9KfcWAL2F33MAGMULGGMbAWwEgJKSEnbq1Cn4fD4oFArEx8fD7/cjEonwxUajUSgUCoRCIUilUiiVSvh8PkQiEchkMiiVSoRCIQSDQSgUCqjVakSjUXi9Xmg0Gv75QCDACUmMpFQq4fV6oVar+bNCoRDi4uLAGINMJoNMJgNjDOFwmOaPYDAImUzG5xQOhyGVSqHRaBAOh7mCUyqV8Pv90Gg0CIVCAIBwOMyZSKVS8WsDgQDi4uLg9/v5fGn+4XAY4XAYKpUK4XAYCoUCCoUCQBfD0HW0PtG40VxpfcRAtB66XyAQgFwu598ZY9BqtXyetMZgMMifFQqF+N+JXnQNACiVSsjlcni9XsjlcigUCgSDQahUKng8Hsjlcv55uVyOuLg4uN1uzsQi7UVhpDkrlUp4PB4AXcpBKpVCpVIBAH8u7VMkEkF8fDy/H63L7/dDJpNBpVLxdZGyBwCfz8fXp1KpYtYvCjrNIRKJxAi8XC7nfBSJRKBSqRAIBDj/0npofzQaDQAgEAhApVJxugLgvCCTyfj+KRQKyGQyKBQKOJ1OPh+aE32WPk/8pVAouOIIBoP8c0Rzmm8gEOAKinhQNI6MMYRCISiVSkilUs6XwWAQUqkUfr+fK19SWsT/3UGR+Hka4XCYy45UKkVcXByX9Wg0imAwCKVSyb+TUiQ6KRQKTt9QKASfzxdzD5J9mjPJYPf5SSQSTneJRAK/3/8PPOfz+QAA8fHxiEQi/FqZTAa73Y49e/bA4XCgb9++CAQCcDqdSE5ORjgchlwuh9vtxuDBgzFixAiEw2FOQ9oX4rtwOIzMzMwmfMv4vpT7BQADJBJJLoBWAA8CWPRtFxPh5HI5Z2RS1kqlEpFIBD6fD4wxqNVqyOVyhEIhaDQazjD0f6VSye9HhKdNJ4VHm+92u/n/RcEkwkUiEX4tCQRdQ0aCrhEFnZjQ7XZzxSEyRygU4sxLwkn/J+Yk5UjXk0IRGYWUJ93H7/dDrVZzRibGJgVCzyDDR/cgmhFtIpEI1Go1p0koFOL3IeVBCpqUIN2LDF1ycjJCoRAkEglXogqFAnK5nD+Lng10KSdaK92ju3GnQfMlY0C8Q8qGhEyhUHABBQCVSsXvSTSLi4vj641EIhx5E32IjqRQ/H4/fD4fv4dIHxrdkSbxq2iobTZbjPInOtLvpExJAdHfid/p+aQIRV5QKpV8PqS8iGcZY1xuCPzEx8dzQ0m8QvtJfED7IK6R1k3rpeeSQiOlKpFIOL/TNSQjdD3RgvZI/DzJlGgY/X5/zJxkMhk3hEqlkssvyUsgEIDH4+FGkPiOaEl8QWBMlEviYaK5yI9yuZzLPfEU/c3pdEKtVvM1yWQyOBwORKNR6PV6zndk+NRqNdRqNZRKJeLj4wF06TDaH5qLRqPh+uy7xvei3BljYYlE8gsAXwCQAXiPMVb1HddzZEkM5fF4oNFoEAgEEA6HuUKKRCIx1xJDkhImYWSMcYYiZNldKZJSjkQi0Ol0HEkBt11/v98fgzoIZRPCA8CFXyKRIC4ujjOZVquNEUS6nhQKAK50icG1Wi1n/lAohPj4eK4YJBIJEhISuCEhKx6JRBAXF8f/HggEANxmTELiNAi90M+ErERDRcJLghmJRLhAkdKkudNn1Wo1tFotIpEI7HZ7DCokoxkKhfjeiCEQMqgi4lapVPD5fFz5uVwufn8SFgIA5NKq1WoEg8EYD4IUHX1G9HpISEQh745O4+Pj4fV64Xa7uaDR/UVjQnxJKC4UCnFaktGg5xG/it4ozYf2gQw6zYsUBxlZuVzODay4b3Qvuo/oUXUPqYXDYfh8PkSjUa5YRENvt9s5AiXPjp5JaxTpRWsjQ6JQKPiz5XI55HJ5jFdOIIa8RK/Xy2UM6PJc6LMAuEdDMkNKX+Qvl8sFjUbD10tzSUxM5DJAQI2QskQigVarjZERmq9MJoPL5eL8TushIy+GwORyOTweD5ddkj3igbq6Ok4rrVYLp9OJ1NRUHmHw+XxISUnhtI2Pj48BWRqNBjabjXs/3zW+L+QOxtghAIf+d64VLbvP5+Pok/5OTCCXy+Hz+fhmd7f2tBkiwgDA7wGAKyhSFiQgTU1NyMnJ4Z+nEA+hmHA4zAUyEAhwhRAfHw+Px8OZjTZORMXA7TAMXSfmDmgQIxBioefabDZ4PB5kZ2cDAAwGA7lkXNmKFp2UGAkOKV5SyiJaF0MTJKgulwtKpRJxcXGc7iSohKJEZSoaRKIfMSVdzxiD1WqFXq/nSk6tVkMikcDr9fLPk3EmpC+VSnnoRtx3ETUSD4nhBlIO5JqTYae/0f5S6I0AhshXpIhsNhvfUworkKDRfAkwEL8QkhQVslQqhcPhQCQSQVZWFucBMQRBtCQPoXs8mBBtMBiE3++PQak0h4SEBLS2tkKj0SA5ORkA+H4R0CHa0jOIZqLRpPVEIhF4vV5Eo1HExcXFKD1an2gUiEbi3Cl0YjKZkJWVBbfbDa1Wy/kdAJcv4n8Kb4h7JoZoSQ6JB8kYkk
En/eH1eqFQKNDW1galUomkpKQY74z23OfzcVpqtVruDRPSJzknIEfzEUO7pMNo0P1obzs6OjhNgC7jRaFFop/NZkPv3r1jwIioC8k76a7nuo8fxAlVcpuI6Ww2G6LRKHw+HwKBAAKBAHw+H1dKxKDAbeFVqVT8Z3HRxPSBQAChUAharZZvPinerVu3Ys+ePRyB0WYpFArOzBqNBiqVigs5ITFiKlJ0pPSJaX0+H0feLpeLu4e0BvpZdEVJ8YTDYcyZMwdnzpzBqVOncOnSJSxYsAAZGRmIj4/HoEGDOIIyGo2YPHkyVxaisJKystlsHOkCXeEZQkJkDNRqNRYvXowNGzZwZqL50NrEMBaFp8jVN5vNHKnQd/rM3/72N5jNZgDg6I48ElKSXq83RhgcDgc0Gg3//IwZM7gHdeDAATidTi7ky5Yt48JN+ygiVUJZxDsksOSxkcAplUpoNBpOjw8++ACBQADx8fExcXIKbcjlcpw6dQorVqzgvEChNzHspFQq8dhjj+Fvf/sb5zEy+nQvMYYvIlKpVMqRIM1TDO0Rsk5ISMBLL72Ezz//HE899RTfO+JJMtAejwfBYBA6nY7fg7w7Wt+SJUu4cvF4PFi7di33NETFRvwlImfaB3rWr3/9a1y6dAlarRbTpk3Diy++CJVKxXlHzCuEQiEeyiTwRUqVDD/Jfnx8PPdypk2bxg0N8TSBFKVSiR//+MfYsGEDB4JkDESjSeskj5jm6PV6oVQq+bOIx2hPCDyKnhuFimm/yJhnZGRAoVCgs7MTPp8PDocDLS0t6OjogMvlQnx8PNRqNd8X4gcyst3l+tvGD0K502bRIogBiclUKhU0Gg3faACccUgRkKIgqw+AKzoSImI0QpOE0Pv06YOmpibYbDZurQnJud3umHgqbWRCQgJkMhn8fn9MnBgATwyLyRiK2YqJMQrjiNeSsGs0Gmg0Guj1ekyZMgULFizA2rVrkZeXBwDIyspCUlIS/2xOTg6OHTvG6SfmAygMRAqYnk0InNAQISCz2YxZs2bFIE5SKsTEooKnuCK5miKiI4QHAHv37kWvXr1iYrvk0iuVSmi1Wuh0Ov5MqVSKxMREjsJ+8YtfYO/evRy5btmyhf+/vb0dCQkJ0Ov13DCK+0W/A4jJDdD+kJcGdCk3Mrrt7e347LPPoFQq4Xa7AdwOw9F9wuEw1q5dy/eXeJkAi8iLZrMZc+bMAWMMHo+HGzWiKfErGQmZTMYrJ8SkY1xcHAcjRLu4uDhIJBLs27cPixcvxrZt2/ieiaGEUCgEnU7H0R95snQ/oEtJnTp1CvHx8VAoFPjyyy8xb948zrMiwKEvUshiriISiWD79u345S9/iWHDhkGr1WLChAmYP38+bDYbLzYg8NVdbml9FB4l2RdzYySvn3zyCaelmAMiT8JisWDOnDl8f8Q8EHmpx48fx+LFi/keEC+SQqd4v1qthk6ni/EICdxpNBpotVoOskg2yBCKCdjMzEzIZDL07dsXiYmJSEhIQO/evTk6Jw+F1imGDUUP4U7jB6HciWlFRE6bQgsEEIMQAMQIATGSmOCjcAUNERlQqKK6uhrDhg3joYhwOMwNiUqlgkKhgMVi4YizqakJbrebK0NSdg6HA1arlSvllpaWGE9DLpfDarXCbrdzxiNDRdUIjY2NcDgckMvlsNvtqK6uRnJyMux2O9ra2nD9+nUkJyejtbUVdXV1XPmSu0txwaamJjgcDjgcDphMJu5aArcTTyaTiSM1EkaKp+bm5kKv18NkMsXEt2UyGcxmM1paWmJCHCaTCRKJBG1tbTxJLVZbtLW1wWKxwOFwAAAPNdHnyUCbTKYYhUmuNKEfp9MJpVIJp9OJq1evorm5GWazGVarFbW1tUhOTsbNmze5wQoEAqitreVKpKWlBQ0NDQiFQjAYDLzChniO1kqK3mQyob6+Hna7nSs0uVwOo9HIBUulUqG1tRVNTU3Izs7m3opUKoXZbIbdbuc8GwgE0K9fPx7nJdSpUCjg8Xg4fUgmZDIZmpubuXDTfnV2dqKzs5PPkxKjHo8Hra2tiEQiMXRjjOHWrVsxRrOxsRHRaBTt7e0wm83cuwQAs9mM2tpaJCUlwWq1AgAuX76MxMREtLS08PADDbVajba2NpjNZg4siAZKpRJmsxlVVVWcvqNHj0ZxcTG8Xi+MRiM3omTkSZmbTCbOn1KpFBaLBRqNBgqFAvX19TyMRvxP3qparYbb7UZbWxu/JhQKITc3FxqNBkajkc9FTIZHo1GcO3cuBoQ5HA60trZyGYlEIrBarfD5fGhpaYHL5YrhIalUCoPBgKamJoRCIbjdbi5rot5ISEjgoIgMAf2s1+uhVqvR2trK10z3djqdMBgMHLh91/hBKHcqW6KSOTFhSCgKuF0ZIKJPt9vNXXuqqvH5fLzsjhASZempfNLv9+PAgQNob29HSkoKmpqauGtL7hPFn00mE958802cOnUKLpcLI0aM4K7j5cuXcejQIajVarz55pvw+/344IMPsH79eowbNw4AMG3aNBiNRnz66aeYOXMmTxBTcrC9vR27du1CcnIyzpw5gwsXLkCn08FkMmHx4sVISkpCVlYWGGNYsGAB0tLSIJfLMWvWLITDYXz44YdYs2YN7r77bpw/fx5NTU0oLS2F1+vFxo0bMWrUKO4yr1q1CiaTCcnJyVi4cCHa29s53Uno09PT4fV68c4772D06NGIi4uDx+PBxo0b4Xa7kZSUhKKiIgQCAaxatQorVqzAuHHjoNVqcdddd2H16tWIRqNobm7GT3/6U6hUKnR2dmLMmDG4evUqNm/ejPHjx2PdunXo7OzEZ599BrVajUWLFuHdd9/Frl27UFZWhmXLlsHv92PNmjXYvHkzRo0ahdWrV0MikWDbtm0oKChASkoK0tLSsH//fkyePBnJycncaO/fvx8ZGRl44403uPFZuXIlLl++jOTkZAwbNgw6nQ4ajSbGk2lsbMQvfvEL6HQ6PPfccxgzZgwHDS+88AKysrKwZcsWXLlyBTKZDKmpqSgoKMD06dM5//7qV7/iCbLf//73kMvlqK+vR1FREdRqNV577TW4XC5Eo1H89re/RWtrK959911cunQJkUgE1dXVmDNnDjIzM/Gzn/0MVqsVly9fxp49e6BUKrF8+XKsWbOGKynKvWg0GkycOBFJSUkAgF//+td47733kJycjClTpqCiogJr1qzBG2+8gXHjxiEpKQn79+/nAIBKdjs6OvDss89yr2b37t1gjCEtLQ133303Ojo6IJPJ8O677+KFF15AWloabDYb2traeIgS6DKYzz//PDZv3owRI0ZgwoQJmDRpEtLT09HW1oY33ngDtbW1aGlpwb333otoNIra2locPHgQbrcb99xzD/e27XY77rnnHpw5cwaRSARr1qwBAGzbtg1GoxHvvfceAODChQs4f/480tLS8Oyzz4Ixhhs3biAnJwcSiQRr1qyBzWbj8yT9cenSJRw7dgwpKSmIRqPYvHkzHA4HUlNTUVRUBABYtWoV3nzzTZw5cwZ6vR6lpaVYs2YNp
FIpKisr8cwzzyAhIQEulwuvv/46/va3v8FgMGD9+vUAgE8++QQejwcdHR1wOp04f/48vF4v/vCHP2DLli1gjMFoNOKXv/wldDod7HY7Xn75ZXg8HmzYsAEulwspKSl46KGHcPTo0e/Uqz8Y5U4JDlLsxLQUoiArSzFOQgfkVpHrTvcRE6OE9Cl2LpPJEB8fj6tXryIrKwsWiwV2ux1erzemsiAajcLlciEtLQ319fUYNmwYvF4vr9KRy+X405/+hEGDBqG9vZ2jJ3K/CwsLEQqFkJeXh8TERIwfPx5lZWW8XpxQ6rvvvotp06YhISEBhYWF+Prrr6FSqdDY2Ig+ffpAqVQiISEB2dnZSE1NhUajgdPpRGlpKU8Kt7a2oqSkBHa7Ha2trZDL5cjIyIDdbufhHolEgq1bt0KhUODGjRu45557eEkWudHV1dW4//770atXL47QotEoqqur8eGHHyIvLw9xcXFITk4GYwwpKSm4desWSktLkZCQwOO6MpkM69atg8/ng16v58rUbDZj6NChkMvlKC4uxq1bt7jg3HvvvcjPz0dJSQkUCgVmz56N7Oxs5OXloaSkBEqlEiUlJZBIJLhy5QqGDBnCE1FXrlxBcnIylEol2tvb8cYbb6CoqAjHjh2Dz+eD2WxGRkYG6uvrMXTo0JhKLJFPotEo1q1bB7/fj8TERMjlcpSWlgIAKioqMHXqVOj1eigUCmRnZyMUCsHr9WL48OFITU3ltPZ4PEhISIBOp8OgQYMgl8tRXV2NxYsXIycnh8d3L1++jEmTJmHgwIFQqVTIzs6GTCbDiRMn8M0338But+N3v/sdUlJSsGLFChQVFcFqtcLv9+Puu+/mCVHyVEOhEPr378/LYRsaGjB58mTEx8ejqakJp06dQkpKCurr6zF27FgolUrMmDEDcXFx3KPVaDSoqKjA4MGDAYAj8NTUVC6zBIDef/99lJSUoLq6GocOHYJer48JY1IIadWqVVi3bh2USiUqKyvh9XqRkZGBhoYGDB8+HElJSTAajQgGg9iwYQOmTJnCq1e0Wi1CoRAyMjJQVlaGkSNH8tAIAIwcORLRaBRDhgyBSqXC2bNn0draCofDgRdffBFyuRzXrl3DT37yE2RnZ/P4OXnn5GVeuHCB/3zp0iVs2bIFOTk5UKvVSE5OhsvlQnJyMurr6zFq1KiYhLBEIsHatWsRCASQnJwMnU6HgoICDB8+nM+NFLfP54NOp8PBgwdRUFAAoAuoZmVlITs7G2vXruX5EKVSifz8fBw9ehTbtm0DANy6dYvT4L9VrP+/v4YNG8YsFguzWCzMarUyi8XCbDYbM5lMrKOjg7W3tzOfz8ecTifz+/3M5XIxp9PJPB4P83q9zOl0MpvNxpxOJzOZTKy9vZ11dnYyv9/PAoEA8/l8zOfzMZfLxQKBAAsEAuzSpUusubmZmc1m1traypKTk1l5eTmzWq3M6XQyr9fLXC4Xc7lcbO3atay4uJi5XC7285//nI0ZM4ZZLBbmcrmYTqdjBoOBtba28s+6XC6WlpbG9u3bxw4fPsz279/PbDYbe//999nhw4eZzWZjZrOZmc1m9sc//pHpdDpmtVqZy+Viy5cvZ0eOHGFGo5FNmzaNr6+hoYGtWLGCeTwe1tbWxn7+85+zuro6Po/S0lJ26dIl5nK52MSJE9krr7zCXC4X69u3L9u0aROzWq2spqaGPfroo8xkMjGj0ci8Xi+z2+382S6Xi02aNInZ7XZmNptZ37592dq1a5nFYmGFhYVs5syZrLOzk9XX17M333yTmc1mZrFYWFFREbty5Qrr7OxkY8aMYc3NzczlcjGtVsv27t3LfD4fW716Nbt+/Tprbm5mtbW1bMaMGayhoYE9+OCDrL29nTmdTvbWW2+x9vZ21tDQwJYsWcKsViuz2Wysra2N1dbWsuXLlzOz2cw6OztZv3792JUrV1hzczPr6Ohgffv2ZR0dHaypqYktWbKE6XQ61tnZyZqbm5nBYGAWi4Vt2bKFFRUVMY/Hw2w2GxszZgxra2tjVquV81lnZyefdyAQYCNGjGDXr19nN27cYIWFhay1tZUZDAZWVlbGrFYrM5vN7NVXX2WVlZXM7/czp9PJ/vSnP7EdO3Yws9nMVq5cyWpra1ldXR0bN24ca2trY2azmfXr149VVlaywsJCZjabmcfjYWVlZcxisTCTycRqa2vZ4cOH2fTp09nGjRuZ1+tlOp2OtbW1MaPRyKxWK/N4PMzlcjGfz8f5dc+ePezixYvM7/ezP/3pT+zgwYPM7/ezUCjEkpKS2MGDB5nT6WRFRUWssrKSOZ1O5nQ6WWdnJ+fLyspKlp+fz+x2O2tra2MNDQ3sd7/7HXO5XOz8+fNs9erVzGazsRs3brBHHnmEr8lms3Feor2zWCysvLycdXZ2ss7OTlZZWcn+/Oc/s87OTr4fdrudLV26lOn1erZy5Uqm1WqZ1Wplb7/9NhsxYgTXB5s2bWKXL19mLpeLPfvss1x+m5qa2Lhx45jVamV2u53V19ezt99+m82aNYtt2bKFGQwGNmnSJOb1epnH42F9+/ZlbW1trL29ndntdi5D/fr1Y0uWLInhd6vVyurq6tgf//hHZjAYmN1uZ8XFxcxisTCn08lGjx7N6urqmMvlYvHx8Wzfvn3MZrOxN998k9XV1bGmpiY2ZcoUVl9fz5qamlhRURFbu3Yt27RpE9Nqteyzzz5jn332GSsuLmbbt29nbW1tLD4+nn366aesvb2dLV++nFVUVLCsrCz28MMPs5s3b7KamhrW2trK9u/fzwBc/Da9+oNA7oSWKT4uWn6xYoPK+ERkTe4yXaPVapGcnAytVsuz1WLSKhgMwufz4eDBg0hLS+OJKMYY6uvreWyUPuPz+bBjxw7MnDkTSqUSe/bswYoVK3DoUFeVZ3p6OvR6PTQaDXbs2METmowxZGdn4+OPP0ZOTg6PJY4ePZrXAiuVSgwaNAhpaWlQKBRYuXIl6urqMH78eNy8eRO1tbUcmX7zzTeYNm0aotEoHA4HDh8+jLNnz0Imk2H37t34j//4D7S3t0OhUKCjowOLFi2C0+nEhAkTMGHCBNjtdqSkpPA4bSAQwMcffxyTUKM4M4Wmxo4diylTpsBisSAlJQV9+/aFxWLBW2+9hfvuuw9GoxGBQAAzZ85E7969sX37drz++uu4ePEiACAlJQW9evXC0aNH8c4776C5uZm7/v369UN6ejoOHz4MqVQKk8mE++67DyqVCseOHcOiRYs4k9rtdjz99NOYP38+3n//fbS0tGDGjBnIycnBhQsXYDKZMG3aNBw8eBDnzp3DqFGjkJGRAZ1OB5/Ph0OHDiEajeLDDz/E9OnT4Xa7sX37dqxYsQJHjx7l+0WJ+5SUFOTm5uLIkSOYM2cOWlpaoFKpkJKSAr1ej9dffx1OpxNr1qxBW1sb3nvvPfTt2xe7d+/mpXZ5eXk4ffo01qxZw9cNdFVXmEwmPP3009Dr9dwTM5lMcDqdWLduHWw2G15++WUUFRVh8eLFyM/Ph0QiQUZGBveCPvnkE5SX
l/P8BFWU7dq1C3l5eZBKpSgsLORJv7feegvHjh3DvffeC4/Hw+lHXhuFRuVyOT777DPYbDYYDAZ4vV6cOnUKCxcuRGdnJ37zm9/gvvvuw7p165CSksJzVxKJBOXl5dwLInkuLy/H+fPneVh09erVeOyxxyCTybB161bk5+cjHA7j4sWLOHjwIBISEpCcnAyJRIJ169Zh/vz5nE+3bduGgQMHAgB2796Nixcv4tixY3jssceQmZmJzZs345133sGSJUvw9NNP49FHH0W/fv3w5ZdfwmQy8XLcsWPHYsuWLTwfQnrG7XZj3rx5qKmpQWpqKvLy8tDZ2Ym3334bU6dOhd1uRyAQwOzZs6FSqfDRRx/htddew5UrV3jIqnfv3jhw4ABWrVoFvV4Pv9+P2tpapKam4urVq+jTpw8qKip4uaVMJsO1a9dwzz33cNlMS0tDZmYmjvkj1G8AACAASURBVBw5gvXr1yMrKwtjxoxBOBxGYmIiFAoF9u3bh6ysrO/Uq7KlS5f+z2jo/4uxfv36pY899hgPqwDgdadiwo0YCbhdt0vKmzHGFZVYW01JB/p/Y2MjPvroI1y9ehWzZ89GNBrF559/jiNHjkCn0+Huu+8GgJgDLrt27cIDDzyA7OxsHDp0CMnJyZg6dSp0Oh2CwSBaW1vR2NiI7OxsZGdnIxqN4tq1a3C73cjKysLVq1dhNBoxZswYJCQk8GQZYwyZmZmQSCRobW0FACxatAgpKSn46quvUFFRgSeeeAKRSAQHDx5EWVkZNwqU5Bk2bBja2trQ2NiIYDCIoqIiVFdXY8GCBYhGozhy5AgA4K677oJMJsPZs2fh9/vR3NyMsWPHIjExkde5SyQSXLx4EdOnT0cwGMSxY8cQiURQUlKCpKQk3Lp1i7udlZWVGD9+PDo7O5GWloa+ffvCZDLhxo0biEajKCoqgsFg4Im9+Ph4uN1uFBcXQ61W49ChQwgEAkhLS0N7ezsMBgPGjBkDuVyOEydOYMKECTHH1CsqKrjBLCwsxIEDBxCJRHDXXXdBp9PhwIEDGDZsGIYPH85DG5To7N27N/r06YOdO3diwYIFyMvLg9VqxY0bNzBhwgS+dgprmEwmGI1GyGQy3LhxAy6XC5MmTeLhitTUVLhcLgwdOhSlpaU8aR0fH4+8vDykp6dj9+7dMecgJk6cCL1ej/LyclRVVWHhwoVQq9XQ6/W4du0a6uvr+T3HjBmDxsZG2Gw2XL9+HfPmzYs5j9HQ0IDMzEwMHTqUV0JR6enWrVvx+OOPIxqNIiUlBTt27EBbWxui0SgmTZqEcDgMh8OBuLg49OnTJ6a2XTz5W1tbi6ysLJSWluLo0aMYO3YsFAoFKisrEQqFMHjwYPTv3x9ff/01TCYTN07FxcVcscvlcpw8eRK1tbVob2/HzZs3MWLECG5UVq5ciYKCAthsNsydOxcDBw5EamoqbDYbHA4HtFotPB4PJk+ejIyMDOzcuRMPPvgg5HI59u/fj6KiItx9992oqqoCYwwTJ05Ebm4uAoEAmpqaUFdXh7lz5+LEiRNQKBSYOnUqGGM4duwYpk6diqKiIl7rzhjD8ePHoVKpMH78eKSkpKCuro7ze01NDSZNmgSr1Yrk5GTk5OTAbDajpqYGEokExcXFcDqdaGlpwfXr11FdXY1nnnkGarUa4XAYra2tuHXrFsrLyzFhwgQkJSXxkB7JczQaxciRI+F0OnnRRHV1NX7+858jPT2dJ7NbWlowePBgmM1mfPzxx6alS5duvJNe/V76uf+fjhEjRrCTJ0/ySgMaKpUKXq+X1zWLiL77EX3qVUI9KCj2DoCfWiRGFo82UxkgfSdhobg/tTAQEQqAGKagZ5GhoQoFuj9wu3yLeraIdbvkfdDBiUOHDuGTTz7B+PHjUVJSgsLCQsyZMwcHDx7kSeNoNIqEhISYfhtU7SPWdJP3QhVF5N2QQNO6nE5nzElAWgvlK+h6MqoOh4MbGrVaDZ/PB7/fzxUl0Qy4fTyfkpZksMUDIpRQF49909oIWTPG+MEdojudBqY8CFXZiLXidDiMaujJoNFzaK+A2/1VxPwNgBglKManaYiHYWjQXovtJjweD6/tFmOmtH/kmZI3ReV6lIOi+VKfIspDnT17FsFgEFu3bsXOnTtjehhFIhEkJSXxQzhEK5o3VZRQYpHo0v1wGwEkAk1ivxaxbJDkg67tnkNTq9WQyWRYtGgRNm7cyOvc6WAWVaZQaSbRIRwO89Oy3avgxHMnpCfI66d1EV3pepJZytlR/oIOrxEd6DtV2pEc0MElpVIJh8OB+Ph4SCQSPPvss0hOTsarr74aU7bY0tKCiooKaDQaxMXF8SqYSCSCgQMHcrmkMsjf/OY3SEhIwB/+8IeYMxBA15mVffv2YenSpeWMsZF3UKs/jLAMgJg2A6JgUb2ryCi0OWJdrPg7cPtEHylO8f908IAEgxQWMTqddKTNpwMt5BGINd70fPIYxBOjxEBURgiAl0/SNfSz2LBq9erVyMrKwujRo3H69Gm0trZiypQpMYJEjEFrJQVIwit6LsSwYpKZvsilJ+VLtBQTdCIzA13KmtZB+0TKiX4nb+pOR/tJmcbHx/Pae1KANE+iJf0snncQj8qTMaHkJ9GVAAEZUKCrhPDKlStcEZCSEeuyxQMptK+kxLoLPtE4EAjwE6Vk5IiGZNTF+1Hlkrg+Wnv3k57iSUuaC4EK8bj7n//8Z1RVVXGPjUoStVotN2biIS2xAIGSwDRIcYutIGjv4+Li+GlnGuKBNvHMCq1BVEpEr5s3byItLY2X79LpUJEeRF8qayWkS3tBte8isAJiw7oi+CODQ7JJ8yOax8XF8fWIPayI9yl0KQJNMrynTp1CZWUlOjs7ER8fj0ceeSTGgyFeJANAvEDykpiYiLi4OFy4cAG1tbW8rPqBBx74BwNJe9K9JLX7+EEg95KSEnb8+HGOLmlDCcmQsJEiEI90K5VK3pKABIasPm0keQBkJIgxRNQiHvoBwBEwEZNKxILBIEcP5DmIB3XIa6D50fFnmh9VD4gHZggV0mdcLhcPP1E9NVW1yOVy3qJBPClIjYZoHcRAdJ2IykRjRHMmRSSeFxCZT/QwxLYGpERIQYhnC4iGYiMwUnZ0TzGsQHMjV5bWEwwGOdIl40773N24iDX7lJuh4fF4YLfbeQUSCQu1ZSB6UMktrYkUIV0rVtaQohaNGN2DaCYaOpG3SVkQremgHT2H/kdCTPXf9BxaK1XeqFQqfuxf9CxJOdD6qJ6d5iWeI6H9Jd6kv1G8l55PMkhroCP+tFckM6JXS96G3+/nB8IUCgV0Oh2nKx0Son0WgQJwu0MoGTvy3sLhMHQ6HTe8dCiJ0DjxJlUDuVyuGI+c5JaeIcojzYFkSpQfsROk2K6EjKZ4+ryyspIjfIVCAZfLhXA4DK1Wi3HjxnFecjgcCIVC0Ov1nP5iopTOfPxXjuFbkfv31lvm/3SQ8iS0SUQhpiTERBsqWjMAMUd+6cg1oVn6n+jaEBoj4yA2ERNPx5KBIfRKKJMScN3dQ1JWpPSJAcQ
TnuJBHWIOeq5UKuXxuHA4jKSkpJij/4QqRGREaEhMiqlUKrjdbn7MnMoURUUqKuzuioy+iL4isic0RApIDH/QXtKgPSK0Td4TKScxZENeG5VTimE3KjGluZCwiaEcUubk3dA+UtmpWq3mZYg+ny/mcAgpQxoiUqcENPElXU/KRzyoRvQSjSutAbgdqhEVKd0fuN0niU5dJiQk8DXSmmn+crmct2MmL0REd+IeiD2GyLARv9E6RC+G6CkCAuovIwIIAHztZOTocB7dNxKJxIQ76RAPyZgYrrtT+wiSM5H2Pp8PCQkJnOepoZvoXZIRFb0vohHQZVjoVLZotMkzJRqJ4EQMxRLQpLYP1C6FDB/tNR3yIr0gAgeFQsHDMQSsdDpdDHih0KMYtu4eArzT+MGEZUg5kFDTJlIDKlG5kIIQQx+0YbSZ4okyEhTgdixQjKdSLLq7+0PMICpSEXEBtwWHKnncbjd3y0mREsNTSEScj4iMRAQMgMdfgdshAFGBkQIWE8si8iJFLyI4UXmLVRLiXGhdovISUTtdL66RjBExtqgUgNuNsOgzhIaIzhQKo//TaVmaOxlkEZFGo9GYDphi2wExpk77R7wBgAs7DRHhEt2IjmKIT+QZkYeIPqKRI8VANBLbTouGhDpfikChO/ITjSh9p/mRMiVPjuYnzkPkfaI7hVvE/afvBCZormTI6Rg9yazIU2KYSQyDEPigQeBLBCViTkHcEzLSBAJoj0TZD4VC3MMl3UHzobmIgEPcd9EY0h6L8k+0o3vR88m7697qhOhEwJCMhVjdR4csifZi2IvuRyEumhvNhXQktWz4rvGDQe4U7wJuJytFFAHcPjpPzEibFolEkJCQAOB2AoSIDIC7tsScYjglGo3y5knkvhFSoPgpWV8SUHI9iXFI2ZMCp42n5li0HjIWoocC3EY+pLxobYSwCFWQQu6eaCSGIXRKDC+iB0rq6HQ6LlzEfGJ8FwB/YQhwuz2A6PWI+0NKiO5B15GgdFfMFDYgxS02LhO9MLG9L/EHKTExFEZotKOjg3tu1DaC4pLUeKw7Mg+FQjxU0D0RSMqP6Cuiayo7pEFtmGlNlPClPY9GozFhQTEhTIJP3gXNQ2wjTNeTF0j3IHRJXicNUjJkQKm1hohOaS4ul4t7YQQ+RPBAayW+of0hz1CMVxNoIb6k/aJ2E6FQCL169eI0pQN45GmJRkqn03FeokQwAL5eOthEckHGWrwPrZfoRvcjT4pki0b3l2t0D5VRrJ32mpQs7W1CQkJMuJOMEQDeq91isSA1NZUDK4/HA61Wyw9Q+v1+Hq4kwEa8QvJP+yr2ArrT+EEgdzEJSEIFdBHHZDLh+PHjHGESgYlwbrcby5Yt4y6PmLwU69vJQooJFPpO1xOTUAKVlJPI7KRYSBgJdYixdrL+Ho+HIwZSuMRYhHZp3vQ7ISYx1i26g6I7L8b/Ghsb8e6778YgV5o7fYaUgYhI6Xli/J3mLyJYsRqEjJeIBEUG7O6VdJ8/eSlieMjr9XJams1mvPbaa2hsbOTKQvTexNJFmvuJEyfQ1NTEBYCMKikP8XmEJsVkq6iUxGeJ+QMxbgsAb7zxBj7//POYsI7YrlqMn5NxERU+0ZYxBpPJhH//93/nz6V5AuCggTwa8kZFJdUdTRNPhkIh3Lp1C/v37wdw28MQ+wqRoRSTnsS3brcbR44cQV1dXQySp8/S50npisibntfa2oqzZ8/i3LlzOHnyJO+xIiJXki8x10ND3AsxD0Q8KIb4iBZED1L0otGm30mu6Vqv18tzL8TX5G3SftD9RI+wvr4eBw4c4MZDzF+R/BIoIlAIgCtwp9PJ9U19fT0voyW+EaMZRBexM+W3jR+EcgfArSvFF2kjLBZLjNIQrbtEIkF6ejpu3rzJUTlZVNEAUOkgue3EuGKHSfoCbpePkSEh5pFKpXj22Wd5x0CxxI0UmFqt/ofujyTMlCSlg1Ri4oY2H7hd/UPuMAmqGPYgYSDrrdVqceDAAZ6MDIfDHPGICICUl6iAxUoMADHPJCTXvZqDytKoPamIcojp6f5kLMWXlxBdaR/feustOBwOhMNh9OrVC/X19UhPT+cCRcqa4q2ESIlPKisrYzpC0j6Lryuk0A/NjdYkhlhIeIiO5FaLSkihUECv1+Po0aPo3bt3TAiAKmZo30lRisqFeJNCbQqFAn379uXNvYg3SLgJtYuVZJS/oPWKfEixcQC8ZXVGRgbfO+IdmovIv0qlEm+88Qaee+45SCQSpKamoqamBhkZGTHhJfJeiYfFtsniOtvb2/H1119j7ty5uP/++/Hpp58iLS2NK09aAw2xuiUYDMLhcMQoTOL3aDSKW7duYcaMGdz7EtvkklckJs7F0C4BGCo2IMMgfu8OIEivEOgSE72ZmZkxQIGeR3rFbrfjlVdeQTgc5joiGo3yxC/xiE6nQ1JSEjcOJEvEdzQnrVb73x5i+sEodwonALdDAX6/H9nZ2ZgwYQJfYDAYhMFg4AzR1NSEhx9+GO3t7ejs7OShDlIcYkmX0+mEx+PhhCJvoa2tDUAXwzidTq5kOjs74fV64fP54Ha7ce7cOYwbNy6mf0ogEOAuJ2OMd5CkUAltGiF3uq9Y6hWN3n7tGjFEJNLVfU58NZroVYhCDwA7duxAZmYmzGZzTOyXnuH3+2MqUyhkQd0ZSSi1Wi2Uyq7XfFmtVkQiEX4IhpJFVqsVbrebr4/CY4S+iZYdHR38FCCt3ePxoL29nc/L5XLhwoUL2LNnDw+xXb16FY888ggSExP/4UQygJjwFtAVs96/fz8YY7yWnZSgzWbjxp/4zGq1or29nX+WfvZ4PHwvKcRFyh24HQOmtQ8bNgxlZWUxLaKp0sHr9cLlcvGKEZvNxg86AV1lmW63m6OympoaPProo1ypMNZ1mtLpdEKr1UKtVvMmb9RmmpKj1AWxs7MTjHUdGKMKFo/Hg61bt2L06NFcIRHooetEg2uz2XDgwAFMmDABANDc3Iy9e/fyroYUTiTeN5vN/J2tdG+SF5lMhp07dyIvL48Dgzlz5iAYDHKAxBhDW1sbV/TEuwaDgaNyiUSCzs5ORKNRGI1GTpcPPvgAarUanZ2dHNXabDbO06QzqD2yxWLhshcOh2GxWLiipfnR+lwuF3+bGNFJLIoQ38O7fft2jB49mv+fOsTStXS4jMpWyRNwOBz8lDK9F2Hnzp0oKSlBe3t7jOdL9yTeYoxh+PDh36lTfxAxd9FdJoulVCpx8eJF7NixA4sWLUJJSQnkcjn27dsHAEhMTMTMmTNx8+ZNuFwuHDlyBI2NjVi+fDmA2+EFoMuF2b9/P1dCI0eOxODBg9He3o6KigoAQEFBAYqKinDo0CG0tbXhrrvuQn19PW7duoWXXnoJFosFe/fuRVZWFsrLyzFt2jQA4EfbjUYjnnzySRw/fhwtLS0YO3Ys3G43zpw5g9/+9rccfdTW1qK5uRkSiQQTJ04E0NVO1ePxwOfzYebMmVAoFNi/fz+Pe997770x4QBiWrGU8NKlS1Cr1bh8+TIcDgfmz58PubyrPe
2FCxe4YZw7dy4Uiq4Ws0ePHoVUKsWQIUPQv39/LugKhQI1NTXYvXs3CgsLIZF0naB96KGHcPLkSUSjUbS0tOCJJ56AVCrFF198gUgkgs7OTjz66KOIRCL44osveGvhhx56CGfOnEFzczPy8/O50pk9ezZXzF6vF1euXMGoUaNQUVGBkSNHIhzuOtBVUFCAkSNH8oZQM2fO5IiR9tlqteLSpUtobW3FwoULER8fD4PBgGvXrvGXpBQXF6O1tRUVFRUIBAIoLCxEfX09jEYjhg8fDofDgYqKCjzzzDMAwOO9YnXI6dOn0dbWBqfTieHDh0Oj0cDr9eLChQtob2/nr6WbMWMGjhw5gsrKSrz11ltYsWIFZs2ahX/5l39BS0sLqqurYTAYMHHiRBQXF8c06lIoFDh69Cj8fj9aWlrw5JNPIhKJ4NChQxg0aBDcbjdu3ryJiRMnYtCgQTh//jza2tr4ydP58+fjwoULaGpqgt/vx8WLF3m1xUcffYR77rkHvXr14gaXwIfdbsf58+djqj7q6urgcDhw+fJlfPPNN1i4cCEKCwshlUpx4cIFbqznzZsXU35MwKOgoAArV66EXC7HoEGDUFZWBo/Hg6+++go3b97EyJEj0dbWhmHDhqFfv35gjOH8+fNoaWlBKBRCWVkZAoEAysvLeXWM2WxGfn4+vvrqK/Tq1QvNzc0YMmQIfD4fvvjiCw7c7rrrLuTk5CAajaKyshINDQ3w+XyYMWMGTp48ifLycvz6178GYwy7d+/Gww8/DKlUioqKCrS0tMDv92PWrFkx3i/xwjfffIPW1la43W5cuHCBe07hcBjHjx/nVXiLFi3CrVu3eMM2q9WK9PR0KBQK1NXVobGxEQAwb948/mxay+DBg1FQUACTyYTy8nJe7kldXisrK79Tr/4gkLtYFUDumFQqhdvtxqFDh2AymaDVavHXv/4VSqUSQ4YMQUJCAjweD7Zs2YJp06Zh+PDh2Lx5M38LDFnaQCAAo9GIbdu2oaSkBGlpaZDJut6J+PLLL/Pj4m1tbbBarRg3bhxOnjzJ3z6+adMmRKNRZGZm4uLFi/jZz36GqVOnwmKx4Pnnn0d2djaSk5Nx/fp12Gw2jBo1CqdPn0Z+fj6mTJmCLVu28JDRo48+ioqKCsyaNQu3bt2CVCrFzJkzcf36dRQWFuLSpUvw+XzYunUrAGDs2LHcFaeErRhrpZhjNNrVtfGpp57C9OnT8frrr8NqtSIajeKll17C8OHDMX78eLz77rtQqVTo6OjAjBkz+FF3QlIUUmhra8Pp06dx7tw5HDp0CFOnTkVhYSFefPFFpKWlobi4GDU1NVAoFFizZg0KCwsxdepUmM1muFwuPP/888jJyUFSUhJqa2tRUVEBnU6H9evXY8KECZg2bRreeustdHZ2IiMjA9988w3mzp2LyZMnIykpCZs2bUJBQQHOnj2LwsJCGI1GAMCyZctgsVh4WScdvgoEAvjRj36EWbNmYcKECdi6dSuCwSD+8Ic/YPjw4cjKyoLJZMJzzz2HF154AaWlpejVqxcsFgvvcGmxWDBq1Ch+jJ54ErhdtVRRUQGDwYCZM2fC7Xbjxz/+MSKRCL788kvU1NSgtLQUwWAQVVVVKC8vR2FhIfbv389DWEajEc8++yxeeeUVjBgxAtevX4fRaERzczPWrl2LQYMGIRqN4i9/+QuysrLwox/9CBMmTMCHH36Ic+fOISkpCYwxTJgwAbdu3cKDDz6IY8eOobq6GmPGjIHX68Xly5dRVVWFpqYmzJo1Cw6Hg58clsvlyM/P572OKJxDcd20tDSMGzcO8+bNwzPPPAO5XI733nsP48ePx7Rp07B582YcPnwYoVAIf//731FbW4v+/fvzU9AU/gLAD5VNnz4d//Zv/4ajR49iwYIFaG9vx6VLl5Camopt27ZBo9HgwQcfxAMPPIDOzk48+OCDaGpqwoIFC7Bq1SpoNBqcP38e48ePx5EjRzB9+nRMnDgREyZMgNPpxCuvvIKSkhLIZDLcf//9mDdvHu6//3688847PIyxcOFCnD9/HtOnT0dqairsdjuKiorQ1taGQCCAbdu2Yf369QgEAlCr1Vi4cCFGjRqFAQMG8PAeyaBMJsP169fR2NiIWbNmcY9VLpejqqoKc+fOxaxZszB37lysXr2a95MifTN06FCYzWa8/vrrKCoq4rrJarXC4XCgoaEB9913H2bPno1HH30UJpMJy5cvx+zZszFmzBhUVVVxT+2TTz75Tr36g1DuQOwr0QilDhs2DH369MGIESMQDAaRmpqKNWvW4PTp0yguLkZ7ezvvP97Y2MiVKMU3xVh5ZWUlfv/73/Na5/Pnz0Ov18NgMHDkRgeEDAYDUlJSUFtbG4NmW1paeOy1pqYGR44cQUtLC8xmM2bOnMkrCgwGA++nTQzh8/nQ2NiI8ePHIxqNYv78+YhEbr9I+tq1a/jxj38MlUqFnJwcrFu3Di+++CKGDRvGXU4SQko8Ed0AIDc3FyUlJTCbzRg8eDC0Wi1u3LiBwsJC9OrVK+blHGq1Gnl5eViyZAn+8z//E7m5uTzkQ0I6ceJEGAwGTJkyhcfojx8/jqamJly+fBnTp0/HjRs38PHHHyM3NxdSqRSPPPIILl26hCNHjvAXDfzoRz9Cbm4uioqK0L9/f16FNHjwYOj1eoRCIU5/MlRmsxmRSAT5+fm4ePEiBg0axMNUdB3FTYPBIDo6OjBmzBhEIhG0trZCIulqAZyUlIRLly7BYrGgtLQUn3/+OZKSklBeXg6j0Yji4mIUFRXh6tWrGDZsGORyOerq6rjLLZYdRiJd/X1GjRqFaDSKhoYGHhv97LPPwBjjfeIfeugh5OXl4eLFi7xSJykpCUOGDMHRo0eRmJiI8vJyzJo1C2VlZairq0NHRwc0Gg0aGxvxySefID8/H9FoFG63G06nE4MGDUJFRQVvj2w2mxGNRvHpp59yFJ2SkoKHH34Ye/fuxejRo6FWq9HY2IiBAwfy6op77rknprJLPLwlkUhw8+ZNlJaW8sRmY2MjpkyZwgHT4MGD4XQ6sWvXLh5OSk1N5WEgMSFqtVoRDocxcuRIPPPMM3jiiSdw7tw5DBw4EIWFhejTpw8GDhzIcxS0f6NHj0YwGMTAgQORlJSESZMmwe/3c0+3T58+iEQi6NOnD7Kzs3kxgtFo5OdSBg4ciLS0NITDYTQ2NmLcuHGIRqMoKSnh7/El0HH16lXuuQJd+auXXnoJZ8+ejUmyU2h17969GDVqFGQyGerr6zFo0CAwxrBnzx4YjUae+xs4cCCi0a624b1790ZKSgqkUim++eYbHvI0m83o3bs3nE4nbDYbMjIyeM6B+Iyaq1mtVhQVFfHcXlNT03fq1B9EWEZMZIlJnsuXL2Pp0qXo6OjAzZs3cfHiRXz00UcoKyvjREtPT0c0GsWmTZvw4IMP4vjx47jvvvt4kocxhk2bNuH8+fPo6OjA3LlzsW/fPpw/fx4vvPACBgwYwHu0JyYmYuXKlbyx0aZNm/Dwww/jzJkzGD58OPLy8tDe3o6qqipcv
HgRoVAICxcuRCgU4jHGHTt2oFevXpDJut5atGjRIhw/fhzZ2dmYPHkyBgwYAMYYdDodrFYrpk2bhsWLF0MikfAY7JUrV/D+++/j+vXr2LlzJ5577jnY7XZotVrOyKKraDAY8MgjjyAtLQ3bt2/H0qVLcf36dXzxxReYP38+1Go1tm7diuLiYnz11Vc4f/48Fi5ciPz8fLz22mtwu93Q6/XcYMTFxUGn0yEnJ4e/l5V6l8ybNw9AF5pdu3Ytb2BGyd1Tp04hGAxi4sSJUKvVPNbf0NCAxYsXIxqN4vDhw3j55Zdx48YN5OfnIzc3l3tMQ4cOxcCBA3H69GmUlZVh7dq1eOKJJxCNRjF8+HAUFxfzCgaKR3766aeYPXs2bDYbNmzYgLVr1+L999/HkiVLeIdB2uPf/OY36N+/Pw9DmEwmHD58GK+++irC4TBMJhOqq6uh1+tRWFjI8z8dHR3461//ildeeQVXrlzB8ePHUVVVhbi4OHz55ZdYv3493w+73Y6MjAysW7cOU6ZMgd/vR0FBAYYNG4ZQKITnn38eeXl5PJm3ceNGFBQU4OTJk7wRF+3v22+/jVWrViEhIQGff/45XnvtNUSjUVy6dAnLly/HSy+9hA0bNvCT0x0dHXjvvffwyiuvAACOHz+O1atXo66ujisXKjGl98IC4P2FVq9ejXXr1sHhcEClUiE9PR1TpkyBzWbD/fffz98ZcOLECWzcJpIWGAAAIABJREFUuJEbWvJuCAhIpVK89NJLePnll9G3b19kZ2ejT58+SExMRHp6OgwGA37yk58gMTERe/fuxauvvspBRVZWFq5du4bHHnsM1dXVKC0txauvvopf/OIXHCydOHECjz/+OFJSUnD9+nXEx8fj3nvvRTgcRkVFBZ566ilUVVUhIyMDkydPRl5eHpe7UCiE3bt3o6mpCTKZDF9//TX+/Oc/w2AwoLW1FUeOHMG5c+fwy1/+Ek899RSA2+dM5HI5PvjgA7zwwgsIh8M4duwYVq5ciaamJrz//vuYN28eZDIZqqqq8OSTT+LGjRtobm7GokWLeE7j8OHDKCsrg0QiweXLl/H444/zt60NHjwY0WgUf//73/Hyyy/jq6++wsyZM+H1evHhhx/i+eefx+nTp3HlyhUcOXIEQ4cO/Va9+oPoCrlx48alTz75JC8To8qN1157Dbm5ubDZbAgGg9wyFxUVYdGiRfjLX/6Cp556CkOGDMG6deswbdo0FBYWIjExkSecJBIJTpw4AQCora3Fww8/jMLCQuTn5+PTTz+Fw+FAc3Mzb//75ptv4le/+hX69++PdevW4V//9V8xePBgyOVyXL16FYmJiRgyZAiGDBnC4951dXVobm5Gbm4uVq5ciZ/97GfIz8/HiRMnuJLo168fTpw4AbvdDovFgmCw63V2x48fRzAY5Gg1JSWFv7uytrYW999/PzQaDcrKyvDTn/6Uh6wIbSkUCmzfvh0zZ86EVCrFO++8g4yMDN6J8PTp0zCbzbh69SpycnIwePBg3Lx5E3Fxcejo6EA0GuWthKlkSyLpemtR7969MWTIEABdOQmK+be0tKCxsRH33XcfT0obDAYYDAb+Nh25XI6GhgaYTCbk5eVh27ZteOCBB/jboXr16oVgMMi7Zur1ev6SiatXryI3Nxf9+/dHU1MTVCoVtm/fjunTpyM3NxcA+LkDiUSCnJwcnDt3DhcuXMCkSZNQUFCAgQMHYufOnQC6Xm6QkZGBUCjEX1tHryw8fPgwGGOYMWMGrFYr6urq0Lt3b47kqeQ1ISGBv3bu8uXLcLvd0Gq1KCsrw8CBA3Ht2jWYzWbcuHEDMpkMSUlJqKmp4cmvadOmQaFQ8M6BhMp69eqFEydOICsrC3l5eZg6dSqv5qqqqsLkyZO5x7Nx40YMGjQIp06dwgsvvIARI0ZgwIABuH79Ojo6OnD16lXI5XLk5eWho6MD165dg9FoRFpaGkaPHo2JEyeiT58+yM3NjSk7JI/O6/Xi+PHj/OU0Op0OmZmZGDJkCL766ivodDo0Njbi7rvvRn5+PmpqamAymVBTU8PvSZVFEokEu3bt4l1BL168iHA4jEmTJkEmk2H9+vVc2TU0NHCvlTozVlRUIBgMor29HRkZGdiwYQMef/xxbjxqamoQiURgNBoxdOhQKBQKnDhxAuFwGDU1Nfw1e2PGjOHdTY1GI65du4aCggJ4vV40NTUhISEBBoMBGRkZGDVqFBoaGjgf3HvvvRgyZAhHymTAEhIS4Ha7UVtbC6PRiKysLAwdOhQpKSno7OyE3+9HVVUV/H4/SktLee6jvr6eG0w6e9DR0QG9Xo/MzExotVpcuXKFJ8cXLlyI9PR0VFRUwGg0oq6uDj6fD9nZ2WhoaIBCocCuXbu+tSvk/5Vyl0gkjcuWLXti2bJlP122bNmTS5cu3SiRSJKXLVu2b9myZf++bNmyWcuWLTuwdOnS73xlyIYNG5Y+9thjMfWyjDGkp6cjMTERAwYMQEZGBi/9ufvuu/lbbkaPHg2dTge9Xo+cnBwUFBTwagCqy87KykI4HIZer+fxOXq7k0aj4W9KArp6uIwdOxbx8fHIyspCeno6CgsLeW11aWkp0tPToVQqeXvRzMxM/oYivV6PUaNG8f7fjDHk5+dDKu1qKyCXy5GWlob+/ftDpVIhIyMDarUaGRkZGDx4MKRSKfR6PeRyOXJzc9G7d2+eSS8pKeGVPhTGotxC7969eRlhcnIyfwVdIBBAQkICxo4di6SkJIwYMQL9+/fn/S3Kysp4X27xngB44oe++vXrx98wlJ+fj9TUVOTm5vKSrgEDBiAxMRH5+fkIhULIyspCQUEBj/f26tULcrkcffv25W8ooiZuycnJvKpCJpNhzJgxUKvVSE9Ph1wuR3l5OW+zLJYlymRdr7mjdrPDhw+HRCLhrZXj4uKQmZmJrKws9OvXDyqVComJiUhNTUVmZiYUCgWGDh3KS/2USiVGjhyJpKSkmJI82iOfz4cRI0YgNzcXQ4YMQUZGBn/jlUKhwODBg5GRkQGZTIacnBxkZGQgNzcXaWlp3EhSNdKQIUOg1+vxv5h78/Ao63N9/J6ZJCQzWWYmK9lISFjDTkAExIoCVkVpqSgoLse1bm3VVnvq17bWU63VWq16XAABFxCr7CDIJmENi4EsbCH7HpJJMjMJITPz/v6I95Nncqz9fk/P+V2+18XFZJb3/SzPej/LJz09XXp2O51OZGRkIBAIwOl0YtKkSQJllZWVSdwoKysLQC9EwWKgzMxMZGdnIz09XYTAiBEjYLfbMWLECAnGORwOyfMmLRG2SU5ORkREBDIyMmC325GQkCCHQft8PmRnZyMyMhLx8fHwer1ISEhAenq6rDfQB7FmZGQgMzMThmFg8ODBGDVqlPRUWbJkCRYsWADDMDB+/HjZV9La5MmTYRgGxowZg7CwMMTHx2Po0KGSKeZ0OuHz+ZCUlASHwwG/3y9ewahRo2Cx9LbDttvtMrbExESkpqbKeQ+ZmZmIi4vD6NGj4XA4kJ2dLXuZnp4uJ1UxiMorIyMDFy9e
RFxcnJzHQF6NjY1FQkICxo8fDwDIzs5GQkKCZPRwz5gs4XQ6ReADkBTH3NxcOVPV4/Fg2LBhGD58OBwOB4YOHYrBgwfj4sWLWL169f9Oy1+TyVQBINcwjAvqvZcAtBqG8aLJZHoagMMwjKe+6z4TJkwwdu/eLVYj81aB4OZG/TMkdE61rlrVxURsRgT0FS7w0lWatF50QQefx+ZD+nOgr8Md15BHt9F1NJlMQf1wmHmh2yxwXHoOvB8DVZcuXUJZWRlGjx4t82MvEXo6XA9Wy+mxMruGUIYutuAcmc5Hl5qQxzd7Kmum8375bN0mQFe8cu31WupMChay6Lxg3aOlvr4eX375JUJCQtDU1ITHH3/8v+Dg+l563GazGVarNaiVA4uvWJxiNpulKlFXX5IumAFBemPAjbTI/i5MNeU6WSy9xzi63e6gYhpCV7pWQUNauikX404UvBs3bkRKSgomT54sAk4XnAF9bbBJT8zu4LroQjvSm87A0pWPOveflZjcY73WpG2d481/zP3XFbGXLvWex/rcc88hPz9fUlY5Tj6f89J4tx476y/4TMYRdAq0xWKR3kpMdeTakPbofeuOpnocTLnms/g5x8R5W61Wac6nK1pJ+xUVFaitrZUamp6eHkm7Ju2FhYWJoRka2nuMo05rpmdtNpul+dngwYP/f235exOAFd+8XgFg3v/Nj8h4ZHYSpxbGFAwARADrQCwXiYsLIMjKYy4tF5dEQsansKWg1xWqWplQUBJ+4IJTyOmiK53/q4ta9Lw0zMK58f6EltLS0oKUnBauuhCFgpJpaSwA6l/MBCCogIOMCwR3B9SMp6v7mKlDWIbrwt+SKblvep46SM0gFZ+rxxEdHY0BAwYgLi4ON998s8yNz+OlBQrXj+vB/zkmHbSnMtW5/3qu3DudH87ncg9JV7otBAApJCJtcJwsrKKg5fz7Vy7rmo/S0lLU19ejqalJ5sC5aZpjWqNuy6EVnl4n0oYukOPacM/4+/7rTGu/P/1yLcgf/FvTBgA0NDRg2rRpUttB/tRVs/ytpiGtTDgG7jXHrXvlcGxUVqQ9beTxHhw376ML+rSC0QWHgUBv4aVOAaWBAgT3h+F+k3/Ii9yHzs5O6Tvl9XrR3d2N1tZWyYtnlhwv7u93Xf+q5V4OwAXAAPCOYRjvmkymNsMw7Oo7LsMwHN91n/Hjxxv79+8XjcuN7V8WDQR3T+QGscMfiZOH55KwKby1xU/BwvnTJSfT8vfcBFa7sWqTRKatCloDTM/j3wDEAu+vEDheamMSDRmdhMMcZj6TRVAAxAug16AtarYc0JWYGrfXRP7Nfsm66DkBCLJUWGmn94P7pwUt/2eOLisZabVqQa8FKteJ3Ri15QlAGJiCmKl9HCutNT0GMh7XiLnfISEh8Hg8AuNQ4FIoaMWmrWadZcJDRFghrddRW8jE8NmAS3shpAv+ljRmGH19iLQF3X+vtJDnWHlP0jHphffgeLQ1r61U/oZ0CfR5CIZhoL29HRaLRQ6zZisJfodj4D5wPvQGtAKhQuFYaOxpz1lXhZMnWS1LBUI+Z+Un6cXn8yEiIgIej0fGReFOXtB8wvGQVpixpvvv9PdyuQbcG4vFIhi8x+OB3+9HR0eHZAaRvqKioqQ/FqEY8nNGRoacmsVgOGn6uyz3fzVbZpphGHUmkykBwJcmk+n0/+0PTSbT/QDuB4C0tDQRVBRKuocD0NdfAoAIZjImG++QwMmceuNopZBJqE11cyFtvWnXjkJBu+v8nBY7CZf51xSeJAYA8mygr9cMUxDdbjeioqJE6JHI6dqTuGhBkrlIzACCCJtwEf+nBaFhEApMrfgYiNaeCtdSu9pWq1WqQbk+VET9LUmmqOqyeN0YSzMmoQ/tedFN1jAL6YSChBBWIBCQk31Yhct10W65hlKoiLm29PaI//J9MjN/x7lqRaIFLwUSGVgLR9ImhZNWFECw16irbnlPrXC0xc/X2jPkGHSLagpPLVA5HzbZ094s15z7yq6drO7UkFx/j49z4x7QwPg2T4H7zT3i2vBvQo8U/BpG0TxJSIU0QKHt8XiktQTXj2uloT7Suf5ce936RDV+ppMSNB8TggkEAvB6vQD6LG/SAscWGhqK+vp6WS/Oqba2VmKQUVFRcDqdcq9/dP1LsIxhGHXf/N8EYC2AyQAaTSbTwG82bCCApn/w23cNw8g1DCOXKVrcLG0VaxePzM8F54J+8ywAvUKTbi8A0bzcMFqWZGDt6vM1hRIvjkULT46HSkO7jDqPWFsRtAa0m6/deQ1LaMue9wwNDQ3q+qgVHz0Z3oMEyTnrNeoPN5GxaDVqHFbPXTOEFiz8Tn/cVVt9at+DxqS9J86T99dQBX/L+fJiewmt/MlYnCP3hUqcAkTfk/un14F7o2lOCy3Sg4ZGuDcaztEWNpWI/i73hJf2qEhnhOe0Naz3mftPr470onFzbV1qgdkfrtH35JpoL5VrSiMA6M271z33ubZ6vvr3jCvQg9HwlYbRNA1pr1VDTXrPaJDwnlQ2VBz0KLQXo/enPyxHOuJztMLT4yUNfFvzOd3vipW/9MZIs0Cfxa+blfH95uZmNDY2oqGhAXV1dWhubobb7cZ3Xf9t4W4ymWwmkymKrwHMBlAEYAOAO7/52p0A1v+ze2nmJQbMDTh+/Dhee+01+ZxnOpLhOjo6sHDhQrGoA4GAnCpOwUVLLhAI4OGHH8bNN9+MgwcPYtmyZXjjjTeEYEgAJKKuri60tbWJBUDLJxAIoKysTCwYCjgKMTbS+uSTT8QLoSKgEiHxlpWVwWTq69hI2IIEpgM4S5cuxZ133ilrw8+112M29xZs3Xfffd+qHPz+vtOTSHxUdCaTSdZOQ0PEFgk5caxvvfUWOjo6ZD7aK+CeElrhGE0mE2pqarBixQqcOHECL774IpYvX46TJ0/ilVdewUsvvRRkeeoSeSpMdu/zeDxYvHgx9u7dKwKQwsXtdksDLjKUtrJ4vf322/j444+xZ8+eoKMDdTC9rKwMGzZswJ49e7Bz507s3r0bb775JsLCeo/5Y7tb7h37s1OYaeXOdacQ8Hg82L17twgrrWQ4VnpGpC+uMb+vG5Vt2bIFeXl5WLt2Ld5++220tLQEeQ4cIy1FjptKr7OzE3v37pU9poLtDwXRGiVdd3d3Y+nSpSK0qPx0nIZj1F7l+++/j/z8fGzatAkLFy7Evn37cP78eTz88MPw+/0oLy/H3/72t6AYgI7LEXahQNWQK2koIiJC6JM8Qm+StEAhr2FHYt1A3zGgpEV6m9pCJ69ow6y7u1vSb+ndGoYh1jnHzjoMegSMmwBAbW0tzGaz9ESqqalBXl4etm7d+p1y9V+x3BMB7DOZTCcA5APYbBjGFwBeBDDLZDKdAzDrm7+/exAKc9MuEBeBjYU0gVD42O12nD9/XrrjaZye39FWbF1dHQKB3p4XY8aMwddffy0brBlPa2jikrSWzWYzVq1aFRSo7O9CdnZ24vz580GuNJ9BgRsWFoZVq1aJJqelqK023t9kMqG+vh51dXVBLiovDQ+w6k5
b9Hx2fwyUvyVD6n3Q1oMWNiRS5jAzBsH5a6VgMvUdbk7BeeLECUycOBFZWVlobGzE2LFjJd31woULMi/tcWjsmOOPjo5GTU1NUB9tjRPTQjQpTB/oa6EbGhqKLVu2IDU1NUgAa8jkwIED2LRpE4YMGYKcnByMHDkSf/3rX1FTUxNkRVJ5kX7omfWHabTVyWwNfdCEtji1Z6YVUn+a4vpu3rwZhYWFGDNmDIYNG4a8vLxv3VeuEQUihSEVJytNNY3p/Tebe89HKCkpEePFbrfDZrMFeT2a3mk4UXEZRm/TuJKSEskRP378uNSE6KwwbSnTM+d4NHRG/tTepzbyuAZa5ujvAH3ZMDS0KAt0nIVrQg9Ue4pms1lqMIBe+IXH6QUCAXl95MiRoAQQ7g0VMemEcoH/WCfT3t4uDdL+0fXfxtwNwygDMPZb3m8BcPX/472C8FkKIFaA3njjjQCC8Uz+6+jowLx584IWg7gxXWbii2azGcXFxfjDH/6AyMhI5OTkwGq1wu12y3FmJDxuWn+cNiQkBBcuXIDT6ZTqPgoLCp2enh44HA688MILQRk/GscOCQlBS0sLnE6n9MnhpvbHPGkt7dixA3feeWdQwEd7MiTQLVu2SEk4LTXCOYFAQAJKVIY8xouMR0FJQUDC1hhzaGioNGnTcJYOdBKD1xh6c3Mz6uvrcdNNNyEkJAT79+/Hu+++C7/fj0GDBklgTgtB7dJSYPp8Ptn7sWPHikAh3XAv6Z1oS03HF9LS0jBt2jSxAjUmXFdXhz179uAXv/iFHGcXERGB2NhYzJs3LygATMFGy5UYL5WkVhoa1omJicGcOXNknPTsOBcNKTG2wDXlXvCeTz/9NNauXYvIyEgMGzYMU6ZMkTiFjiHpgGdXV5fwhmEYcDgcmDdvnuwBaZbCmrSwfft2/OAHPxD+sFgsmDlzJurq6pCUlCSxKD6Hni/H6/P5kJeXh7lz5yIiIgI7duxAbGys1IaweGjPnj246aabpBkY+ZC8RyHM/dbrroWjVlZcLw3B6kCphmxotPj9vTUx/Js0xWdog1DzgdfrFU+jqqoKDQ0NqK+vR3l5Oa688sogRIDzcTqd0taavMWAf//g9Hdd34v2AwCEYAGIBXj48GGBImbNmgWPx4PPP/8cQK/bfffdd2PLli1ITEzExo0bsWnTJrz77rtBViyzKCwWCzweD1JSUjBp0iS0tLRg9erVUtRw6dIl7Nu3DwAk62LSpEnYunUrwsPDUVdXh5tuugmHDx/Gpk2bpEthINBbKhwTE4OxY8fiq6++wrx587B+/Xq43W786U9/gmEYUsEIQA7IXr9+PcaNG4evvvoK06dPx549e1BfX4+EhASYzWZMnToVR48eRVVVFaKjoxEREYHrr79eBLF29SiIEhISsHTpUjzyyCMwDAMulwsHDhxAXFwcWltbkZKSIn0s6uvrMXbsWLS1tcHn8yE2Nha1tbXIysqSHiomkwkrV65EdHQ0ioqK8MQTT0gnR5vNhquuugrr1q1DIBCA3W5HeHg48vLy8Pjjj0tfe7qXLNa49957RSHpgPbkyZMBAOvWrYPdbkdFRQWmTJmC7OxsVFZWory8HCEhIYiJicH48eOxZcsW3HTTTSgtLcWGDRtw3333wW63BzFnT0/vgcMHDhxAIBDAmTNn8Otf/xp+vx+rV69GRkYGjh07hunTpwuz0xq8/fbbsXnzZmFACuff//73yMjIwObNm3H06FE8/PDDuHTpEt5++2089dRTOHz4MPLy8pCTkwOn04k9e/YI5JCSkoLTp0/j0UcfxbZt25Cfn4+HHnoIMTEx2L17N3bu3Inf/OY3WL16tdD//v37pYoSAKZMmSICl0Kvp6cHSUlJeOCBB3DLLbdg4cKFuP/++2UP169fD4fDgaNHj+Kpp57C2rVrUVxcjBEjRiAuLg47d+7E5ZdfjiNHjuBXv/oVbDYb1q9fj8rKSowfPx5lZWWYNWsW4uPjsWXLFrz++utISUmRIjMAcLlcIph0wBMIPqaOSkYrtfXr12PhwoXigd9zzz1obW3FRx99hEGDBqGiogIHDhzA448/jrCwMOzfvz8I9rnyyiuxfv16VFdXIz09HXV1dWhoaMA111wDoA//nzZtmtAeIdsvv/wShw8fxmOPPYaoqCg8//zz0nfpjjvugMPhwObNmxEXFyf9ktavXy905nA48MUXX+DJJ59Efn4+nE4nGhoacNVVV6Gurg5ms1mquuvr67F3714MGzYMDQ0NQUZkeno6CgoKMGnSJHg8Hpw6dQrDhg1DU1MTbDYbvF4vOjo6pJJbn173bdf3onEYmUYHTi0WC+rq6pCfn4/a2loAwLFjx+BwOGCz2XDhwgX4/X4UFBRg8ODByMzMxM6dO8Vi0Dg+mZbtNs+fP49t27YhLi4Od911F3w+H/bt24fPP/8cI0aMQFZWFsrKylBQUIB3330X48ePl6PUhg8fjoaGBkydOhXDhw9HQUEBqqqqsGPHDgwfPhyBQABHjx5FR0cHDh8+LALlk08+wahRozB06FBUVFQgMzMTDQ0NuOKKKzBkyBAp7nA6ndKKFgA+/fRTOBwODB48GEOGDJG2p0BfwAsAVq9ejdLSUkyYMAEejwdjxoyBxWLB2rVrpeFUbm4uVqxYgeLiYmRmZmLbtm0YMGAAcnJysGPHDqkuPXz4sGQrHTt2DImJiRgyZAi+/PJLmEwmlJWVYeDAgdJXpru7G7GxsaisrMSoUaPg8XhEYeixAgjysDweD4YPHy6xA8PoPXbs3LlzGDt2LNxuN3p6eqRT5ogRI5CZmQm3241AIICTJ0/C4/GgtrYWycnJch8NM1y8eBHr1q1Deno6cnNzUVlZKdZjdHQ0Zs6ciVGjRgXFTegZnT9/XjwJXhcvXkRmZib8/t5WEV6vFz6fD8XFxfjyyy/h9XpRWFiI6upqqWAcMGAAli9fjhMnTiArK0tO6HI4HNIj3efzITExUXr4xMTEoLKyEoFAb3Mwu92OzMxMVFVVCVxFa5Tr+Ytf/AKjR4/G+vXrsWrVKvEQT58+jdDQUGRnZ8uapaWlSdO9CRMmAABiY2Oli6TH40FmZiYOHDgAu92OQYMG4euvv4bVaoXNZkNXVxcmTZoEAOJd1dbWynkDOpZDa1kHNzl+ejWdnZ0YPXq0BGqtVivq6upQW1uLtLQ0jBo1CmvXrgXQa/ytWbMGOTk5GDFiBKqqqtDZ2YnU1FTs3LkTsbGxSEpKQnh4ONauXYsRI0Zg8ODBqKyshGEYQZlVnZ2dcrg1jcqvv/4agUAAcXFx6OnpQX19PcrKyjBs2DDpl9TV1YX4+Hi0tLRg2LBhiIiIwAcffIBz585h+PDhsFqtcuIUAMljZ4HmsGHDEBkZKRXkTU1N6OnpwcGDB4NoIhAI4PTp04iMjERKSgoKCgrQ0dER5GH8o+t7Idx50QLhBs+dOxfjxo0LchNfe+01HDp0CHfffTfKy8uxa9cuzJgxQ/pqAH2FSz5f72k9xCQ3bNiAZ555BrNnz8bChQvx6aefShDt/vvvR2JiIk
6ePImSkhLcfvvt8sx77rkHR44cQXR0NAoKCnDo0CEkJiYiOjoa8+fPR0FBAR599FHExMTg6aefxo033ojDhw/jiSeegGEYuPnmm5Geng673Y7ExET5zYEDB6R8ffPmzUhMTERISAgKCwtx6623Ij8/H8OHD8esWbPgcrmwePFihIeH480338Rf/vIXvPrqq2KZfvLJJ7jvvvsQFhaG2bNnY+LEiejq6sIHH3yAe+65R8qwDx48iAULFsDn8+FnP/sZxo8fj4qKCtx5550YP348Tpw4gSlTpgAAjh49irvvvhtXX301mpub4fV6YbFYMHDgQGzduhU33ngjAoEAfvSjH2HHjh24/fbb0d3djRMnTiAmJiYIy2QATWOheXl5uOuuuwRDNplMeO6553D33XcjIiICp0+fxrBhwyRd9KGHHsJvfvMb/OAHP8C5c+ewbds27Ny5E4ZhYP78+bDZbOjs7AQAcbGffvppLF++HDk5OYiPj0dHRwdaW1tx6dIl7N69G2PGjBFYjKc26bRA0hNdcQo/KrRdu3YhISEBH3zwASZMmACz2YwFCxagoKAAV1xxBRISEvDUU0/BarWipKQEzz77LIYMGSIpvEeOHBFPLSoqCgsXLoTD4UBWVhZuvfVWbN26FbGxsRgwYACKi4uxcOFCUURAH45eVlaGuXPn4t1338XWrVvxyiuvYNmyZdizZw8WLFiAH/7wh4iLi8N7770n+1JQUICrrroKUVFR+O1vf4sBAwbg8OHD0ibDZDLhgQcekP7i9fX1CAQCeP/996WfC5/vdruxadMmaVnRP5DOAh0qcR3kbWpqwsSJE3HZZZcF1UF89NFHGDt2LJxOJ/x+P6ZMmQKLxYIvvvgC8fHxKCgowLFjx7Bw4ULh40ceeQSTJ0/GzTffjNGjRyMuLg5Hjx7FkSNHMHv2bDQ2NsLr9cqBMzwMZffu3aJQ5s6dK3E5AHjiiScwZMgQnDhxAgAQFRWFG264AVu2bMH2liLOAAAgAElEQVQtt9yCuLg4PPzww/jiiy+kGygVANMuA4EAYmJikJqaisGDByMjIwM2mw0jRoxAYWGhtJMAIE3bxo8fj+7ubmRmZsJmsyEyMhLNzc0Cqzmdzu+Up98L4U6sWWe8XLp0CX//+9/x3HPPobCwEJs2bcJnn32GzZs3Y/Xq1di0aZNYcxaLBUuWLMGjjz6KDRs2iAU3YMAAOJ1OmM1mVFVV4cMPP8SYMWNgGIbg5uwSFxoaioULF+L666/HnDlz0NDQgLVr12LFihV49tlnsXr1aoSEhOC1117DpEmTUF1dje3bt8PpdCInJwfTpk0TpvF6vbj22msFy+3q6sLVV18tn7e0tOCNN97AxIkTUVNTgy1btqCmpga33XYbrrzySsyaNQvt7e04ePAg5syZA7PZjHfeeQexsbFYt24dFi1ahDvuuEMyZ9rb23HllVfC6XQiLy8Pd9xxB3bu3AmXy4UrrrgCycnJciLPK6+8goSEBHzyySeYNGkSfD4fVqxYgbFjx6K7uxvvvfce7HY79u3bh3379klO95IlS3D55Zdj48aNiI6ORl5eHk6fPo3y8nKUlZUJc3z88ccAgPz8fAAQ6ICZJRQIPJ1m6tSpYuE3Nzdj+/btiIuLw969e3HkyBEcPHgQv/vd7zBp0iSsWbMGdrsdFy5cwEcffYRRo0ZhwYIFePLJJ+FyuYLKv5k/vnfvXvzgBz+AyWTCsmXLUF9fD7vdDr/fj/3790vWSlhYmOC2DEKztSxx9c7OTqxfvx5mc29rg88//xzNzc0AgKKiItxxxx1oampCdHQ0cnJycOnSJcm4ys3NxebNmxEbG4vXXnsNERER+N3vfoc5c+Zgy5Yt4mVdd911CA3t7Qy6YMEC1NTU4Oabb8a0adMwZ84c1NbWStUiBXxDQ4Ocv8qYAr9/4sQJyU6xWCxobm5GSEgIPv74Y4wePToo2Pr73/8ec+bMwebNmxEIBLBy5UpMnToVhmHg7bffxqJFi9DV1YXDhw/jrrvuwldffSW8u27dOhGwhmHg9OnTCAsLEwiS2UzMQmG8KDw8HPn5+bj77rvhcDjE8u/p6cHu3btx1113ITQ0FB9//DEeeeQRadG9YMECTJs2DVdffTUaGxsRCATw8ccfY/r06ZJBxN7ow4cPx7hx41BaWoq2tja0tbVJNlVnZyc2bNggDb8+/fRTXHbZZaisrERhYSE6OjpQXl6OadOm4bLLLsO4cePgcrlQUlKCvLw8uFwu1NXVobGxERMnTkRubi5iY2Olb/yFCxdQV1eHtrY2ybKbMmWKrJPX68WBAweQnJwMn88nDcTq6uqQkJCAqqoqZGRkICYmBl1dXZg5cyYuu+wyWdvvlKv/WwL7//ViIEzn1H7xxRfiHtfU1KC5uRmtra2YMGECJk+ejJKSEowd2xvTraurk8MO+hciXLx4ESdPnkRdXR26urokaMUGYM3NzUhPT0dRUREaGxtRWloKr9crR4jV1NRg3Lhx8Pt7TxtKSkrCqVOnYLVa0d7ejvHjx0s/jJCQEBQVFWHMmDEoLi6GYRhIT09HRUUFXC4Xzp49i66uLrS0tGDgwIEoKSmB1WrFuHHjcPbsWbjdbpw9exYejwcjR44UAqmqqsL58+fh9Xqlmi0mJkbSF3t6euByuXD48GEAQElJCaKiotDR0YHGxkbs27cP5eXlmDBhAlwuF4qLi4WR9OHHLS0tOHHihDTBSkhIQFNTE6qrq5GQkCDl82lpaSgrK4PVakVhYaFUzh08eBCDBw+Wvuq0fumecj+Ki4ulsx8DvQMGDBDmOX78uFjvZWVlYjkSTuEaM0WzqKhIglrMjgGAq666Cm1tbWhpaUFZWRkefPBBCXzpfjAa4uJ78+bNQ35+PlpbW9HS0oKzZ89K8yqTqbc5WXh4OKqrqyVgHRoaCpfLJa2Je3p620F3dnbK0WvXXXcdgN4upaNGjQLQ6yFGRkbC5XKhoqICw4cPh81mw4QJE3DmzBm4XC6UlZXJmHVWGPmjuroaLS0tOHr0KBYvXoz09HSMGzcOqamp8Hq9KC0tRUFBAUwmE0pKSqQ3PvnlzJkzYvyEhoaisLAQLpcLNTU1yMzMFIjKYult1saj70JCQnD06FFcc8016OnpwcmTJ/HSSy+JYKdC57gJ1fn9vdWa+/fvR2xsrJyJy+eHh4djyJAh8Hq9yMvLEyNswoQJOHv2LNrb23H+/Hl4PB4AkANkuru70dXVhaFDh+LMmTOoqanB+fPn5SxlBuYJFzmdTlitVrS0tAhWX1ZWhuTkZERFReGKK65AQ0MDmpqaUFJSAq/Xi5KSEukUSXnS1taG8vJynDhxAocOHRLDp6KiQjwSZiJ1dHTIXtrtdnR3d6OtrU08XPJLZmam9KCpqKiQdufMPvuu619qP/A/dU2YMMHYtWuXEBoJgC7UoEGDYDKZUFdXh46ODumsd/jwYWGCoqIiBAIB5ObmikvOBXK5XDh9+jS6urowZMgQDBo0SI7xS09PR2xsLNra2lBRUYGwsDDExcUhOjoatbW1cLlci
IqKwsCBA+UcxvPnz2PUqFFwOBxyhBuzJZjtcurUKdG4PT09cu/Y2FjExMTA5XKhtLQUEydOlDzcgoICEZzsU19YWIiQkBAkJyejra0NWVlZQQVEFIrFxcWyNuXl5Rg8eLC0M3W5XIiPj0dsbKzENs6cOYOcnBxhYvYaLygowKBBgxAbG4tAoPf4wJaWFsHUmZlSWFiIoUOHIioqSgg9LS0NDQ0NqK2tlaPAdD4+g3/V1dVyHqTNZpMufjyftKioCCNHjkRVVZUE7HgaU3JyMiIjI3Hq1ClkZWUhIiJChH58fLzAcTrTqLq6Gm1tbRg3bpzsD9B7tNkrr7wiqZC0JnUxVkVFBTwej3SSZK6/yWSSY/CYzujxeDB27NggeIdK5ty5c7h48SIyMjKk42RRURF8Ph+GDh0KoNfLOXv2LKxWqxz0EBLSezCM1+vFkCFDYLfbha45hsbGRpjNZjQ3N0s3Uh5iAfQmH5w+fVp+bxgGiouLMXz48KA+MSdPnoTZbMbll18OAJg8eTL+9re/wWKxYMiQIQB6BfvJkyfR1dWF3NxcRERESEvp6dOnS3C/rKxMmpwxNsDfM+ZBgeVyuaQ7KD1xoNdgS01NlQNZOjo6RLkWFxcjEAhItlloaCi+/vprjBw5UjKKvF4vKioqxACyWq0iG1jqTyXZ1taG9vZ2xMbGoqKiAoMHDxZ+8fl62wjHxsbC6XTC7XZL6wAeyt7e3o7Kyko0NTUJ1m6326XlBtDbVmDAgAGor69He3s7zp49iyuuuAJ2ux0tLS2C/dODoRKqra2VDpHE8mm5P/zww/+w/cD3Rrhv3749KKLOVDpd+s58Uwo3Xfmm04L4G7rX7N6nO0BSw+ucYRaL0KpjVJ+pULSQWXlKa53FMox662frtCzm0DKuwHvwuUAvw7rdbhH4upovEOhtJKY7UernMDMC6MsUISRCWIFCVOcCs8cK79c/v17nRDPgyHno/is6L5p7yXGx/J1z5Hro1Lz+OeFAX7Bd50dTCNMaYs2ATrPTfUP4TOLoq1evxowZM/Dss8/i+uuvx5gxYzBw4EARyLoARqecMTDPdeq/twzQ0Vrl9/p7MBwX98vr9Qo0xJxtrjWtM74mL/A7pGWm6ul0Sr2OPT094l31HzehHK4reWzx4sVYuXKlpIZyDbTl2N3djY0bN+L6668X6I20RDogb+nAOd/TvE1hRlqKiIiQ+IbeA/6W6YLMbiMEyvtwL3R+Oj16jod1IYyr6L2h59G/SLC1tRWdnZ2SpFFdXS3FWUAvTEbDxW63S7ZYeHi49I8qLi5GVVUVpk2bhqioKGlNwoxBZroRYtQ58KR3k8mExx577H+tt8z/yKWZEegrYOJmkCj43f7FHSRibbHTC2CEmkEa9oYg8/H+Whj4/X5JfSMGaxhGUDthjs9kMknObf/WCHQv+T2dJaBzmLUAITapx8f/KTz0mHR75LCwMHR2dgoWx/xyWqocN5UDc6VZFde/6lUXXHGPNLxCAtOFKrTUmOPM+3IPgOAOfhRaFID8js5R1wVs/C0LfyIiIgD0BVApJIG++gN6DSEhIYKB5+fnY+HChThx4oSc0EUrjJitzWYLqh/QQlA/h3vNNeVrXhTGWvHqKl6tIGmIAAjKUeczSUs6n5t7yz3ozx+kOe47vxcRESGKhcqks7MT7e3tOHLkCKZPny50QLoh/MQg6r59+zBx4sSg5m7aa9YGB8fC+evWC9zjixcvIiIiAhaLRTKRtMIiDdDI4n2IX1NhMQ1Xt4AgP1PA67RMGlBU3gkJCSIPWP/AcSYkJEiF7+HDh3HhwgVERkbCZOptPEdvIyQkRAQ8m/XROMvJyUF2drbkzlPe0KA1mUxB8UB+pnmCPPCPru+FcKdgBhCEJeniBF0erC14bWXReiMBc2EoaHiRySms+F0Wo9B150IDwb04SOz9rWcSVGhoqDANGYwKRTMSBURPTw/cbrccgEwBqS1fLcB1ChwFqq6oY1aCruzj/CmsaNmTGb1er1gm/RmJDMT70/pnUy6+p6trOVcSJfdWF2ZQ0epCrYsXL4olZRiGPIOFVhQc2mvidyn0eCA0GUZXABuGIemvFotFYKuvvvoKubm5cmITgKAGdrT2qJip1GhIUDnRqKC1RWHL+dFy1oyuLXubzSZjozDV3iR5g0JQ0x8Fan9+ogLlONn9lN6pFn4WS29l7owZM+T8XADSfIvz9/v9iI6Oxpw5c4QuuQderzeoJzxpib8nz+pgNc8ToHdBuuacSIvaetVGAhUflQyrWNktlsc96p5T9FJsNpvwBNdSrymL8TgH0iXrX3p6eqRKmggBlQLfo1wjHfCid6AVc1tbG8LCwgSa1YqQCjg8PFwOgPlH1/dCuFMIcFE4eQ1rkKi5yHqB+B5fk3h0EyNtkfNzXZ3GDaXQ1daGhlooJIA+YuNzqXA4BgpdegaEeHQeMH9Li68/xKItdy18Obf+1hkQDEvxPc5dW+caeuD4gD5PShOiZn666Vqg6XXgd/V66vn0t/Y5Fj6La8x91x4VP+dFJtQCQa8N15tCgXPkvbOzs1FUVITKykpERUUhPj5eaAHo60CqPcn+cwH60iV5aWuTypWwEe+r2yRwTTgPQmnEj7X3YrFYxH3XdKrpSQtRLQj7f86907zCvSEdcPykfx181lXBXF+9//TiSJcUlP09UhoDhFB05pIeN2WF9k6oRDQsRYNQH9LC52lITMMc+lCf/nCcNu4sFgvOnz8vQpj8oJWC7n/DveF3OWd6KPQ0+sObGsLj2AnpaqjsH13fC+GuNbS2LLQ240bQAiURaPgE6GsuRkvSZDJ9a4tPjWtyDFqo0GrXxNPd3S2WgMb7eQ/2Zec49HsaeyahkfnJyDqFk/PWEXHdVE1b5lrAcIwcLz/nPAAEQSSayYA+JUBLj9+jVQP0Hdah//b5fGKJci3JnGSwiIgI8ax0uTitV2ZI8Hf0prg/WuhSwfWHr8g83EdtPWrLkp7gxIkTkZGRgQ0bNqC4uBgOh0MOHabAYIYDrWltCQ8YMCCo9apWULy018D70mvifbifXBvWFGjYSu8l70uviXvF1/y/v+InPZGO9HgoVDkOHa/g87iWXq9XYD2uDT0sQkZasPUXyOSN/l4QW0CzxQWVmI6zAX2HlFCQkldpwNATbWpqEpiSFm94eLicdhQeHi5zplIgf3BNSKc9PT3wer04ffo09u7dK0qO68h0WpvNhvDwcEmy4H35fa6J7v2vDb6BAwfCbDbD7XbLsZY+X2/La9Z8cD7fdX0vhDsAwZ3oUmnrSwfSGhoaEBUVJQKQBEKmotDQQqZ/wJRET8bQwR4dAKJ7ry1xrQy4UQyokmi1xawxUSosEiIFVH+clMxOhtEQh2EYkj3hdDqDmlRlZWWJtUrYQlv/2tXUlqcW7Byrtka0ZcY1BRAkAJqammAymTBw4MAgRtbQEe9FC7W1tRWBQADx8fHw+XyorKxEREREUH45BQ7Hp+EN7omel25epi1tQglagHGMdrsdaWlpOH/+PEpKSqRSMhAICE5NAc0eLyaT
Ca2trWhvb0dWVlYQPKNxcaAPCuOa6pgLA/vd3d2orKxEWlqarBv3TNMw11YLXM6bwkUfGQn0nUzW35MivRlGbzyJ56SWlpYiMjISSUlJQQJeKyntbei9pjLnmDlWrgv3iTygaUpDghR6HD8/6++N0hugMUM+pMHAA24Mw4DdbofVakVYWJhYzRqL1/TKSxsTbrcbVVVVknLJS8eX2D6htbVVxhwVFSVzIbzIsesgOeeklS4NXSo18oLVav2nlvv3Ks+9u7tbGuSw/eWePXvw+uuvi4DfsGEDqqurZcIk0k2bNuGxxx4D0NebpqurCxcvXkRbW5sIfAZYAEjJMYmWQoeLxh7aWtPyn8lkkuAmLV0SMbFpMrB28Ql/kEGYJ6shA46RiojjZKD11KlTuPnmm+FwOIRYn3nmGbhcLjQ1NUn/HVq2GsriunINiOW9+uqroqRY8g/0MbVmMOYR854vvPAC3njjDfz5z38OIlSOXR/MoYUBAKxatUoE4cMPP4xVq1aJx6XdVO3FcT49PT1B+0Fa0MU03AOuOyENndMeFhaGa665BjfccAN27tz5X5Q0GYxeIb2Ljo4O3H777fJ9zpkWLNf5r3/9axCsxx4h3AugN6j605/+NEhwkRZJC/yuFpy6CpQwDgWPhtL6Bzh1IFHDX4FAALfddhs2bdokz6flSiUfEREh3iatUs6VhkVERIT8zX1jLEt3cPV6vSgoKJD9pBLm+GnA6cwYHQfRh3LQINPescPhQEpKCrKzs7Fu3ToJcpLfeE96n36/Hz/96U9lLblHxcXF2LVrF/Lz8+XkNypni8Ui7VDKy8tRWVkpKYtbtmwJgmPoVWuoSK8xeZQZQFarFWazWRSEw+FAUVGRQELfdX0vhDuxJLqVTCsKBAJ4/fXXJUDj9/uxadMmyXsnQwcCAVx77bXS950LRwIiUdJSpuCg5qYw4fN1WbXW7ux1TtyeykBbKGazOegAAlontFSI7fW3OunaWywWREREyKZSw5ORfD4ftm/fjmHDhmHgwIGIjo5GVlYWZsyYgcjISCQkJGDhwoVB1izdUs6PgSQAklvOoBAFEIlWB8No9bKDJueydu1azJ07F3/9619lTwnb6OfQEibkcODAAdxyyy0yZ6/Xi1tuuUWIXge4KFQ4FsIXVBS6n5AWANotpuVOmiMuymdlZmaitLQ0SDFTcPE1GdFsNmPv3r2Ijo4W4U63OSwsDJGRkUILmzZtEouUe8Cx6fVi/jLXjbABYUb2FeLc+CyNq3PPqSQoaLWhwedrXJjHC1qtVkRFRUnLDyoHehtRUVFSnEVcW1vMWrGbzWaBbqjwdJomAOzatQsxMTGiiK1Wq6ydhvDIO/yfXggFLHmJe8Dv2Ww2GIYhFamxsbFBZzDQw+WYm5ubMW/ePOTl5eHAgQMoKipCfn4+Tp48KbnwnZ2dqKurE6XjdrsxcOBAHD16FB988IFkx6SlpWHSpEmyboSEiL8TYmEqpPawOFc2CAsJCUFERAQuXLiAkSNHCtT8Xdf3CpahUCHUUl9fj4qKClxzzTWy+dXV1WhsbERXVxfS0tLE4q+rq0NycrJgbjxc1uFwICoqCoFAQCwuBqxo1ZaXlyMuLk7y2H0+H2pqamC1WhEbGwu/34+qqiqkpKRI5WX/wgOfz4fU1FQEAgG0tLQAgMANZAIAUsQTFRUFu90uwpTpVgAk3YwES+XCDJfi4mLp/1JSUoLc3FyMHTsW4eHhOH36NLKzs2EYveX87JLY2tqKyMhI6fzY09OD1NRU6QXjdDpRU1ODxMTE/wIJUBh4PB40NzfDZrNJtSp77euUM3okGk5ra2uDy+VCdHQ0oqKiAACHDh3CDTfcIPPPzMyUFDQd19B4K+9XVVWFQCCAxMREtLe3w+fzweFwoL29XYrF9DwqKiokLY0C6Pz587BYLEhISEBbW5vgmy6XCzExMfI8DUWZzb3FQh6PBwUFBRgyZIhYXV1dXaitrUVkZCTi4+PR2tqKrq4uREdHw+12IyYmRui1oaEBcXFxck/SS0VFBUJCQpCYmAjD6K16vHjxolTD8iorK0MgEEBycrLsT3t7u3hUycnJAPpOSHI4HJKuR8+Wz6YBUVFRgZiYGAwZMkSqn/1+P5qbm9He3o6MjAwAfTn3/G1oaCgqKipgNpths9kEXuMYDKO3SZzL5UJPT4/00mlsbMT+/fuRkZEh9E5FQiVosfQWO3V0dMBkMklxltfrlaKt+Ph4oXen04lAoLf4LiQkBJmZmaiurkZtbS2cTieqqqokI6qtrQ0ejweJiYkIDQ1FY2MjDhw4gAsXLkjxIau3W1paEBkZia6uLkmTpXFJhcR+NW63GxaLRfjE5/OhpaUFVqtVAuQsvKKCIM4fFhYmBX6EXqh8Acih4klJSUHw0bdd3wvLXVu9QF+yv9VqxeDBgzFv3rwgqCIsLAxutxvLly9HaGgoPvvsM8ydOxerVq2CYRh48MEHsWfPHhQVFeHll18OslRJMD6fD5s3b8bzzz8Ph8OB+++/H83Nzdi2bRv+9Kc/ITQ0FE8++ST+8Ic/YPny5WhoaMD8+fMRERGBXbt24f3330djYyMeeOABWK1WnDlzRtoBFxcX48c//jFaW1tx/PhxFBQUIDQ0FAUFBbj77rsRFRWFl19+WSCoQCAgxAL05cvr4gsGAg2jt3I3JycH69atQ0REBEJDQ5Gbm4vPP/8cb731FpqamnDp0iWUlZVh/vz52LNnD3bt2oXnnnsOr732Gjo7O9Hd3Y1NmzbBMAx8+OGH0l9dY6wad3/55Zexdu1aREdHY/fu3Th58qR4P9OmTZM+JTpnnQr0+eefx7Zt2+B2u/HSSy8B6O09s2PHDrFMv/76ayxevDgo44jEz1iFFvZ1dXV49dVXcfLkSURFReGxxx5DWVkZIiMjcdVVV+HDDz8EAHz55Zf44x//iMjISPz617/Gn//8Z3R3d+O1115DT08P3nvvPWzfvh0RERGoqKjAvffeGxQv0QFbk8mEdevWoa6uDjabDdu3b8fixYvh9/e2D37hhRdgs9nw/vvvo6CgAOHh4SgvL8c999wj67l27Vq8+uqriIyMxHXXXYdTp04hJCQEZWVlGDVqFAYMGIAXX3wRPp9Puo1euHAB8+bNk7149913RcDu2bMHFosFhYWF2LFjBwDgpZdeQmRkpPT8iYiIwAsvvCAGEmmKB08bhiFrtHz5ctx1113CIy+88AJOnTqFnp4e/PGPf5TAOT0EBi9ramrw4osv4uTJk/B6vZg9e7bQQkVFBT755BN0dXVh//79OH2696jl8vJybN++HYmJiSLQCfnRMq+oqMCGDRvQ09ODZ555Rg6J+fzzzxETE4MDBw7g7NmzMJt72+ree++9KCgoQGdnJ+bPny9B8U8//RSzZs2Swz9eeOEF/P3vf8e5c+fw05/+FAcPHsTHH3+Mc+fOYcmSJaiqqsLRo0fx6quv4vjx47BYLHjnnXcERo2MjIRhGNJHhrnrM2bMwLvvvou3334bhw4dQnR0NBobG+FyufDb3/4WALB27Vq
sXLkSHR0dKCwsRGhoKP7jP/5DPLzm5mZs3rwZTU1N8Pv9eP7558XD7ujowMsvvwwA/zTP/Xsh3IE+ods/3Wn8+PFIS0uD2dyb08xGWLrbGosBhg4dCpPJhL1798rpMNdee20Qvgr0pV7+53/+JzIyMpCQkIBJkybB4XDIe0lJSUhOTsaGDRsQFRWFwYMHIysrC6mpqZg9ezaSkpIwZMgQDB8+HHFxcRg8eDC+/vprXLx4ETk5OUhLS0NkZCSOHTuGjIwM9PT04Ouvv8bevXuFWWlJARA3l+6ZhhmAvtPnu7u7kZCQgISEBDQ2NiItLQ0Wi0WOnuvq6pIDSDIyMjBkyBDMmjULM2fOxEMPPYTPPvsMXq8XXq9Xys/PnTsnfe2Bvlx6rtnFixdx7NgxzJkzBykpKZg6dSry8/Pl+MKcnBykp6d/a3CNgdJrr70WKSkpyMzMhNVqRUFBQRAsUlBQgGHDhgUF3nw+n2DcOkvE4/EgOzsbTU1NyM3NRVxcnJT+JyYmircUFhaGt956C+np6UhKSkJSUhLWr18vzaI6Ojowffp0jBw5EjabDVVVVRg/frz0UOmfFWWxWLBs2TLk5uYiNTUVAwcORHZ2NgBIp8Tk5GTExMQgLS0NdrsdNTU1GDt2rEB6K1aswLhx45CSkgKbzYaUlBSYTCacOXMGt912G1JTU6UF7Mcff4zrr78eSUlJgvd7PB6sXbsWXq8X7e3tyMnJgdvtxnvvvYecnBwAEOv10KFDuHTpElpbW3HdddcFBWUJ5YSEhEifGY598ODBMJl6+88MGjQIU6ZMwdChQ2XNqHy5Ln6/HxkZGWhqasKECRMERgN6ocBVq1Zh5syZSEtLQ25uLo4cOYKwsDApq2c+t4ZAufZr1qzB7NmzkZGRIXP96KOPMHPmTMTFxWHSpEk4ePAgurq6kJ6ejpSUFIwbN06weCrV8+fPIzU1FXa7HV1dXSgsLERycrLEPg4ePIja2lrpttje3o6BAwfCZOotUqT3T2ucSiMyMlJkkdPpRG5uLqZOnYqUlBRs2rRJYnOEZZ1OpzRL2717N0aPHo2YmBgAEG8tNTUVnZ2dSEhIgM1mE6iG8CBbE1RWVn6nTP1eCHdaAMTZiY2uWrUKCxYsEKz8q6++ws9//nPZ9J/85CdoaWlBVlYW7rzzTuTm5sLlcuH111/H2bNnce+996KhoQFut1uUBXF49tOeP38+vF4vnn76aenrsGjRIoSFhWHXrl0YN24cFi5ciP3798uzk5OTceutt8IKM04AACAASURBVOLAgQO477774Pf7sX79eowaNQqLFi3C8ePHcc8998DhcOCTTz6Bw+FAINB7GMVrr72GX/7ylxJNB/qi7RR0tMa1krPZbNKk67777sOPf/xj/OhHP5I2t3a7HQsWLEBpaangswcPHsRjjz2GsLAwZGRkIDMzE2+++SbefvttLF68WLIhLly4AJ+v92QjCmS6xREREVi5ciUeeughgXE2btwo/VCOHz+Oq6++OigQrtP+1qxZI21sN2/ejHnz5qGpqQlLlizB/Pnz5TjCpUuXwul0SgZNd3e3pJHSAmRswOl04vDhw2hoaAhK6/T7/dJD5kc/+hGqqqrgcDhw6623wufzYc+ePZg4cSKA3gDy8uXLsXLlSiQkJKC+vh7vvPMOMjMzxbNhEJW47oULFxAfH49AIIALFy7g1ltvhd1ul949s2bNQn19PdatWyeW81tvvYVBgwahu7sb1dXViI+Px5w5c9DS0oIFCxYgNDQUbW1tWLp0KYYOHYry8nLs3LkT27Ztw5IlS+BwOMDWHK2treju7sYrr7yCd955B4sXL4bT6cTKlSuxdetWpKamIjw8HM8++yyamppw+eWXo7S0FL/61a/Q0dEBoC99kkqzpaUFjz/+uDRY27BhgxT7PPLII1i4cCEAiOJjphrxfsIHR44ckUy2zz77DEOGDIHL5YLH48GSJUsQGRmJjo4ObN26FSNHjoTJZMJbb72Fm266SQKg9CoI+XzyySd48803ER4ejs7OTvz85z/HihUr8OabbyIjIwN+vx9bt27F0KFDMWDAABw7dgw/+9nPYLVasWbNGmRnZ8PlcmHNmjVoamqCy+XC+fPn8dZbbyEtLQ3Hjx/HunXrBEqKjo7GsWPHMHz4cGkKNnLkSMTFxeHUqVOYPn060tLSEBsbi9DQUDQ1NaGurg7t7e2C+yckJGDUqFG4+uqr4XK5YDabkZ2djR07duDKK69EaGgojh49ivHjx2PTpk2iHENCQjBmzBjx9lwulyjRpKQk6cG/efNmzJgxQ+Ct77q+F8KdwRMGXhh4+vDDD5GWloavvvoKFy5cwLJly5CZmYlNmzbh73//O0pLS7Fnzx4888wzmDt3LtasWYMVK1YgNzcXP//5z6UpGAM5zNYggTudTnR2dsLtdmPjxo3weDzSGKigoACLFi3C3/72NzQ2NuLDDz9EXFycFJQ0Nzfj/fffR2xsLDZu3IiioiJcfXXv6YIlJSXIzs4WZly6dClqamrwf/7P/8Fll12GX/7yl9Io6de//jWOHDkCoK8sm0FABuSYmdDS0oKlS5di1qxZoogo/ABg586d+NnPfobNmzcjLCwMH3zwATIzM4Vh6uvrcebMGTz//POYPXs2LBYLmpqacM011yAvLw/79u0TggL6slGSkpJgs9nQ1taGjz76CMXFxZgxYwYslt5zOwcNGiTFPmRM9n0JCQmRlsPLly9HXV0dtm7dCrfbjeuuuw6lpaXYvn072tvbsXr1alRWVmLlypXSC5vrTUVDWOajjz7C3LlzYbFY0NjYiEWLFiEqKgp79+7Fo48+iuPHjyM2NhYJCQno6enBqVOnsGjRIrz55pu49dZbUVpaij/+8Y9ihW3cuBEXLlxAdXW1YLkMrOsgV1paGurq6rBkyRJMmzYNS5cuhcViQWZmpljmbW1teP/997Ft2za0tLSgtrYWra2tsFgsSEtLE5qYMWMGPv30UwB9aaivvvoq5s+fj5iYGCQkJAj8OHv2bGzYsAG33347SkpK8O///u9Cb0OHDkV6erocpLFjxw4sX74czzzzDJ555hk8+eSTcDgcAPrad7DfSWhoKKKjo2EymfDee++htbUVH3zwAWpqaiTF0+124y9/+Qv+8pe/BAVwyVeBQAAffPABbrjhBgQCAaxbtw6/+MUvsHnzZnR2diIxMVHaGpw7dw5XXnklBgwYgJaWFlx//fVYs2ZNUDok1zs1NRUpKSkC1+zatQuZmZlISUlBW1sbVq1ahcLCQkyfPh1Ab+YVT6vauHEjnnjiCRw4cABr1qzBmDFjsGzZMrz33nvS6M4wDJw8eVLibkCvsQJAMPysrCx0dXXh6NGjyM7OxqFDh8QSZ599i8WC8vJynD17Fq2trQgJCYHb7cZPfvIT8cDLysowYcIE1NfXY/r06bjrrrukeVl5eTnGjh0rSRVfffUVxo8fj5iYGJSVleHGG29EZWUl3G43QkNDMXnyZJhMJnz00UffKVe/VwFVYssk9KlTp6KwsFACQBRiZr
MZWVlZaGpqwqRJk7Br1y5UVlYiJSUFKSkpctr47bffLgcoMGALQAKiVqsVJ0+eRGhoKJKTk+X0lkOHDqGpqQn/9m//hvDwcBE0zNi5dOmSnEJeVFSE6upqPPjgg+KO5ubm4ty5c6iursaIESMkHevqq69GVVUViouLsWjRIgDA/v37MWXKFPT09Ii1oHPqiauREJubm+WEnuzs7KDqOZar8xQmXRDk8/kkiMtj9xik4ck6kydPDnKnGae4/PLL8dlnn8Hv98PlcuGBBx6A2WyGx+ORtC+uKzOTGBibNm0ajh07BrfbjXHjxuHUqVMYOXIkxo4di9raWkyZMgXNzc1itWRnZ+ONN97AD3/4Q+m7TpiKsA+x8ClTpsDn6z2QZfLkySIcWltbBcagRVdVVYXbbrsNoaGhmDt3Lux2O/Ly8nDvvffCarUiKysLo0ePxunTpzFz5kyhSXp6Pp9PuvrxRKLTp0/DZrNJ8PHEiROwWq0YO3YsYmJiYLfbMWrUKOkHzlS/oqIiREdH4+zZs4iJiYHD4cCdd94pJ2YtWLAA7e3tmDlzJk6cOIGxY8ciJCQEo0ePxrXXXgu73S4nZEVGRmLSpEm48cYbcfz4ceGRMWPGICIiAvn5+SguLsb9998ve8Q5+f1+WK1WLFq0CCdOnIDNZpOxJycn4+6778bBgwcxYMAATJkyRXqdEDoF+tJtw8PDMXXqVPT09CA9PR0dHR3S7fOqq65CWVkZ2tvb8eCDD4p3PmrUKDQ0NEirbhp2hGg4Lx40bxgGJk+ejBtuuAElJSVoa2vD/fffL3Sn2wRkZmaiq6sLI0aMQGpqKlpbW1FTUwO73Q6n04mWlhZERUUhPT0dTU1NSExMRFtbmxQgmc1mtLW1ISEhQboxNjY2CgTjdrsRFxeHzs5OhIaGStA3IiIC0dHR6OzsxMiRIyVOyINAAIjlPWHCBDQ0NMDlcolnmJaWBp/PJ0kRNJQI7cbHx6O9vR0pKSm4/PLLcfLkyX8oU78XXSHHjx9v5OXlSeMfnaBP64mFIAyo6spGAGLdAn2l2DqFEugjbF0gQaWiK/RYgcnn89m8n2EY+N3vfofS0lJ8+OGHUuZM4c4UK2a3MFWM49DpaBaLJSgwYhiG9GHXMQJm0BCi0Cl1QF/FJi1deifMASbDAH3ZOPwNhT8vWhucq85Lp7A9cuSIWOPLli0TjBPoy6Vmto8uO9cFLKwdCAQC8nu/3w+32y15vVqQ6PFzHSkQtDLjfXRhmc6Z1xdjGbyf7kPEsWuaYsCX1YVayHHtSUPsjcQEAN5T90UilMX10c3IdGVnZ2en5IpzbroYTmeb6bkT+iOvGIYhcRnyA9eWipl0SiVK3tBeL6FUph0z6H3p0iU51UrTdExMjGTT6PHr9fL7/dI1kXTC3jpcZ36XY6d3yHuRBsPCwtDe3o7CwkI5dIbQJoO2PKWNtQ4ul0tSKdkjPj09HSZT7wlcumc/eTYjIwMNDQ2IiYnBmTNnEBcXh4EDB0qfqJ6eHnR2dqK5uVmOZ4yMjBQPqv/h2HrPtFHDe3EPyaePPvroP+wK+b2AZXjR3aNw0XmfQLAg5oaTMfm3FmbM7aX25DNInDoopIs9uJBAX1EKszfIlEVFRUhJSZFncty6uIQpkhSeOnee2SQ6uEWsHejLzqAiYICJc+f4KVBI7LqghUqFn7GZkg7Okil0ERfQJ9g0RKOx9DfffBPnzp3DD3/4w6C8Y+4XhQ0FDy9CLFxT4uoUbBaLBTU1NTJ3ZtzoAjCuI4Nl/B2JX++9tjT7F4mRfngfzo1z1Xnofr9fsHi9/kBfEQrhg/b2dlE6fL5uZEYBpLOKuOcUUFwfjp00SoHG/QL6+t9wLfgdnW2lA5bMade0zu9z7zX/6dx1rbg5B9Iq78UzSonL0xLW1d+a5/m/fi6fwzFR0ZN+GQ/RtQt87ff7UVFRgVOnTqGwsFA+ozI2mXpbktjtdrS3t0tKKguWXC6X0FRHR4fMq6WlRc4G5pnKLJT0+fp6u3s8HrS0tEg8pbS0FD09PWhsbJRiL86NqZXMCqMM0YYI58l5cJ//2fVPLXeTybQMwA0AmgzDGPXNe04AnwDIAFABYIFhGK5vPvs1gHsA+AE8ZhjGtn82iHHjxhl79uwRa5yTY38XEiEtF1q1FLY2m02Euk6p0lV4tDz4PVoMtJD4HC00SGR0YSmoGHwko7C4ROdj08LWp75oTJr/aHkwM0SXjlMYG4YRlINPBcDsCRICBaruZkkcTzea4ue6So7C3mw2Swc9bTlzPagovF6vFDHxOxS+WiCQiHWBC61BEirXnczPjpxaqFAIcA+5R7rQTCtDzoVrTq+Ll7ZoOW4qB46bUBeVjO4tRCiAljBphEpetwvWRgEFDK000p4umKInw73mHPQpTFSM7CLJ9wgbkP6oeBh4Jh/Qq3C73bh06RIcDocIFhac0bOiBU0low0q8hdxfPbtIc3x2VxT7pU2rrRhxf81PEs+0gJN0w5ft7a24uDBg6ipqUFDQ4MoF6vVKqemcc/5fWaWcY8vXrwoMFtbW5sUH3V0dCAmJkbkQGdnJxwOh2RyXbp0CcnJyYKLky5Zqaz3hllmujiOgWrueX86Z/CaMiAiIgLh4eG44447/qV+7ssBvAFgpXrvaQA7DcN40WQyPf3N30+ZTKaRAG4FkAMgGcAOk8k01DCM78y25waTsLgoLGYiY9P9o2vHwqRAICDNhrQw4e80YVNRsIJVBxBJ0CzC0VWK1PxAr+C12+0iCDo6OoI+o+dAi1OXrms3VEMFhmFIgyoyD79Pa1EfuBsa2ttWmKXULJWm4OZakFG5fmRCvibMpT0UKgK67Lq0m68JV/Fz7XFx/zR0RIteW65k9P7Vm/QEaKFSgei/tbVI5tDxAv0sWs68Jy035lNT2VBY695GHJsueuNRh4S1uCYaNjQMQzr9kbapUKhY+7vhpDEqWGZ0UejzexSmQF8HR96LipV7oZvmkc6APniDNSXcDwoVrjPXpLOzU84OoKKmIvX5fHC5XKKsCDXoWAzpi/MnBMr1oQHGveN4tUFExcQ14H1qa2tRXl6OM2fOoKWlRQwQs9ksypi4N3Hr0tJSUZAejwdxcXFwu91wOp0wmUyIiYmR7CwqNyqFqKgoNDU1yYlaXBumQ3ItebYCaSEQCMDj8QhPkze5vpy79u65PoQvWW3Mdtnfdf1T4W4Yxl6TyZTR7+2bAPzgm9crAOwB8NQ37682DKMbQLnJZCoFMBnAwX/yDNk8EiA3SLvLJAadB01rjQxD60bjWCQoakCNY2nGIzRAItLErq1AoA+/JyFzoWl1UaDS0tLFQRTw/L0uzqEFrKECPptCTQsK4r9AX9MxCiy+5hppC5jEz/nxM+3pkCmJQ3PNKCA5Ts6b68A5UNloz4vP5qWtZhIuBZX2pDj+/u2XeVFZ8jU7K3LNuba0JMm0vKg8NGzHFEjOhe0OaJFqxcR11tYnIaj+86cQJo3x2RqWIEynlQv3hftK7F7TFp8B9EFrHJN27TlP0pFeB9KPHoumI/JsSEgIO
jo6ROBz/nw26VobSfyc/2vviM/gPmnPQK8jhXJ9fT06Ojpw6tQp1NTUBNEoPSD2eGGwFABaWlpgsfS24bDb7YiPjw+CRFhJnJSUJCc7mUwmdHR0wG63i3DlEZukYd0lU3sWnBM9Oi1fNP9oo5PeilZoNBx8Pp94bN91/XezZRINw6j/ZnPqTSYTEy5TABxS36v55r1/etGKogXOyVJwuVwudHV1YeDAgUFWL4Wthj36C3agL+Clg020bnWPDS3AaTH19PSIu0Ym1VghX9MCoVbl4nM8ZAhuLF19nbnAv9nOk56Mxl7JMGRuWg9cBxKayWRCU1OTpG31twwoHPVJM7pqkUyp11L35dDMyzVgfxR6KtxHrgcVKItHEhMTg3BnjYHrQCSfQyGvFYVeLx1/oLXFSwfnNNMAfQe00xoHIBXBFOz0HDQUxHn2x8a1wNMemlaCZrNZxq0FM9dLrzMtONIfISL+lvMFgk820vCYTi4gH7S3t6O2thYjRowIUlQUUAyGEzrUcRke8sxx2Ww2gb4YsGRBFXmDa2Cx9PZTIqzK93kv3kd7uRcvXkR1dTWqqqrQ1NSE2tpa8doouPmbpKQkmM1mOVc1ISEBDQ0NuHTpUhDsyGpTVgkzf7yyshKDBw/GwYMH4fF4kJmZKWeqhoSEIC0tTbxBKqrw8HB4vV4EAgGBUQcMGCCWPpEJ7k9nZ6f0lWloaAhqD0FFQdokrk+l/s/a/QL/8wFV07e8962gvslkut9kMh01mUxHW1pa/ot7zEwPv9+P6upqHDx4EAcOHEB+fr7gzNx04u+6KRgJlVah7iKpha7P55NeK1xQ/paMfPbsWaxYsULcVWbG0LqhAuju7kZhYaGcr0gB4vV6RSgGAn2nKmlFo91sClbOCehr20pG43tAr4IpLi5GXV2dtDLlWHfu3Illy5YhIiIiyNqnINOBZm29E96h8tAWGxUp3UuuY/9e7jrwR6XLz3fs2IGVK1cGzZljACCwju6wyTlr4cffM4OKSurMmTNiaXMOvKfGfrl/xGcZ4DWbzcjLy0Nzc7N04+Q8qIx1FTHnyrUgnKENDO4916k/ZEOFS9rjxftq4aeNH9KLxne5pzSY9N5x7n6/HwcOHMAbb7wBoFeI0HvSjau0EuyvOHQzL3rNhmGgsrISX375ZVAcSkNQpGdtqJFfNETHvz0eD44cOYJdu3ahoKAA586dkzkSw7ZarUhOTobNZkNiYiJiYmKksIueodnc28ysvb0dgUAAUVFRspcs6mtpaZFgaVpaGtLS0qQuhZf2cjn2mpoa4Tv2k6eSo8FFHqYFD/Sd+aBje/p/oK+Cnfvt8Xj+11r+NppMpoEA8M3/Td+8XwMgTX0vFUDdt93AMIx3DcPINf4/5t48PMr6Xh++ZyaTPTPZ95CQkJAEAkkIO4ICsguKoggiCGhdW21ta3uqB2ttPedq5ViXumNRoSIgmyCi7CD7lhAIIetkz2Qyk8lMtpl53j/i/eEbfqf2vO95r9/lc11cCcnkeb7Pd/ms9+f+aFpRdHT0gJBCZ2enxKR+/vOf46uvvsKCBQtw99134+OPPx6QOORBoFXK5JYai2QclQeGWp5VesTF85+avKML9Pnnnw/YwNyYVEb8V1FRIQvIz6ubGrjRIUiNddMyZSyboQGSQXGzUzjwZ11dXejs7MTVq1fR0dEh70B3/PTp08LloYae+EwVVsoDx43FkmtVCVDhMDZPWBctIAohCgqGrbg+QP9GPXv2LK5evYqenh4pCqHQVhUYPQ51o6tWoBqe4f7p7u4WIis1JKEKWl60pKxWq1hanJtLly6Jt8aEs5r/4R77/hyIcaIm1Pg9BSaVHNedY1SFHO/Pd1VbuJFVUP0ZhTXnjfuIwoPGCAWaurdfffVV4aThvFI4UZDwvRmeYpxahR02NTVh4cKFEhIzm80Sz+fe4ecZflB7BavCnrKAyKMDBw5g+/btWLduHVpaWtDZ2SnvScZEJj5JwaFpmnj67HugaZrUeoSGhkqZP6vYzWaztNnMysqCw+FAbm4uenp6EB0dDY/HIzF07ku1LaDZbEZwcDBiYmJgMpkQEREhBWJfffWVvDuVotfrlfNNgd3X188bTxZLeoHcJ4zNR0REiNL+Z9f/V+G+A8Dy779fDmC78vPFOp0uQKfTDQaQCeDU/+SG3JgUjHTzdu3ahYKCAomHTZ8+XYQT8bR2u10OYFBQEDo6OkRbAjdirgyD2Gw2sbR8Ph8KCgqkqMnn88Fms8HpdEp8ddOmTQgLCxNmOCZxmZBiSMHtdmPRokUSw3O73ejs7ByQbANuWKUU/EQsUMAxCcbEKQUgcMOK07T+Cjq+97x585CbmysMc5rW3z39m2++wcqVK+FyudDX14f29nZYrVYRwBScHR0dsNvtAPrd7c7OTimw4PuSpoBrQaFOxakillwul1DD0jJlGbnX68X+/fuxYsUKcVVVN5lxXH62p6dHwjiqt6NpmswdBRHncM6cOQO8MdWq9Hq9QklRV1eHjo4OUUJMOPf19WHz5s0Ck2PYpr29XUI9Pp9P4q7d3d2wWq2ilCi4Oe/853K5ZG+qiTTej0yFFBis+WBXHu4T3oucQpx/CklVkHLsDJtQQG/evBmrV68WgUMlydL37u5uOSuqgm1tbUVbW5sI4+7ubnz22WcwmUyCb9+wYYMUqDF0w73f1dUFq9UqY6PSoWDjZ86dO4ctW7bg1KlTqKysxJAhQyRsoVIiq0ZDZGQk4uPjUVlZCZvNBovFAqfTKV6c0WhEZWUlQkJChJIhMDAQCQkJcDgcgqKxWq0wGAw4ffo0Ll++jM7OTin2c7lcaG9vF5gkY/R6fT8KiS34uru70dHRgba2NmRmZgoKzGAwyFmjN8sQInNCPDtdXV2yL1nkSOOAIct/dv3LmLtOp9uI/uRptE6nqwPw7wBeAbBJp9OtAlALYNH3G+myTqfbBKAUgAfAE9q/QMrwBYEbiSZ+TwH0+uuv41e/+hVSUlIwdepUuFwulJaWoqKiAvn5+bh+/TomTJiA1NRUtLe3Y//+/dA0DUOGDEF2djaam5uxe/dujBkzBs3NzWhtbcWsWbMQHx+P69evY/v27fjVr36Fnp4eVFZWory8HE6nE3feeSd0Oh2Ki4sRFxeHI0eOICQkBNOmTRNkA8fY29uLvXv3YuTIkcjKyoLL5cLly5dRWVmJyZMnIz4+fgA+monU48ePS7XcrFmzBghDo9EIp9OJ4uJiWCwW3HvvvWhraxNe6HXr1uHhhx/G5cuXUVtbi/nz58Ng6C/Hv3r1Ktra2hASEiLkVh6PBxcuXEBTUxMSEhJw6623wuv14siRI2hpaUFUVBSys7OxY8cODBs2TDbxkiVL0NDQgJMnT0qnoKqqKtx1111wOBzCwldYWChwsKNHj8JkMsFsNiM/Px86nQ7Nzc04cuQIDIZ+OtTMzMwB1mtvby9OnjyJ+vp6ZGdnw2KxSOFJXV0dIiMjMWHCBEmW1tTUoLi4GOHh4RgxYgTCw8NRW1uLffv2CWEacca1tbXIzMzE0KFD0dDQ
gDNnziAuLg41NTXo7u6WtTYY+ovKwsLCBsTm29racOHCBTgcDgwZMgR5eXmor6/HhQsXMGjQINjtdjQ0NGD69OnSjYrVy3q9HoWFhTCbzdi3bx+uX7+O5557DteuXUNtba3QCFRVVeH8+fMwmUzIz88XxVJaWor6+noAwKhRoxAREQG3243q6mpUVVUhNDQUU6ZMGQCRVFFb9EhoXGiahqtXrwpyhJY0Pcrdu3fDaDQiLS1NuFamTZuG3t5etLW14fLly3A4HBg6dCiys7NRWlqKw4cPIzo6GpWVlcjIyEBxcTGqq6vR1NSEwMBAzJ49WzzR06dPw+l0YuzYsUhKSsKhQ4eEyresrAyhoaFISUlBcXGxeOJUbklJSfD398eVK1eQn5+PpqYmdHd3Iz09HTabDW1tbUhKSoLZbEZbWxuqqqoQFhaG69evo6OjA0lJSUhLSxOmRk3TEBERge7ublRVVYnlTiF65swZOJ1O1NTUIC0tDeHh4WL0MPnqcDjQ0NAg7RnLysqQlZUl81FdXQ2TyYS6ujokJCSId20ymVBWVgaHw4Hk5GTY7Xa0tbUJEV5VVRWCgoKQm5sLAKJ0Ojo6kJGRIYVW/+z6l5a7pmn3a5qWoGmaUdO0ZE3TPtA0rU3TtGmapmV+/9WmfP5lTdMyNE0bqmnann91fxmIfmCVJK24kpISPPnkk3jttddwyy23yObweDx4/fXXoWkalixZgjvuuAN1dXV4+OGHcffdd+O2227DgQMHEBgYiD179uDgwYNYu3Yt5s6di/Hjx2P9+vU4cOAArl27hg8//FCKC+bMmYOwsDBERUWJ9VNSUoLFixdj7ty5+OUvf4nGxkbxCpg1P3z4MNLT01FaWgoAWLhwIdavX49JkyaJ9c64LsMx77zzDlJSUnDnnXfi97//vVS9MW5nMBiwefNm5Obm4rPPPoPb7caf/vQnKXEuKSnB1q1bkZ2djc8//1ySeE899RRGjBiBW265Bffccw/S0tJw6tQpzJ49G6NHj0ZYWBh27NgBna6fn8JgMAg/+969e3H8+HHs3LkTd9xxB8aMGYO//e1v2Lt3L8aPH4/3338fubm5GDVqFJ5//nk88cQTmDBhAgoKCnD58mW8/vrrePzxx7FgwQK89NJLOHTokHg5jz/+OO644w6MGzcOd999t7B9MiR08OBBeL1evPzyy0L+9atf/QpFRUVYvHgxnn32WVitVnR2duK+++7D+fPnsXTpUuHY37x5MwICAnDw4EHs3LkTBoMBy5cvh9lsxoIFC7Bo0SL84x//wM6dOzF27Fi8/fbbWLx4MQoKCiSBx2RYe3s7Zs6cKWG9J598Erm5uRg/fjz2798Pn8+Hffv2YcKECXj99dcxZcoUjBo1SjpJXb58GcXFxZg9ezZqa2vR3d2NI0eOYOjQofjkk0/g8/nwi1/8AlevXkVwcDBqa2tx6tQpLF26FI2NjZIgvXDhAi5cuIBZs2YJ7URISAjmz5+PU6dOYcqUKdi1a5dw7avhNuYXaO2qMeLi4mLceeedAt3j7w8cASY0TgAAIABJREFUOIDk5GS8/PLLGD9+POrr6/GLX/wCvb29+Oijj/DEE09gzpw5WLx4MZYvX46wsDBMmjQJHR0dePnllzF8+HCcOnUKJSUlGDt2LBYsWIDf/va3gjZ58sknMWXKFBQVFeHy5ctob2/H+PHjsW7dOuTl5cFgMODChQv46quvpDEGm5eXlZUhPDwcdrtdoIhDhgzB6dOncejQIWRlZWHr1q2orq6WhKXT6URwcLCQbl24cAHr16+Hn19/R6bq6moRrAkJCTh16pRQFzidToSGhmLUqFESuz937hxMJhNiYmJQVlaG4OBgdHd3IzIyEs3NzQgJCUFbWxveffdd2Gw2jBw5Eu3t7Zg2bRpGjhyJ2tpa6HQ3CskyMjJgsVgQGxuLkSNHIjo6Gl9//bVQYjQ2NiIyMhJbtmxBbGwsMjMzsXPnTgnJ/q+E+/+NS43T0n2ie2QymTBx4kSsXbsWzz33HI4fP47MzEyMHDkSqampyM3NFYu4qqoKhYWF8Hq9aG5uxsiRI+Hz+TBjxgxUVVUJ0VRFRQVGjhyJzMxMTJo0CQkJCRI60ev1+MMf/iAYVYPBgNTUVIwaNUo4myMjIwcgF/R6vZAK5efno7u7G1FRUTh69Ci2b9+OoUOHDkicAUB1dTXWr1+P+Ph4Cd/QqqcF5fV6ceutt6Knpwdjx46Fw+FASUmJhHIKCgowZcoUeDwe4WKprq5Gfn4+IiMj0dDQgPz8fGiahg0bNqC5uRl79uxBVVUVfvKTn8BgMCAsLAyvvPIKOjs7cdttt2H27NmorKzE1KlTodfr0dLSAk3TMHv2bHR1dWHu3LmIiIhAbm4utm3bhujoaHz55ZfYs2cPpk6dio0bN6KgoEDCKoWFhTAajbBYLCgoKICmaWhsbMTw4cMlrMOvGRkZGDVqFLKysiR2Tn4Svi+xxbW1tRg3bhzcbrcc+nHjxolLfvvtt6OyshIFBQVI+74ZhMvlQktLixCvzZkzB729vcjOzh4AodPr9fK33d3dqKmpEa/EarWKMpgxY4ZY/UajETU1NcjJyYFer8fWrVsxdepU+Pv749q1a4iNjUVKSgpOnDghsWar1Yr8/Hz09PRg06ZNuOWWW+ByuVBeXo6oqCj4fD5s2rQJkyZNgs/nw5UrV6QhSXNzM7xeL3bs2IGHH34YZrMZNTU1gjdXYaPqpWmaxK337duHkydPDgj5ZWRk4PTp08jMzISfnx+qq6uRl5eH3t5erFu3Drm5uZIvUI2RxMREREVFAQBOnz6NwYMHS1hq+PDhACDj//LLL7F3717ceuut0oFr5syZ0Ol0aG1tFWOA1nNUVBT0+n4isc7OTgQFBaG1tVVQJDabDUVFRdKUOicnB3l5eRgxYgQaGxsHdEZjOEvT+rHnLpcLDQ0NYimbzWZJpg4ePBgtLS0Sr6+rq8OlS5dgMpkkhETFSX4ahosHDRoEo9GIpqYmNDQ0iLWu1h1Q7lmtVoE4MkTT0NCAlpYWDB06FO3t7UhOTpb8ltp28IeuH4VwZxKFriQF529+8xsAkMRFeno6hg0bJkRAy5cvh8FgwNdff43nn38eBw4cwL333gufz4d169ahoKAAZ86cQWpqKhISEjBnzhy0tLTg7bffRmFhIZKSknDx4kX88Y9/RF1dHS5cuIBz585hx44deOyxxxAcHIzq6mo8+OCDiImJwVdffYXf/OY3qK6uFjeWiigtLQ0fffQR+vr6cPXqVfzud79DcXExiouLBXGhZts3bNgg8Uiv14ucnBwUFxdLMpTJkrS0NGzcuBELFy7E/v37UVdXJ/HaRYsWIS4uDi+88ALuvvtunDp1Cl988QXuueceaJqGTz/9FFlZWThx4gS++eYbLFmyBIsWLcITTzyBxMREnD9/Hk6nE7t27cJTTz2FdevWCU/5xIkT4XA48P777+OBBx5AWloaNm/eLIKR8dbnnnsOCxcuxMqVK9Hb24vW1lYsXLhQ3ikvLw8ejwcbN27EXXf
dBa+3n9ExNTUVR48eFZSIn58fUlNT0draKqRqZWVlWLlyJXQ6HTZs2IA1a9YIHerMmTMxePBgXL58Gfv27ZOwS2BgIFJSUjBlyhRs2rQJDzzwAHQ6HZxOJ4qKirBy5Uqkp6dj48aNuP322yVJyfwGC97efvttTJo0Cf7+/tiyZQvuueceGI1GbNy4EcOHD8fJkycRFxeHjz/+GNOmTYPNZsN7772HoqIiXLhwQVhEe3r6+wDr9XoMGjQIb731FiZPnixCLycnB01NTfjggw+QlpaGq1ev4ptvvkFlZSUsFgs2bNiAIUOGwOfz4ciRI8KBcu+99+Kxxx7D0qVLhTmxr6+/0QOFBoWFCtf1+XzYs2cPnnzySdx3331Yvnw5MjIypN4hLS0N27Ztw8qVK6Fp/WyGv/3tb2Gz2dDU1ISlS5fCYOgvUhs1ahR8Ph/OnDmD5cuXIyoqCrW1tdixYweWLVsGn8+HQ4cO4bnnnoPFYsGxY8fwwgsvYNGiRXj44YfFgt26dSu8Xi/eeustQa6FhYXJmFwulygMJsDJyBgSEoLk5GTExMTA6XRixowZiI2NFXQKwRp9fX1IT0/H+PHjMXjwYMnVtba24tq1a4iOjkZNTQ3y8vJQW1sLr7e/R25UVJTw+zN+39jYiOvXryMxMRGtra1ISUnBlStXxANtbGzE5MmTERQUhAMHDggzKVFUPp9P9tq5c+ckWWu323Hu3DlMnz4dhYWFyM/PF7rgsWPHwufz4dixY0hKSkJfX5/E7f/ZZVizZs3/37L6//X1zjvvrFmxYsUAWF5AQABef/11CY989dVXsFqtklB97733xGo+fPgwfvrTnyI2NhZlZWU4fvw4Tp8+Da/Xi/DwcAQFBSEhIQE5OTnYuXMn9u3bh8jISAwaNAi//OUvMXr0aIwcORKnTp1CQEAAmpubER0djQkTJmD9+vXST/KFF14Qa4CWCQBZmPr6ehgMBgwdOhRVVVXweDwoKSnBggULBiSsfD4fEhISYLFYkJGRgX379sFkMiExMRGpqamyAQjx8/l8sFgsKCkpQUdHB0aMGIEjR45g0qRJ2LRpE7Zt24bIyEgkJSWhsLAQJSUl0pyXMcVJkybh9OnTyM3NxZ49e6DT6VBXV4f6+npJKK1evRoNDQ04ceIEMjIy8NFHH2HmzJlCVfrqq69ixYoVoqQqKyulfdj58+eRnJyMK1euIC4uDidPnpQ44+jRo5GYmIiysjKcOnUKZ86cgZ9ffys5NvmgxfLxxx9LH9U33ngD9913H9xuN/7jP/4DaWlpcDqdyM7Oxr59+5CUlITNmzeLhTd69Ghcv34dSUlJKCgowLBhw7Br1y6YTCa89957eP/992Ew9NMc/+Uvf8GqVaskeUoIpZ+fH9ra2rB9+3aEhoYiLy8PSUlJKC4uhsPhwJ49ewRbbTQa8eabb2L16tXYunUrvvnmG0RGRiIzM1ME5vHjx9HZ2Yl58+YJAickJETaFTY3N+OWW24RQbdlyxYZz+TJkxEeHo6+vj5899130uEoJCQE//jHP2AymWCxWGC325GUlISpU6ciIyMDhYWFAxAqLN2vqKjAxx9/jJEjRyIxMRFGoxEHDx7E0aNHMWrUKCQkJAgE9PHHH0dHRwcaGxsRGxuLjIwM1NXVSbu8zz//HG+88QZ0Oh3Onz+PsLAwXLlyBaNHj8aBAwfwyCOPoKOjAy+99BKGDRuGrKws5ObmYsuWLbJfYmJiEBYWhueeew4BAQEDqi7Z3ESn6+fxv3z5Msxm8wBhbTKZ0NHRgczMTAQFBeHChQsICgqCzWZDVFQU7HY7rl69Cj8/PxQXF2PmzJnQNA1NTU1IS0tDa2srhg8fjoyMDISFhaGiokJaDFJBNDc3Iz09HQEBAYiOjpb90t7eLu0Z6UFERkbCarVCr+/ndU9OTsa5c+ekGUlERITQDjDk++2332L8+PGIjo5GREQE4uPjUVNTg9DQUMHyjxo1ShpzlJaWIiEhATExMWhtbcWlS5ca16xZ8+5/J1d/FML97bffXkMrnOERo9GInJwc1NTUwOFwYPTo0ULF2tvbiw8++AAzZ86ExWLB8uXL4e/vj+joaJw5cwbTpk3DxIkTERAQgMLCQuh0OgwePFj4uAcNGoTCwkLExcVh+PDhcLvdyMjIQEpKCo4dOwabzYalS5fC398fXV1dyMjIgMfjwciRI9HV1YWJEydKA2vCL4nSmTFjBsLDw1FZWYna2lrce++9UqTBQhBN0xAZGYkRI0bAYrFg7Nix6OjowIQJEwSPTcWh1+sRFxeHs2fP4oEHHsCYMWPkb0jNmpaWhuzsbOTk5CA2NhYXL16E1+vFokWLYDQaMXr0aGRmZiI9PR3Xr1/HbbfdhtTUVAwZMgShoaGorq4WPvTz588jJycHHR0dmDp1KsaPHy/InaioKAwbNkw8rcLCQkFLZGVlIS4uDnl5ebBYLLjttttgtVqRnJyM5ORkRERE4PTp05g6dapYxEVFRYJkUsNRREUwJmkymZCbmwuDwSAJVbrJ9913n3SHCgwMxJEjRzB8+HBERUUhJCQETU1NsFgsWLhwofxdb28vIiMjkZOTMwB7T0RTcHAw8vLyoGkaUlNTYTabcenSJfT29uLuu++GwWCQxscxMTEYMWIE0tLSkJSUhNGjR4sgO3z4sIQqioqK4PV6JWzI8F1qaipSUlKQmZmJU6dOYcmSJUhKSsKYMWMQExODjIwMnD9/Xrr1FBQUQKfTISMjA7W1tYiJiRELOjY2FrfddpvAVdUwYFBQEM6cOQOr1YrQ0FCkpqYC6KeczszMRF9fH4YPHy6GR0ZGBoKCgtDY2CiGx+jRo6UA6I477hAakNTUVJSUlGDo0KGIjo5GfHw8hgwZAoPBgJycHHg8HhHAdrsdbrcbmZmZiIqKQnV1Nc6ePSvQSafTCZ/PJ31Og4KCkJSUhIiICHR1dQkjY29vrxQ0Em4ZFRWFzs5ODB8+XEAXiYmJ8Hg8GDx4sPTPjY+Px5UrVwTSy3vrdP20A6xKpeFCL8FutyM8PBxmsxmRkZHwer1ITExEUFAQYmJiBPHV1dWF8PBw6QDm9XqRlJQ0oFiPqBmfz4eUlBRBdRH51dTUhIyMDMTExCA0NBSNjY3o6+unE3e5XBgyZAiCg4Nx4MCBfyrcfxSUv4WFhdq+ffuEr8Xlcol2Dg0NHcC6p2ka9uzZg9/97nf47rvvBlSeqnErwsyCg4NhNpvhdDoH8JIQCqlW7QE3+GW6u7sRFhYGj8cjiRVaFjeX1tPSpoDifdQKM8L0yPvCYiMmv7ioVGzEIzNWSDgb4Zd8Z7WqEcCAKt+enh6YzWZJFjNHQOQE8fR8Zm9vLx544AF89NFHgtEnZhq4AcNkSEZt5K3iwBnPdLvd8kyOTVWIHDfnlJ8hhp4wSyp9leeHz6P7vmbNGsyfPx+vvvoq3njjDan8Y0EZC9y4J9SiOV5qHoXCnjFV4AaklqEc4q1VBcGaih07dmDhwoV455138NBDD8mh5lyocD5y2agFVswRfPnll5g/fz7WrVuHpU
uXCtcMn0Vhd3NtB9eVewXAAAVKCKpKXcH7cowUcvQg1QI7tT6DF5FGer1eeM6BGwRiXFvGpXfv3o3y8nIEBgZKTUVHRwdcLpfw8vj7+6OlpUXCQEB/DcD169elmTqbobe1tUni0+Fw4L333sO0adOQnJwsbf0cDgfMZjNKS0sFnkj4JhUo559oGoZKExMTBXba3d0t8G1y1pCXpqurS5Qtcyf0Ljk36l7zevu7LdGip4FLEAafx2peQl6NRiMeffTR/xVx2P+Viy8N3OAeUcm9ePn5+eHAgQOIiYlBc3OzJCQDAgJEs7vdbvj7+0unE5XgyuPxDGjNxkOsfs8CHP6MiobkQMANXL5qaVNRkJOaFytqKcz5XhQE5H5XebKJbKCC4KGgEgFuVCCqDIhMUGnajfJ/9T7qoWRrvPDwcAAQ7O65c+cwbtw42eQUFmplJ59P74ZzR5SGWnFJYcmvHJu64YnvJ00qN7TKs8N15fypBVINDQ04cuQIVq1aJUKEh4DKkTFRjqOzs1PuwfGwUERdK1r0VMKkO1B5vUNCQiSE1tHRgfPnzyM8PFzCH3wHriP3D4Ut95xKquV2u3Hu3DlERkYKR7jqZXAPsUqU863uEZ4ftdqT+5o/V6uL+TtCcjnXnAO1aEs1QPi39GJ5jqigaczQWCouLkZlZaVUcjocDjnDNJ5IPMZ6BavVKsItNjZWmmLQG6GC8ng8OHfuHHp7eyU/FhISIh603W4XeCFpevkuNFgYJmpvb5f909raiq6uLqSnp8s8cr2Mxv7ObmrXN3qKVHIcG9DvSVFuUcGqRiyNmZ6eHtkvapEdPdAfun4UljubdajJVB4GWoacFLXEmxuB1hf5VNQNqfKb0DIiZJDWam9vr/AzAxDaUpU5kUJCrTrkAeAB4oZkQQKtMVKK8r3IC0JLRw1HcWFVAiEqDL6/SrTEv7nZuubPVa4UtdiDB4f/p9ADbtAaEIvf29sr92CmnvzkFPAUdBTs/L9er0dHR4esDalLu7u7JcnMNablybmmYuUa8DMUtHw/Vvdy7/DdTSbTAKoKtRJULavnAeReUNedBWFUwirEkIeaFAyqQuBaM4nGIiti9LkfKMjpIfDwUpHQ6lWpIG6ucOWaqZa0mkS9eV05h/SE1HsAGOAlcs55/tRqX1qZKucPgAGKgnuX73v9+nVs3boVbrdbvCh/f38kJCQICoaUAGFhYXA6nVKkyGpx7iPmxnQ6HaxWq1RMs7G0x+OBw+FAZ2cnOjs7Zb7ZTL2zsxNnzpyB2WyGy+VCaGioxM05f6Ghobh48SKKiopkz8bFxUlxWltbG0wmExoaGpCamioWNnMlvAdRPlTa9NiIzElISBDjk3uJ600DhEqO62cwGLBs2bIft+WubiAeNGbKVWwuhSI1GDeTak2oiRlOws0bWbV4KRCcTqegJijUurq6BnR+ASBUrKzG4wEwGAzSlZwVcbTSVD4SCl7ySdBSMJlMIiQoROim0QKgUOM88WAxSUMLnYefn1ddbgBSsamik26GAnJcFJwUbnxPxghpQdC15aYjTIwhFlXoEclAOB09HlpxDAHodDfoiQGIu07hrc4nQzWsxGQpOoUvk/RqKE4NU1GoU4lwjFS2FCpcT747lQvng6EMtZuY2oWJio6GAr0Xrgu9Kd6X+5e0sjcLZioqCnLVg1L5jWiMqEpNDSmq3oDT6ZRCO+4fKhWurcFgkLWg4KWSYsiOysnn66/6Li0txb59+wD0e+VxcXFS8TpkyBD09vYiLCxM9lh7e7vkFVgEWF9fD7fbLbkyhkJiYmKk6M1qtUq/geDgYAQEBMBkMuHq1asYPHgwzGazVCZHRkYiMjJS5j4sLExChKQqSEtLE0/MYrFg+vTpQlsQERGBtrY2mM1mtLS0yPP8/PzQ2dmJuLg4dHV1SbgpKSlJPAiPx4OIiAghC+Rzudc6Ojpk/QAMqGqnLPqh60ch3LmxaAkBEMuXFguLDngYaN1wA1PQqZYrIVvqxFFYq1a3Stur0hIwLMGDzL8HblAIM5RCxaBpmpAvUdEAkHvw/nwHChkmLfkcCi5aunyuim0lMRHnTH1/xvk4Rwyf0FJU76N6SjzI3FAGg0Goc2n58HkA5P+cDwo/fk+Fw/WkwAduxNepIEimRAWhJgQZTuN7UinfbM2q5f+0mKlo+Xf8qj6L+4lry7GrcXm1cpXvxMPHEnuG/KhsqCg5VnLC8L3J5Mj5UGmdmVfgPuNeVmtB1FwK9446x/wdFbVqcHDe1dAYBT0VuXp/ho84xyq0j3uWXpvqfQDAyZMncfXqVRHIYWFhEg6JiooSfn2z2QyDwYD6+noxWOglmEwmURZ2u11i3aQvYFiPXiWVYE9PD5qamgbMCYuACH5gopjngrkqem96vR7R0dFITk5GdHS0KB2n0ynnHein7khMTJRnkUaAeQHOE88hzw+fq8oHnhc1Ka6GZNTmM//d9aPAuQMDtRAXlIfvww8/xKOPPopt27YBuNFTNDQ0VEiKuKEYFuHhJLEQDzeFNSef4REKRmAg5zoLmbiRabXx9zwkDBExfs7n8zCqMWC206PFpWkayIwJ3BCoPT09Ysn99a9/FU+Dmpshh5sFGq1Ujl0V+rxURsuAgAAZExVaY2OjHGhicmnFU2C9//77cDgcwndy8+8ZJ2fMU3XdObdEGISFhWHt2rVYuHDh/3FAmWBvamqC3W4Xha8mYhkGYjhBpXqw2+1YvXq1wNb4O5XYDLhxEIEbyUcqj9DQUHH1NU1DfX09fv/73+Obb77Bo48+in//93/HwYMH8dRTT+HXv/41Tp06hRdffFF6n/JiolRFTlCpUqHxUq1xehJqUpl70OFwyLj5LO4VCtibk3gMKcybNw9HjhwRlIrKxsnzoCb61PyNavWrhpBerx9ALHbmzBkUFxfDbrdLyIOFT8nJyWK9+3w+1NTU4Ny5c+KdJyUlISwsTPhoCGFmEREL4LgHOjs74XQ6xUvhXiZCJywsDMAN44wKwGAwIDc3F83NzXC5XCguLsbp06fR0tICp9OJoUOHDjBODh8+jDNnzsBiscBsNksLRnq5DQ0NSExMlIp0GqdcM7Wwil8ZGqLRw8Q9140yg57BH/7whx+UqT8a4a4KIQpFoB/XuX79erhcLnR0dMjveMAZkgBuxAqBG+gRusT8ucfjkZgnDxI3NAU1Dx7j44x1qVY1v+eCUiEQpaIKU1rgKsmRqhgYM+fXjo4OHDp0aIDQ3bZtm8SMaSWqsX+GBvhM1StQ49mcY4YqVE+JeOje3l6sWbNmwMFR7wP0k6R98cUX0tScAhW4IcDa2trw/PPPizDiXFJIMH4PQFx4t9s9wLJmCMhoNOJPf/oTTp48KUqMio17hUlJ0jwwPBEaGor6+nph8DMab1AIc314aPh3XG/1nbxerwjrEydOYOXKlZg2bRrCwsKwfPlyTJ8+HRMmTIDb7UZOTo4geRhfZshGtUZpBHB9OMdcGypoNUENQBo5A8Dzzz+Pw4cPi3HBc8F9zzOjvpPRaER8fDw6OjqQnp4usEYiaUJDQ
0WYqNBKCiYaLRROKq6eHgyF0rlz59DV1SVQQjUkSOOMqCyvt79gkbmR7u5utLS0oKqqCp2dnWhpaZGWiTExMTIW1ftm6IvKjUqotrYWHR0d6OrqgslkknAn2SBramoQExOD8PBw+Pv7Y/DgwTJ3DN8ZjUa0tLSgo6MDXq8XsbGxAvHkenm9XunOxjg9G2YzXEUvR6fTCZ1IdHS0jJfFWmpITM0V0qP/oetHI9wZX6Vmo3D68MMPkZOTg08//RQLFy4UjQf0E+kQ8fDfCUsAA4Qm3e3AwEBx8QAIjaeakLrZhaN7rFrvPIz8yrAAK+cAiPCl0mLHF1V4mc1mcfe7urqwc+dOFBYWimusaRr2798vh5zCjvFP3lNlkKPAYjKOQuNmS0tVRrSSv/jiCxQXF8sB45pQKHKjf/nll0hOThZ0gYqk8fPzw/79+1FcXCzCi+Ph+vl8PrGk9Ho9tmzZghUrVgxAflDp+vv748KFCxgxYoSEQ6hMAUhpt8p6SKXCWLcqgPjeXFvOARUGFSkFntvtFte/sbERV69eRXR0NPr6+rBnzx7BxZtMJkyePBnbt2/H/PnzodPpYDKZxOLX62/wq/Owcq+oqDCGfbimnFsaHWyq7PP5UFxcjPz8fLknz4hqADFExT3d1dWFuro63HXXXUhLS5PzRyXAdaRFT2ppCiYaPrwfzxu9HYOhvwBo27ZtcDqdMJlMiI+PR3l5OSoqKlBRUSFFOklJSUKGl5KSgsGDByMuLg7JyclCuTx69GhERETAYDDAarWKl6Vpmuwht9uNwMBAZGRkwO12IzQ0VAwBwiFVtFN4eDh8Ph+Sk5MRHx+Pc+fOwWKxoKKiAomJiYJtZ3FRa2srqqurxVuaMGECoqOj0djYiEGDBgliymAwIC4uDq2trXA4HMJSSzlH74vrROoRvsPN+5DGJtewq6tLqrR/6PpRxNyBgdSk7CV68eJF7Nq1C/PmzUNxcTGOHTuGkpISZGVlwd+/v9vKnDlz8Prrr0vS4qWXXsI777yDpqYmsaIOHDiAu+++Gw0NDdixYwcKCwvx6KOPinBXe4q+++670Ol0cDgceOGFF2A0GnHt2jXs378f/v7+yMnJwZgxY/Daa6+hqqoKd955J8rKynDp0iUsXLgQFy5cQGlpKTZu3ChhjzfffFOQALNnz0ZycjLeffddtLa2YtKkSejp6YHFYsEzzzyDo0eP4qWXXoLdbseoUaPgcDhw5swZjBo1SoqcvvvuO2H1e+SRRyRWSIFw4cIFHDt2TBjuKisr8fOf/xzNzc04c+YMLl68iHvuuQejRo2C2+3Gpk2b4HA4MGXKFLhcLvzlL3/B6NGjceHCBfh8Pvz973/HiBEj4O/vj5KSEvz+97/Hq6++ivz8fEyfPh2BgYFoamrCjh070NTUhNtvvx1WqxWvvPIKioqKcO3aNRQWFsJut+PTTz+VMvWVK1eitbUV3377rSiQmTNnyvpTSX377be4cuUKhg4diq+++grLli2D3W7HN998I2iKmTNnYsiQIXC5XHjrrbcQGxuL3t5eCccsWLAAZ8+exeXLlxEaGorFixfjnXfekbE/++yzkn+hV8ODSOHK2GhcXBx+/etfy95Vcztz586FTqfDrFmzYDAYcPz4cTQ3N+PnP/85/Pz629J98803cLvdaGlpwaOPPorGxkZs2bIFISEhGDZsGAoLC6HX61FVVYWTJ0/i8uXLuOeeezB+/HiBz3GMb731FjIyMrBz50489NBDMBgM2Lp1K6xWK9rb2zF37lxkZ2fDZrMJsdrw4cMxcuRIHDp0CPfffz+am5vx6qumb2i/AAAgAElEQVSv4mc/+xlCQ0PxwQcfyBn46U9/KslcGi6Eir799tuoqKhAX18f1q5dC5PJhFdeeUVCcPv374fT6cTo0aPh8XjQ2NiIadOmiXKxWq2CiCFXitvtRltbG2JiYiSs0dPTg/379wsPEOG6EyZMwOnTpxESEoK0tDTU1NSgqqoKRUVFCAkJwdGjR1FXV4fc3FxUVVVJeKmurg5GoxHZ2dmS1G1paUFoaChaW1tRVFSErq4utLa2SkHR4cOHkZOTA5PJhIsXLwqjZWlpKaqqqqRTU3l5OVJTU0XIp6WlwePxoK6uDna7HdXV1Zg1a5bQOAcHB6OtrQ319fXi3Q8fPhzd3d04c+YMgoODYbFYMHfuXGiahpMnT8JoNErl+w9dPxrLndatWlQzbtw4mEwmrF69Gm1tbdA0Dbt27cIDDzyA+fPnw9/fH1u3bsWMGTPwxBNPoL6+Xiy2efPm4fDhw5g0aRJuueUWlJaWYtasWcLvwQbHKjrC7XbjypUrWLVqFSorK6Fp/RC4NWvWIDk5GY888ghOnDgBr9eLGTNmiBu2atUqtLS0IC0tDcuXL8fRo0fF4n377bexZMkSPPHEE3jvvffg9XpRWlqKO+64A7t27cKtt96Ke++9F+vWrQMA3HbbbdDr9fjJT36CwsJCXLt2DbNmzcKlS5cA9Fu4n332GaZOnQqTyQSbrZ+Qk1aq0WjEoUOHMG/ePLzzzjtYtWoVMjIycObMGaxZswb33nsvDAYDDh8+DK/Xi1dffRUPPvggKisrUVVVhRkzZkCv1+PBBx/EiBEjcPjwYRiNRgwfPhyrVq2Cx+PBxYsXMXfuXFy5cgUBAQFobGzEmjVrsGzZMhw6dAhHjx7F7bffTqgWcnJy0NXVhf/6r//CXXfdhWXLlmH//v1wOBz4wx/+gNWrV2P+/PlCysaYLq2ZqVOnYvLkyXjooYeE8Gzjxo04fvw4Vq9eLdZ6YGCgsBEuXboUx48fFwsyPz8fPp8Pc+fOFQ+mpKRECMaAG4lauryqV0YvgRYtscx2ux0jRowYEOP3eDwoLy/HlClTsGLFCllbTdOwdu1aHDx4EPfffz/27t2LtrY2rF27FqWlpZgxYwZKS0vh8/U3WXnxxReFy+Xo0aPSlo6eV1BQECZPnowVK1bgkUcekUQ5y/+JGPL5fPjzn/+MVatWYfr06SgvL4de39+Crq+vDydOnEBmZqZ4c1evXsWKFStw/fp1ORvcYwy5MKFoMplw6NAhQYfMnTsXb731lpB4aZqG7777DllZWZg4cSIuXbokEEiGwhwOB8rLy/Hll18iIyMDmZmZqKioQFtbG3bt2oWUlBQUFRXh7NmzEtq6ePEiwsLCUFBQgP3798NqtWLMmDHYvXs3vF6vnIvjx48jKioKEydOxOHDh9Hd3Y28vDzs3r1bmvTs3r0bTqcTOTk5EjayWq2IiIjAqVOnBJ1TVVWF5ORkREVFITMzUzzE5ORk4bevq6uDyWRCeHi4UDYz7xMfH4+zZ89KkRWVZE9PD2JjY1FYWIizZ8+it7cXFRUV0DQNmZmZ6OjogJ+fH2w2G/z9/TFs2DBUV1f/S5n6oxDuKtaWuFiGMHJycpCYmIhhw4bhwQcfxIQJExAWFobo6Ghs374da9eulaIClgyvWLEC69evx+9+9zuEh4dj27ZteOCBB0TwzZ07d0BohNaBXq/H9u3bkZWVhcWLF8Pf3x979uxB
XFwcrFYrNm3ahGeeeUYOzKFDhzBu3DgcP34cVVVVGDp0KL7++mux9vR6PdavX4/IyEg4HA7MmzcPBQUFKCoqwgcffIAJEyYgNDQUNpsN06ZNE7cxLy8PdrsdXV1dWLlypYwfAJ555hm89NJLyMnJwdKlSxEZGYmPP/4Yr7zyCl588UX09PTgqaeeQldXF9auXQuPx4PVq1djxYoVSEpKwsaNG7F69Wo89thjkhfIy8vDwoULcd9998HlciEvLw/jxo2Dz+fDY489Ju/Z29uL//zP/0R+fj4+++wzPPjgg/B4PJg4caIw/+3atQs/+9nP5EAUFhZKvHX79u3YsWMHPvzwQ3z++edobm7G2LFjhWvlwQcflLATY74Upq+99hrGjx8vCJRPPvkE//Zv/4aenh5cuXIFI0eOlDj6kiVLUFBQgLVr16K3txd///vfsXPnTuTl5SEuLg7Lli2DpmnYsWMHVqxYgcWLFwO4UWHJcA1DG2q+RC0yCQgIwKFDh/DAAw9IqIlu89ixYxETEwOHw4Hp06dD0zTYbDbs2rULmZmZ+Pjjj7F582YkJyfDYDBg586dWLZsGe6//34AwIIFCxAXF4cNGzZgxYoVePTRRwfUabBW4NVXX5W1cbvdePrpp/Hss8+ip6cHly9flk5CBkM/59FDDz2EZcuWoaysDLt27cInn3yCxMRE3H///TCZTDAajdixYwdyc3Nxzz33ALjRxJror7Nnz2LWrFlYtGgRCgsLJYzKfNatt96K9vZ2FBUVYdSoUQgICMDhw4dRVVWFOXPmIC4uDhEREYiJiUFPTw9CQkKwe/dupKWlCR49KysLb7zxhljBfX19Utw2ZMgQabDj7++PoUOHIi8vD263GwUFBRKzz8/Pl31ZVlaGzMxM5Ofnw2AwoKioCFFRUXC5XEj7nr7D7XYjKioKpaWlEgZZtGgRRo4ciaamJgQHB6Ojo0PoPdrb25Geni6kdT6fD+Xl5RJ9YM9Wr9eLgoICBAYGYu7cufD398eRI0cEo+92u/Hmm2/i3/7t33DbbbehuLgYf/3rX9HU1IS9e/eKkffyyy8jMzNTmrnQ+Pln149CuBOVwZgk0F+Q1N7ejnnz5iE6Ohpmsxnl5eVYvXo1gP44+bfffovbb78dAQEB2LBhAy5duoS4uDhUVVVh//79UiBx9uxZ+d7Pzw+/+c1v0NjYKHFrJkAeeeQRNDY2oq6uDs888wzcbjdKS0vx3HPPYdmyZbjrrrvQ2NiIkJAQfPLJJ8Kz8umnn2LKlCkwGvuJpNasWYMvv/wSjY2NmDlzJgICAnD+/Hk8/PDDOHDgAEJDQ3HkyBGsWLECRqMRW7Zswa9//WuUl5fj22+/xUMPPSShlx07dmDXrl24cuUK7HY7vv32W9mUQH+C+L777sNvf/tbPP/88xKr++STTzBmzBgAELf68ccfx5IlSxAdHQ2n04nHH38cOTk5qKiowHPPPQer1Yqvv/5ahOyVK1cQFBSEESNGiEXK+N9XX32FS5cuyUafMWOGeD82mw0HDx7E8uXLodPpUFlZCY/Hg3nz5gn3fltbG3bu3In58+ejubkZr7/+OhISErB582b09fVJIwKDwYDdu3cLg+SXX36Juro6zJ49WwjKvvvuO+zfvx+bN2/Gm2++iYaGBmzbtg0ffPABrl27hn379uHvf/87Fi9ejI6ODuzfvx+PPPIILBYLxo0bh6effloUKxU3E3PExjNko1bI2mw2fPjhh5gwYcKAOoqKigqsXLkSPp8PGzZswLPPPourV6+it7cXCxYswFNPPYUVK1aIOz5s2DBUVVVh7NixaGtrg7+/P4KCgvD0008L5QC7BrH+AujPJx09ehT+/v7Yu3evNKWJj4/HiRMncOLECRw5cgRnzpxBdnY2iouLMXr0aFRUVGD9+vUoLCzE8uXL8fTTT0sjiCeffBLXr19HVVWVhJ78/Pwkfm00GrFr1y7JeezcuRO33nortmzZgtbWVrzyyitISUmR3E55eTni4uKQn5+PzMxMGI1GxMbGClggLi5OajxGjx4tFj+LgDIzM2GxWFBaWoqJEyciKioKVqsV48ePR2dnJ2pqalBUVASfz4dTp05h1qxZkg9pa2vDpEmTEBQUhKCgIEyaNAnNzc347rvvMG3aNPT09KC8vBzh4eGizA8ePIiEhARER0cPoNe+fPmyQCCvX7+O6OhoQbacPHkSgYGBOHTokODok5KSxOsJCQmRMCIbBJWXl6Ourk54/1988UWsWrUKxcXFqKmpgcFgwJw5c0QZ7Nu3T2DPAQEBGDJkCBoa/tsOpnL9KIQ73WTC3+gG19bWIjs7Ww7VxYsXkZaWJpnwyZMno6WlBV1dXfj666+x5nsStPPnzyMwMFAKB6Kjo6WLTFFREex2u7jytLhcLhcsFgsAwGq14s4774Tb7caYMWNQVlYm2fampiYYDP0NqYcPHw4/Pz+UlZUhLy8Per0e7e3tGDZsmEAy6aJv2LABYWFhKC4uBtAPwUxPT0d9fT02btwIo9EIs9mMsrIyDBo0SDq//O1vf0NhYSGKi4sRGBiICRMmSKXekSNHAGAANYDP199+q6SkRDDVzNbX1NRA0zRs3rwZLS0tqKurQ3x8PJqamnDXXXchMjISV65cwZAhQ1BZWYmEhATYbDaMGDFCQhReb3/fx6KiIpSUlCA2NhYRERFoaWmBTqfDP/7xDzQ0NKCqqgqpqamora1FRESExA4BSB5k2LBhcDqdOH78uDQ9ZpEYk8ddXV3SA7a6uhr19fWIjo4Wy4Wx6uLiYly7dk3QCKWlpbj99ttRXFws0FK32y2YaIvFAo/HA4vFIh2szp49ix07doh1ziQ3D76aQGS45/LlywOKeQwGA0pKSpCeno62tjZ89tln0Ol0gsiwWCyw2Wyw2Ww4cOAA2tvbBQPd0NCAyMhI9Pb2Cj97b28vvvjiCzQ0NIjXwvARk/wWi0UgeSNGjIBO19+4JDAwEKWlpdDr9UhKSkJLS4uwPHL/sv6hqqoKfX19sFgsCAgIgNVqxV133SUoNrIhapqGgoICmEwm6PV6nDt3DllZWZL7aGhokOS60WhEfn4+oqKi4OfnJzz0gYGBCA4OlpyQz+eD2WxGZ2enKM22tjZERUWhr68PERERaG9vxx133IGoqCjB4QNAU1MTBg0aBJ/Ph7Nnzw4ouGtpaZEw37Vr1+RMXrhwQeCSiYmJ4tk0NTUhPT0dwcHBKC0thc1mg9vtxtWrV4VBlcCD8vJyOBwOSYy6XC5UV1cjJycHNpsNHR0dgvwiAig6OhrBwcEoLy9HUFAQGhoaJP7f3d2N2tpa5OTkyJwx2d/Q0IChQ4cKn7vFYkFUVBSampp+UK7+KOgHRo4cqe3bt08y7qwMfP755/HCCy+IK+pwOBARESGFKUxGWCwW4fI2GPqbcRgMBiQkJECn00kFmb+/P65evYrAwEDptsLEh16vFwFPJjjC7QjDIj0tyfSTkpIQHByMiooKsUYsFgt8Ph8GDx4Ml8slFmhISAjq6+sRFxeH8PBwtLe3IzIyEh6
PBy0t/f3Fmb2vrKxETk6O4Hbr6uqQkZEhKBEmX9LS0iQ5Rdc4MDAQLpcLjY2NwmYJ9MeIm5qaoGkaBg0aJHBIdu9JS0uTg15WVoaUlBRRgB6PR5pfc66uXbuGtLQ0qRhuaGiAx+NB2veNMTwej0DLKCTtdjscDgdCQkKEUInzyP6gUVFR/weFAiscQ0NDYTab0dfXh5aWFnR3d2PQoEFob28X7pD29nYRmGazGXa7HT6fD5GRkairq4Om9TM99vb2Sks4CgQSxDHnQ0SICitk/LmhoQHt7e2CYElPTwcAEfrx8fHo7e0VhAnnoLGxEXa7HYmJiQgLC4Pb7RZe/+zsbAA3oJnkZo+JiRGIJlEiDFfZbDaEhIQIyR0TlxkZGbBarQINZQyapfAejweRkZHw8+unOA4KCkJoaCh6enpQX1+PoKAgwaCz1oFeLguAiIJpbW1FRUUFqqur0dDQgMGDB6Ovr0/47FlIRMoLJlGJ/Glvb5dkM2GYRC81NTXJWnKszG+xIQ2rdtmMWq/XIyQkBA6HA8HBwbhy5QrcbjeysrKEH95qtWLQoEEyhw6HA4mJiYLBb2lpQXR0NBITE6HX6/HCCy+Id5qfny9jcTqdck4PHDiA5cuXo6amBk1NTcjJyRG0Dznbm5ubRTCfPn0at99+u9AksOWhz+cTQ8Rms2HYsGFSsc/3bGhoQHp6Oh577LF/Sj/woxDuBQUF2sGDByVhVVdXh+LiYmzfvh0ff/yxFP/wsAMYgEvn4rIYRi2FZwJIrVYjtpSwKFpcKib45upFYGArQBWmyLgaf86LG4UxQ45HJZLiYgI3ClA6OzsFk833oIWjJtMYKvF4POLaqnE4teCJ41YLmtS1p6Vlt9slfMU5JYTOaLzRkJjl3VwTbj7i/YnoUDHlfK5aoef1egdUhqrJVLXilBWW/HpzwpOWs0rPwHfk90w4c605V/yMWjXI9aai4lg5TxRwfD8KQMJ5ST/AdVbDaCounDhozgHXiMqaJfH8Oc8CjRKOne/BdSCcUSW142f5HjrdDfI4lZZALdpTi8L4O6KzVKjsp59+KrBkk8kkSiAwMFAscO5fUvtyLYiOoiJgM3oVS851aWpqEow65QDHy/PP+WGxGLllOCf0Mshh4/X2sy1GR0ejo6MDLS0tUtzIOTlx4gSGDh2KlJQU4Z8BgC+++AKJiYlISUmR6lvyvOt0OkRHR8Pn66cXUWs6goODxajjOKjg2IybvV157vg+5HoPDw/H008//ePnlqEA48Zva2vDyy+/LBuWVgMPqpq07OrqQkhIiLhAPFQszCAKh0KHwoh4Vx4Oeg4sViHTnMouSAHs8/mkUIHCkFWStPjZBJdEZHw2FQ0FJw+pmm9Qq/0o5FmqTHeNv6fFTXysWpCiFuEQ4UEWSFUAUjHw0FPxUfhS0FJxMoFGZUVFwq/8p1IAqNYHu9uoY/N6vVKlR2gsv6o4bJUNj0KZ78G5VqkNuLYqdl5Njvr7+0vYgVh2NUHKsTPhSqFPBUtrmjF7f39/dHZ2oqenB+3t7QPoGijMuKc4ZioN7h3SCbtcLtTW1opLT4XGAh56VKqhwr3HcQMQ747njfOlKluOk+dHNUhUbh2+o8fjEXgfACmzd7lcCAwMRFJSkiRhaQSxqIsxdxYIsvyfCq2npweJiYlyHlmTYTab5bMmk0nml0YAwRGqAlR5fohcSU9PR1dXl1SGBwQESK/b4OBglJWVScOPsLAwJCcnIzU1FQ0NDejr68P169cRGhqKsWPHoqGhAd3d3cjNzRWkDZPfzc3NMJlMUuRENsju7m6kpaWJdx8WFibt+Ww2m8ggla+H79be3o6YmJh/2SD7R2O5Hz9+XEqmaf2x5Fq1YGmlqZY7NwsVAHCjWrWnp0eQFzyAAESzk4yMwp+KALhRkKGyI/LeKpKC4+Jc3my9d3V1CaaeykYthKLQIZTOaDSKN0Dhrx5C9f48cCriiDwt/J5C2mAYSBjGSklWgbKkW/UsVE4SWtUcK6vxqEBU0jbghuXLcdKyVb0GWl0UrOq88B70OjjHqnLmuG5WmlxjHjLgRkGbKtS5FvwZ309Fb9HS5//5t1RkXKfOzk4R1BRiFIbq8yjUmbxVq20pAKk4XS6XCDauAzmWDAaDFMAxcafX93cB4n6nglUNI64fqzq5r/lMcrbU19dj3bp16Ovrw9y5c1FUVCReDM9DaWkprl27Jk2sKXTDw8PFU1b3NC1RxqnVPcR55HwQdcIYPYUgsfY+nw/Xr19HZmamxKN5r6CgIJl3FvmxapYeF+8fGRkJk8mEa9euyb5uamqSFnp6vR4xMTFob2+Xd7Hb7VJMGB4ejtbWVqkuz8jIgNPpREREBMxms1S20mil0A4JCUFlZaWEe/neLFLjGaXx4/F4pCEJ5dIvfvGLH7flDmCA66tW89FK58GmoOXfAJCDplpwAKTqTWWnCwkJEUZGWku0UijUeR8KQX42LCxMNiufy2pNPl+18JjgpHdBocdNz3sAEEuLB5JWNA+RWmlLYUiiLeAGBlmv18NkMsl4aPnSUqSQpQDle1OZUGDRM+A8q8qJSpcWEi1YNZzBi4qa1rAquCk06YGR6ZPCmxa1amlS6avVtnwGLUqGsLi2PCCq96V6AOpaqWsL3FCOvPhsde44Bs4bx841JmUBDRPOEd9NFbp6vR5Op1NCFFwbh8Mhljhj1Nx75BhyuVyIjY0FgAHsimplqhq64Xh43rjeKobf5XLB6XSisrISo0aNEo8M6Ach1NfXC6NqaGioKFiGSfhehG7SWKN31d3dLTF2FanEuDz3Gs8Nc0U833wmlTS9c84tLX232y3JXuYaSIdAZRUTEyMd1eLj4xEYGIjk5GRZv8bGRqSkpKC4uBhjx46F0+mEw+GA3W6H1+tFVlaWePQqvYfBYBC+eo/Hg87OTqEZjo2NFQgpK6wZSlKVOOeTNCxUnj90/UvhrtPpPgQwD0CLpmnDv//ZGgAPA2j9/mO/1TRt9/e/+w2AVQC8AH6qadre/8EzxO3m4bw5HkcCHgoUus+0BGhxk0KW1j0nhovNMmAeftUVVePMqtak0FWFEoW2GvvlZPNZFPQqmx/Hz/cDblTnUnBR0TEJRauRCoqxZT5PVYAsFaelSAXE+6hl+0QP0Nqm56KWsPv7+4sFSguQgo3PZXIMgFggN1vjtEbUmDoFgVrur84j15T7g54ElTcFNK0Y3ofzrgprxp+p7FWvhAKU68i5UAt41K5SHJPP55NiFIZnGMbh3mFVJxUX31lVqlQQDodjgJJjtyfir5kEJAqDc2Sz2SQ0QSROX18f4uPjkZWVBaPRKCyM9Po431wjKi16keRWIUlXVlbWAJI1TdNQXV2N9vZ2QbRwb9BIYzKeRhGJs9QQIBUJ54ANOVR+GIZ1OD6dTid0u/Hx8dJL1d/fX5RafX09AgIChHpa0zRpnuLz+VBRUSEeQW9vLzo6OsSCJtae4U4m/l0uF1pbWxEbGwudTofs7Gz4fD4JlVCoR0RESCcwm80mnEl1dXVyPhwOh3APmUwm2TcMua
jQcMoYtgmkHPhX3DL/E8v9IwBvAFh/08/Xapr2Z/UHOp0uF8BiAMMAJAL4RqfTZWma5sUPXIzbcrAMe9CaULlmeKjUrki8B90xupoUZGpjCgpAVRBTObAQxO12Izk5WTYyD7rNZhvAtqha2hwD3WAKVLWbEIW3GrcnIkW1CHn4Ge/X6XSSUOHh50V3VhUonEM+Q21qQmuYnBcBAQGShKKloSb3uLHUuDEFO4Uj34mbkd4Xf8cNTuibao3Tqqe1wkbDnDPGamkx8rkMy3FctIj5ngxn8N3Uz/Ar9x33kRri4R7iu5B5MTIyUt6bcXoKKP49Le2uri7h96bVyr0SFBQkVdcMH6hNGRg+4NrTOGAcmbkPVi2qIQmGcRoaGqDT6aTXLueQAkP1zKhQVMPDaDRi8uTJAIDk5GRZUwBobm5GcXGxdE8zGo3yPc8U35koJJ4xvf4Gvw73P40JPttgMAzwiLkHGP5RFQPpgtVuT35+/SyMKispw7MulwshISEDWkEyjMZxOxwOADe8OrvdjrFjxyIoKEhyDKrnEx0dLeFhdoBTPaWQkBDxOLh2hGhzzYODg4VPiEYhWVOp/FRFrMq1/+76lzh3TdMOA7D9q899fy0A8A9N03o0TasCcB3AmP/JHxJNoJI40friwaGw9vl80lCAE0z6VjVurQoC3oPan64qP8Nk1R//+Ed8++23sNlsAxJwvb292L59u8Td3nnnHcyePVvieKrQpgtOIRgcHCwbj4eGv2MyjpauXq/HHXfcIRuXnB78nF7f3y/02LFjQnm6bt06WCwWsQq+X7cBcVZatNwce/fuFQghN6CKHFIRGGqiWk2WsrMMC8Q4B+o4/Pz88NBDD+Fvf/ubhKu4phRILMMOCwvDp59+KnPsdDpl3Yh/p+BliEYl0GI8nAnid999VwQlv/LwMGbrcrkkJsuELt1ku90uoQk1QWa322Gz2eTZqoVPK3Pfvn0ICgoShRYaGioC2s/PDxs2bJCDe+rUKbzwwgvitbKuIy4uDnFxcTLPbBBTW1srMdvQ0FC8//772L17txT+tLW1Cdzv2LFj2L17N/bt2weLxQKr1SrGUXt7OzZu3CjUAQAk4U0vZ+jQocjOzpb+xnq9Hm1tbTh//rwkl2mB6nT9BWs8kzS2XnvtNRFsAKT+w+VyobKycgCqh8gRh8MhWHQqWJ/Ph7179+Ls2bOIiIiQ+o3g4GDExMSgsbERf/7znwUCGRgYiOjoaJELpDrwer3SCPvSpUuw2WzQNA1VVVVwu92orKzErl275LmBgYFITU0VAyU3NxcxMTGoq6tDSUkJqqqq5AwNGTJkAFiCIAqbzQaz2Yzw8HCBmTocDjlbVKrZ2dkICQlBQkIC0tPTkZiYKPuXiVubzSay5oeu/00R05M6ne6STqf7UKfTRXz/syQAFuUzdd//7AcvutzMoFMrq3Fw/p+ChTFfTgxjl9xURI+w8lBN1vj59Te15d8xllleXo7Zs2dj6dKlUkTAQxsQEIBNmzYhKSkJvb29WLFiBb744gsRGgxtUAGRpIjCS435EtpEi4ljZ8Jp06ZNADBg7Nyg/v7++Prrr5GSkiKFJCtWrJBu9pqmSUUeXUsePFrSLpcL27dvx/Dhw4Xjg2OXjfG9wmRYh3NLgUzhSsGrQk15kClsrVYrlixZItYYrQ5/f3/xQoiv3rt3r7i9DE2pDVfU0AuTmEzEc4w+nw/Hjh3DfffdJ40WentvsEACEAVD95cC3+v1SmKUFjGVLy0uktQRUqcyN/r59dMOHzt2DCaTSThH2tvbJdnKvchKzOPHj6OgoAAABqCu7HY7uru70dPTA5fLJVC62NhYEYyc36KiIlk3o9EoeHoqCovFgosXL+Ls2bOor69HV1cX4uLicP/998u541kMDAwUD5h7iPPGojPGw6OjoxEUFCSWM0MMxcXF0ks4Ly9P3qG9vV3WW9M0JCUliSHidDolKcw8gNpgxs/PT4oEzWYzTCaTePD0rh9++GE5B21tbQOS9LTGqcgiIiIwZMgQmM1mVFRU/D/UvXl0m+W1Pvp8kix5tuRJkuc5HmJncoAQEkIgDdAEKBCmNlAK/fV7wAgAACAASURBVME5paWHrp5CRzrRcy7ltJz2MCRQUtIGAiEhQEISQhwyx3ESx7HjRJ5t2bIsyZbl2bKs+4d5dl7za+m55/7WulytlZXElvR93/vud4/Pfja6urrQ3d0Nm82G4uJiFBYWSi2A0N729nZ88sknaGpqwujoKHJzc2G329HX14eYmBg5B/Hx8YiPjxeGWL1+BsJIqKzX64Wmaejv7xeQAVNc3Ee/3y8puPr6elgsFtGJ/J7Pe/1PlfuLAPIBzAfgAvDcpz//W+bkb8JxNE37X5qm1WiaVuP1emflSYHLSAduECvTat6blKEAZnlebrdbDiTDQL7oCYyOjsLtdosn63Q6ceLECWmMUvO6zGerXYL03MLhMPr6+gQW5vF4xPOlMRodHRVvi+EfUyzAZXQGu2pVSCa7/qjEurq6kJycLB5Ib28vAoGAKFg+m9PpFCWi5qHD4TA8Hg+6urowMTEzEpDQK/7d19eHrq4uacDh+gMzHYGkJaU3wehATQnw8E5MTCA7O3tWt+fQ0JBA0OgNEl/MZhtGbmywYQGOqZyuri709fUJhpmeGZ+zrq5Ovhe4nI7zeDxSx6EM0RjRg+QzBwIBWUd+x8DAACYnJ+H1emW+wPT0tBiG1tZWoXpluoxKmU05TU1NMu2ehbqioiL4fD7huOdZ4IEPhUIytIJG0mAwICEhQRQsIZbM/1NBsk5Bme/u7obL5RJyPKZzmH7q7++XexsfH5du3qGhIbS1tcmIOq/Xi+HhYfHgIyIikJ2dLTDi0dFRtLW1SfMQEVler1dQMMyXU37ogPHc8F64FuyIpg7gcwEQT7++vh4+n0+Uo9frhcfjEWeEkSzrTrxPnmGLxQKDwYCuri6YzWbZC6bXiHZhQ9jQ0JAYUeogNsUFgzPNlwQ5qMVwNszpdDNTnlgPooypAILo6Gi0t7dLao+Qy897/Y+UezgcdofD4VA4HJ4GsBGXUy9OAJnKWzMA/E0ChHA4vCEcDleGw+FK8jSrA5aNRiM2bdqEZcuW4ZVXXsHo6Ci+/e1vi2V79tlnpfK+fft2BAIB4RXp6urCgw8+KJOa6FUx3/zhhx8K+953vvMdwVYfOHAAJSUl0kAUDM5wnPDeCgoK0NLSgr/85S9YuXIl/vrXv6KhoQHNzc1Yvnw5fD6fEFzFxcUhLi4OL7/8skxnr6ioQG1tLY4dO4YVn5IrnTx5Etdddx2MRiM2b96MtrY2vPDCC9DpZrhdyH73gx/8ADExMXA6nXjkkUeQk5ODhoYGdHZ2YsWKFRLSf/TRR0J8tG7dulkoFkYNb731FjIzM/Hxxx+ju7sbjz76KF566SW0trZi3bp1giR48cUXJS3z5JNP4i9/+QuSkpJw/PhxNDQ0YPPmzejq6sKLL76IyclJrF27FnFxcfB6vdiwYQPi4+PR0NCA9evXIzo6GmfPnsXu3bsRERGB3
/72t3jxxRfR3t6O999/H+FwGE888QQWL16McDgMh8OBf/mXf0FUVBTWrl2Ln/zkJxgaGsLGjRvR09ODhoYGPPHEE7NQLUzbtLW14d1330U4HMbPf/5zfPvb34bD4cDHH38MTdPw+9//Hps2bcLOnTuxceNG3HLLLdDpdKivrxe+7hMnTsDlcsFkMuE3v/kNgsEgdu/eja1bt6KlpQVJSUn45je/iT179kjB7YUXXhBOk7KyMkxOTsqhplMSGxuLvXv3YtWqVbJno6OjSElJgd/vx/bt2yXsPnjwIAKBADo7O/HKK69IeodGNCkpCW63G7fccovQFtTX12NsbAzJyck4ePCgGKv+/n7U1NTA6/Vi165d2LZtG37wgx+gqqoKhw8fxu7du/Ef//EfOHz4MB566CFs3LgRu3fvRmtrK4xGI5588kmcPHkShw8fRiAQEJrtffv24Xe/+92swj2pbA0GAw4dOoS8vDzpkuU0rH379kGv1+PcuXPYs2ePIECee+45jI+Po729XRAjO3fulNw9h6PQyDGVQwfn+PHjyMnJgd/vR1VVFVpbW9Hc3Izf/va3MBqNonDJK9PX1wefzweHw4GysjJkZGTg2LFjMJvN0Ol0ePHFF3H48GGpn505cwYTExOoqalBTU0NrFYr8vPzAcx0oft8PrjdbvziF7/A6OgotmzZgqamJnzwwQc4ffo0kpOTceHCBaSlpWHv3r0wGAyoq6uTYvw777wjnDnV1dWwWCxob29HSkoKkpOT8fvf/14cstdf/2wZ9P+Actc0za789ysA6j/993sA7tE0zaRpWi6AQgDV/53vZK6SqQsAKC0tBQAUFBTA7/ejrq5OEAHbtm2TOYrLly/HgQMH8Nprr8FkMuH48eOYP3++5MyAy8NlGxsb8fzzz6OoqAg2m00aFYCZlnKz2TwLFUBvOxAIoKKiAk6nE3PnzoVeP8OyNzAwIF4B2+hVRMDmzZuRm5sr5GeRkZEoKytDfn4+4uLicObMGQnVSURWWlqK6elpHDp0SDilv/GNbyAuLg6NjY0oKyuTIQEAZkFFn3/+eZSVlcFsNmPt2rWzCp5MGdXW1sJsNiM/Px+ZmZl46qmnkJCQgMzMTMyZMwdWq1W8IobGDocDy5cvR1xcHEpKSnDy5EmUlpZKnpCpgMnJmfFp2dnZ0DQNp0+fRmFhITRNEwbPgYEBjI6OYt68efjTn/6EFStWwGq1AgAqKioQDofx2muvYXh4WDr7cnJycPDgQfz5z3+G0WhEQ0MDysvLBQkCXG5CIxWxzWaD0WjEkSNH8F//9V8oKiqSPHl5ebnk3PPy8qDX63H+/HmYzWY0Njbi1VdfRVpamox5IwVsV1cXsrOzZyEcIiMj8f7772NoaEiMen5+vnjRbF6hIWptbRX46NDQEBYsWIDMzEzBcMfGxsLhcGDv3r0wmUzo7u5GYWGh7DWjAcoz+VOamppQVVWFYDAIl8uFlJQUBINBZGRkSL8HURcs3g0NDcHpdGL37t1ISEjA0NAQMjMzodfr8cc//hHFxcWIi4tDZGSkRHNE9XAABqPbyclJSa0wddDR0YHIyEi43W588MEHSElJgdfrFbgqoyWmgbKzs0X5M/1XUFAgUZfNZoPJZEIgEEBvb6/MWuX4RZvNBp1OB4vFgqGhIRmAzUgsOjpaoiF2rXZ1dSEiIgIpKSnS/VlQUIDMzExJiZJUr7m5WfaRqWQWTNX5rnQmb7jhBhQUFGBgYEBoAwoLC9Hf34/CwkIYDAakpKTI/WZmZiI6OhoJCQk4fvw43G43Pv74Y6HXIGKGg7s/7/UPlbumaW8AOA5gjqZpTk3THgLwf2madl7TtDoA1wH4FwAIh8MNAN4CcAHAHgDf+kdIGQCzwnMWIIaHh7FgwQKUlZVh6dKleO+99wT5ERkZiQ0bNmDjxo246aaboNPp8OMf/xiVlZXCfviTn/xEwikai4iICHznO9+BzWaTKen79+9HbGwsTp8+LSkB4lHVosWf//xn3HnnnVi2bBmKiorw0EMP4eqrr8aqVauwefNm3HXXXYiMjERVVRUee+wxBINBuN1u4XHp7u7G1772NZSXl6O6uhrf+MY3EB8fjzfffBN33303xsbGkJeXh1//+te44ooroNPpcPfdd8PtduPxxx9HV1cXOjs7sWHDBmRkZIgX8+yzz+JrX/saxsfHBZ/LHOUjjzwi+VPWDzRtht7hvvvuw69+9St0dHSgvLwc999/P6qrq/HP//zPmJiYwBtvvIH169cDAF577TU8/PDDyMjIAAB8+OGHKCkpQWlpKZ599llce+21mJ6exrx586DX6zEwMICVK1fi4sWL2Lhxo/C3fPzxx7Db7YiNjcWzzz6L6667Dq+99hpSU1PFC7v11lsRERGBt99+W7jMo6KicMcdd+BnP/sZFi5ciIiICDz88MP47ne/K40zhHSywLxo0SIYjUbs378fy5YtQ1VVFRISEhAdHY1f/epXuPLKK3HLLbfg4MGDuPPOO+FyubBjxw5YrVY89dRTsNlsSEhIQH9/P44fP47MzEysXr0abrcbycnJiIiIQE5ODr785S/DYDDggw8+kMjl3LlzWL58OWw2G5KTk5Gfn4+MjAxYLBbs2bMHPp8PExMTGB0dxZkzZ3DPPffAZDJh7969uOaaaxAKhbBx40YUFRUhMTERK1askNkGKlbeYDBg165dwl+ydetWZGRkiJK79tprYTab0dfXJ2ykREe1trZieHhYuikPHToETZsZ0FFaWgq/3w+dTocTJ06gvr4eVVVVUuStra1FZWUlLBYLzpw5g6uuugp9fX2S/iMGm1FEIBBAU1MTzp49C7PZjIyMDNx6662YmppCXl6eDJ6ZmJjAokWLhHuH9bVFixZhenoa+/fvh9lsFu+f+oJe//Hjx0Ueent70d/fD7PZjEuXLsm0JZU3qrCwEGlpaWhoaEBFRYUUyvv7+zE1NYX6+nokJSUhLS0N3d3dOHfuHB588EGUlpaipKQEGRkZcDgccDgcklfPycnBJ598gsWLFws54JkzZ7B48WKYTCaBl27cuBFz585FfHw80tLSMDg4iLfeegulpaUip3RGkpOTkZycDL1+hqo4Ly8Pvb29/++HdYTD4XvD4bA9HA5HhMPhjHA4/Go4HF4fDofLw+FwRTgcviUcDruU9/86HA7nh8PhOeFw+MN/9P18sXBJRIbRODPKqqKiAhaLBa+//jpuu+02bN++HXfddRdcLhd+9KMf4YorrkBcXBwWLlwITZuhHKivr8fWrVtnwfh4IHJycmCxWDAxMYEf//jHeOSRRxAfH48dO3Zg/vz54v2pxbTu7m5s2bJFrPKjjz6Ke++9F5s2bUIoFILL5cJXv/pV+P1+LFmyBDfeeCP8fj9iY2ORkZGB/v5+/PSnP8Xtt98u6ZaioiIMDg5iYmICN910E1wuF/7pn/4JZrMZmzdvxh/+8Af84Ac/wGOPPYZHHnkEdrsdb7/9Nnw+Hzo6OrB582YhUnvggQfQ3d0t8C9iz99++23JEbIgODAwgJtuugmr
Vq3C2bNnceTIEbz99tvweDz405/+hKysLOzYsQPbt29HQ0MDdu/ejaysLOHzeOGFF3Du3DksW7YMo6Ojghc+f/48bDYbvF4v9u7di6ioKOzatQuDg4PYtGkTzp8/L/lShtoNDQ3isRw8eBBr165Fd3c3pqenhbTpo48+wrp165CQkCBNNGlpaWhqaoJKNsdCFpFQiYmJaG9vxz333INnnnkGKSkpSEhIQFRUlFybab2ioiJhb9TpdFIjGB8fx/PPP49169ZJnnPlypVS1P7hD38Ip9MpXZmFhYW4dOkS3nnnHblPi8Uiw7U5g+BLX/oSHA4H4uLisH//fsyZMweJiYn45JNP4HK50NbWhrKyMmn8cTqdaG9vh8/nkyIwi4M0PgMDAygvL0coFBKWxKamJhw8eFDSGADEk66qqsLNN98Mr9eLkZERwaKPjo6iu7tbjIjH48GZM2ewdOlSyW37fD5kZ2ejv78fycnJmDdvnqBo7Ha7FGg/+eQTlJeXo66uDoWFhaKgRkZGUF9fLyAJFg27u7uRn58v1ArqbFODYYZ35dChQ+LgEXI5NDSEnp4e6PV6dHV1YWxsTIrU4XAYVVVVWLVqFRwOh0SiRHSR+XTRokW4dOkS/H4/srKypKC9ZMkSKTaTJ95kMmFgYAAJCQmIiIiA1WpFQ0ODsEFqmoYrr7xSZr5mZGQgOTlZisculwtWq1Vqgiy6JycnCzKOE+iSkpIQExODoaEhfPDBB1I4P3PmjEAu/65OffpTmtz/L18vv/zy0w8//LCEacyrG41G7Ny5E5OTM0xuwWAQN954o3yusbERRqMRS5cuhdVqRWdnpxQhOVVF7UCdnJyE1WpFR0cHxsbGUFRUhNWrV8NoNGLDhg1YuHAhli1bJjAmFmODwSDa29sRCoVQUVGBEydOQK/XIyUlBUVFRTh37pzQo3700UcwmUyYN28eTCYTDhw4gNHRUezduxcPPvigeBxELfT09CAUCmHp0qU4duwYTCYTli5dCrvdLrA2h8OBW265BcFgUMZ4zZ07F4mJiTh8+DAiIyNRWVkJk8mEY8eOAQC6uroEwsYCDtMQkZGRKC4uxoULF+DxeLB27VqEQiF8+OGHWLduHbxeL+rq6lBSUoKKigrk5+dj586dwsT4la98RVI309PTcLvd8Pl8aGhogMFgQF5eHqxWK4LBIBwOB0pLS3HVVVchGAyiv78fLpcLZrMZRUVFElYHAgG0tLRgZGQElZWVcLvd8Hq9aG5uxpVXXgmbzSZpkXA4jKGhIcydO1eY9Iiw0LQZ8qiOjg4MDw/j5ptvluk6AwMDcLvdSExMRGFhIaKionDixAkYjUa8++67KCsrw+rVq2G329HT04PJyUnk5uZi5cqVMJlM0hWckZEBt9sNl8slxoFFbzoTU1NTWLBggRTxgJkItaOjA+Pj41iwYAFSUlKwZ88eqXMcOHAARUVFKCkpEaXDRi5GEuzqJA3BpUuXkJubi4yMDOTn5wsAgfj2S5cuzeqUjI6ORnZ2ttRyiAhJSkrC4OCgtLjHxsbC7/fLjF6mEXW6Gb76srIyxMXF4cKFC0hMTJRuaavVKkVv0nuUlJSgsLBQDACRWETTtLW1ISEhAXq9HomJiYiMjERHRwcASAoiPj4ezc3NyM3NRWpqqiBMEhISpIDf2dmJuXPnIioqCqdPn8Y111wDo9GIuro6Mb5MX2VlZQl+vKGhQQwJHUt2mHo8HhiNRqSkpCApKUkKpXq9HklJSVJbYIqGEE4+LxsCOzs74fV6MTExgcTERCQnJ8PpdELTZoZ15OTkICYmBi6XC8PDw7Db7YIGIsz5woULuOaaa6DX63HixAl86UtfwtatW11PP/30hr+lV78w3DIff/yxCODIyIhAsOh9s42agqPCJ9klptPpZDI6BZo/Zysvc+LMCxJd88Mf/hDPP/880tPTJWfOdJFaYWeBkfUBwsWYdyTcLTIyUmBVhw8fxlNPPYWTJ08iFAoJjp95x/HxcSFm4vWIfyVLI71Mrgc7H4kRVwnRCAtTce48fERecK1JKEXFzwIVlZIKf5yamhLvhfdK2B7XhzQJaj8BoyDmitXmL51ONwsdQiQS6WqfffZZ/PznP0dERIQ0bbBJhWvF52KXJqMUUsCq/CLMWQeDQVRXVyMtLQ35+fl45JFH8NOf/hRWq1Xul5h/nU43i8ud1+DvyTao8tADl7tw2WRD+C4A2QPmdFkkBS7PZOX3RUdHy8xRAAgEArNmbvI72bDHKG5sbAwjIyMYGRlBf38/SkpKBHvOvDGLuoTdjo+Pw+12Q6fTwel0IjExcVZ3LPdncnISNptNGoIAyBQr9owQPsi5AmqXLwDJ3QcCAVitVpGZiIgIQYexkYmfJ9yU9SNi1r1er6RzAKCurg7j4+MoKCiQulkwGMTJkydRUVEhg90pyzwfQ0NDaG9vh043wyfDgRhMO3k8HhQUFGBqakrmObAr3mq1IhAIYGBgQM4r1yIQCMjZ4RkbHBwUyO/o6CjGxsZgs9kkhcrIPikpCe3t7Xj77bfx5JNPyvOEQiF8/etf/7vcMl+IYR1UFCoWm/lhNvEQr0psNKln+XMqXCJkqCz4PcQoq116wAx8at++fbjtttuEnpPKkxtPpUqFAmAWNI+pAXqQPAD//u//Dq/XixMnTuCuu+6ahVwhtJAFQbU2wGImC3FqxyBRCQCk+UrlMWG08VmjTaNCHLXa/EXIFTHDKk0sDY1ef5mYinvEdeF6sztUbZjiOrN3gY07REaFQiGBudJIHzp0CA0NDcIRPzg4KF6xCmtVG6b4bwCiJGgg+fyxsbGzmskcDgdGRkZw5MgRrF+/Xoq6VKo8RHypz8nWdV6bh1SlHVDXkFhs7iELkcTh01Hhc0VGRsJsNiMhIUFQKFarFXa7fVahl4aFskLHhntrMpmkmM+WeKPRiOTkZMFNq0My+IwkxGLBkPBDQjJTUlIkSqLcMfXD9n21H4GKH7hMG0FFT2ZL9osQoUbZ5t7yO3k9Gkg6F3QAx8fHcfbsWdTV1c1CJJlMJmRkZODChQsSXXm9XrS3t2NsbAy9vb0IBoMwm82IiYmRRid2eHNdvF6vpJNU/n/m+S0WizBe8lzz3KnnhI4N751RCL35wcFBHDhwABMTE+jo6MAVV1wxqznzH9EPfCE89/nz54cPHDgwq4BJr4geH5U3KV254Z/9zPj4uBxgbjwwe5QfC47EhA8NDcFqtcr30rtUuV6mpqaQkJAgConKS238IUKAiz84OCiHTi1qqp+jIlKfgdEJr81DQRQPC868H7Wxh7hlVYDoXapt/yp3BT1h4tJVegOuiUrToBpOlWaAWGYaB3q3qtIFLjNlqnQMmqbB4XBgYGAAfr8fPp8PiYmJMJlMctgyMjKE+oFrqHbU8hCqUQTzuVxj/s00HZkJ1U5jRgFGo3EWXbOaYuH7iDtmlESDR1mgHNK54LN+tveCe8dnYXTAtWX0wGdnQ8zIyAja2toAQLx0l8slfQVDQ0MYHh6G0WiUcXJDQ0PIzs6WYRv0Pru7u5GXlyf
FeSLAONGMOH2SajFKnZ6elhRDfn6+KDaeI07qYpTtdrsFox8TEyPdslNTU8IFD2BWtMo6Etde9eopb+ytGBsbk45QOg40UmrvAAdV09GoqKiAXq+Hy+WSKJUduxzzabfbceLECXR3d2P58uUoKiqadY4ISQUgiDkaVTYtApejFpX+OC4uDj09PUhLS0MgEBC6EyKdEhISZO/ZGPnggw9+sVkhuej0xllo0TRNDiaVC4sbwGXrTY+T3haLsiqBlgq1pEdHi0naUV6P30dFS8vMqe1s+AAuswfSyKjPYLPZJA3CMJKCQAoC8uao6R9CAEmoRIXFZwiHw7M4aVRlC1zmWqenzOvSO2bKi0pdTXMxjULFzfSJSqlM40hlwzVS0y2MKniPND7cEzUK8fv9MsKQ7d42m008sUAgIHtts9mkFZ5GkN/NtRoaGhJoXVRUlHSmqt269Gw5/YpywX9TBqgUuN80ggCEeI4GhDLIBjbuBZX330pbqUaSBlJNL6qpQCJKSDxFp6GoqEhCfzaiqc1eXG++hoeHxfN0Op2Ynp5GRkYGcnJy4PP5xHCNj4/j/Pnz6O7uljF4TI0NDAxAr9cLrQJhherAGCKd6O0zPcS1Vam6uV+USVIIqGvKpiF6wDSQbIgiSZhOp0NJScmsSHlsbAwWi0XkkogbUhSMjY3B5XKJjLOhUKfTicGJioqC1+tFTk4O5syZg9TUVHR2dsp9FxcXw2AwwOv1Ij4+HrGxsejq6pJnY5cxZYqeP50Gko6p8sUUMmccUD8wRfW5evV/pI3/D79UxaQKIj0wNVWiphwYplPBAJdTGvSkgMser4rJVT1/HtzPhvwqyuaz4SDvhfentg2reVUqH4ZbDD9VegIeNIauPNxqrpswPzUU47NTsdFwqHlNrgWbJLhG/J0aOdBLZOqLv+P6UmnwGmQ7VBU8150KS41QaPS4L3xvT0+P4JRJNgVg1ntp3EZHR6V7kp2kdAb4edYQmA5g2oGKmevD97BGAVxmoqRM8t98BjUKIH8OHREqa5XMjPtNY8bojUqEa6I+Lw0c15YGgntKY6Hup6ZpiI+PFz4aRhT8HjWNODw8jN7eXskxk1BrYGAAXq9XFBBrATTYJDBjBKgONeE0KTUl9VnqCjUVyGvSS+d7udZsIuJ60CGhY8YzrdPpRNlR1ik/bEakHNC583q9sodZWVkIh8OIj4+XYij3nZBJn88Hg8Eg/Srs7SBahj0VNFoxMTHicXPUIIBZVB90RBhZhMNhMbp8hujoaHE+1BoXZYzP+fdeXwjPXX1R8aoHnIJNj5kCxA3/LPe4ahn5e4apVLLqIWO+zG63z8p3c2IK74VGgIpFDZfZXEVvQ60TfNZghcNhKeDQG+e9BAIBSRHQMyS3Bb1P3iMwY3TUAiw3Xk1XcU1VRckDonqGXBM1T82/VYMKQOBkVGhUbmwxpyfH3gT+nzlilTvE6XRicnISFosFIyMjcDgc4qkwbcDIjfdBildGMFQmanRE2BqjJN6nypUyPT3D8a+m79QIRq3bUAYYbamFXKZguB7qunHvVSeGB57rTwVPmaKnx+cCLtNS8xpcC16XjktZWRnGx8fh9/thtVol+qEizMrKQmpqKiYnJwWlwjQVZZOKmLKsUjSQS8bv9wu7oqqA1M+pvCykLmb9Q80dq4M1WJSljDGy5Pg75uXj4uIkPTk8PAyr1SpKkpEMi5cARF/MnTsXg4OD4kDRay8uLhbjTjoJAEhMTJRpb3q9HuXl5XJvRUVFMkh7ZGREoo6hoSHYbDb4fD6RP9WB5XWo2+gsGQwGeDwe6HQ6qQGGw2FBKtHJ4yzhz3t9ITz3z+Yg1Tw2BYAbTIsHYFbKgl4tBUn9DiosHmwqdB4On8+HEydOSD6dRRlen9dSC2G8XxqLiYkJvPTSS7OKieqzUKFSAU9MTMzy9jVNw549e3D8+HHs2rULW7duFVSCqjypVOjV836ZG+QzqflbChUFhX/Ue1M/p+btuTdqGAxc9rTo9fIzRKWoh3JqagqdnZ3Yv3+/rCe/m0RS9IojIiKE6U9NIdHLIyvi+++/PyviYF1ErYlQ2alFTzoJ9AhVI6zWIrgmVD7q2nFP1EIp/63WsBhRce1oJHp7e4UFVZUx9bNqyovPp6YpqcxHR0fx3HPPwefzzYpGY2JikJKSgtzcXBQVFcFut0sxlh2bXCfuJVlIuc+9vb3o7e2VSV1xcXECL6bnTu719vb2WWumpkfZCavmn1XngzJMFFlDQwPOnj07K5XE9aOnHAqF0NPTg08++UTOa29vr3jo/BmNAQ1wKDTD/0OWVdZ+MjIy4HK5hLedUWJ8fPz/5hyw+cvv90s9g8g88tazE5byp57FAM43QQAAIABJREFU6OhoQcepPQvcc8oio1+mtviHUYuq4/7W6wuj3Omd8AHoTaoQPypX1QMCLg93Zs6Si8I0ATATAZDjm9fkgXz55ZdRX18vHgxweZQcP89UAO/PYDCgp6cHR44ckfvcsWMH4uLi5IAQBsXPswBEr4iKBAAee+wxLF++HKtWrcLNN9+M1tZWQUqoxTSGtXFxcVKppyeqGhyVMK2zsxMff/yxFAVVNkQAwmbHSIHPqSIUGDGpxT71WfgZALOUKQBZD3ZS0tgODw/PGmDg9Xrxy1/+EgUFBdDr9bDZbLBarVIo42FkAZQGhMpGRSXwbypwNR/MCJAesEorzMiCB5IKmsbzsy/KIhUxybvUNaBc8hlYKFPz8FQC09PT6OrqwvHjx8VbVmGSdDKIHEtMTERnZyeSkpJmOT98ZqYI8vLykJ2djbKyMlxxxRWIjY1FUlLSrLkIweDMmEWv1yv7wtTQ0NCQeKdEs5BYToXTMqohyRYdmejoaGkMoxdPJcg6l8lkwjvvvIPx8XFB8lC+SDVCIxAZGYmmpia0t7cjHA4LIILnn8aTEakamfE88nkPHz4sUY7FYoHb7ZbxeEzpkaqXQ7T9fr+8x2g0SvQwPDwszWvkxhkYGEBnZ6dEfYyQIyIipOmQUQSjKdanVOeVRorZCNXZ+luvL0RahqE7w3jVylJx0hozzKWCUPOWLKLS0vGlpiDUoqLBYMCFCxdw33334Te/+Y2EePwMDyqVND9HJMyrr76K+++/H8CMECUnJ0sBhYVXGhh6ErTYanGNhuEPf/iDKKX169fDaDTC6/XOGnFH0v+IiAh4PB7JyVEReTweqabHx8ejr68Pr7/+Ou6++24hMCOviJrqoGEj3zmhb9wXek4qyoZ1A6KTqChYtGKIrNfrsXXrVjz99NOSU3Q6nQId4/N9+OGHMBgMcLvdSEpKEv4Qi8UihdeUlBS88847uPfee2fh49k1SKNKpa7T6dDX1ycsfmp+3ePxCDSThXrKXWxsrDgDKiKGVL9se+fMWUYCiYmJszxttVhKGY2PjxfmUuZ0gRnFHQgE8N577+HGG29Ec3OzpOi47qSXUJXrnXfeKe/h+jLVwWHoExMTolg0TUNlZaVwl3NwS3JyMoxGo7Bs8jkvXrwIs9kMm80m59VoNCI7Oxt+vx/V1dX40p
e+JAqeTIiBQEAcAxppvV4vMwTYIMSCfSg00+19yy23yOxSDpY2m83Cq09DcPbsWVx//fVCcWAwzPC00Ei63W6YzWZ0d3eLzMbExCA+Pl5G5/n9fqSmpmJ6ehqtra0SPTidTilkulwuDA4OorKyUhxC4tRTU1MRFxeH7u5uIXUjvTFTyrW1tZgzZ44wlEZFRclgmqSkJOnSDoVmGFnZn6FGlVT+LLgyAv281xdGuVNRU/GpaQLmwD6LLmC+lUVN4HJOWFXk/H4uCL9nbGwMZ86cwR133IFAICAHmE0Uhw4dgk6nw5w5c4Qvnd7gwYMHceTIESxatAh5eXmCJKirq0NzczO+/vWvS3PHqVOn4PP5hLho7dq1s4ptjFxef/11lJSUYPHixcjMzMTIyIgw5pWXl8PhcGDlypXIzs6Gy+XCyZMnMT09jcTERCxbtgwtLS3Yu3cv8vPz5aBYLBYcPHgQ5eXlKCwsRGFhIU6cOCHIFE3TcNttt4lyOnTokGB9y8vLUVtbi56eHqxfvx5jY2Po6OhAUVER9Ho99u3bJ2HytddeKykK7mFNTQ1cLhfGx8dRU1MjkUEwGMTx48cxPDyMQCCApUuXorW1FXV1dbDZbAJji4mJwYULF8SwVlZWIjY2FufOncP8+fMxMTGBvLw85OTkSJv89PQ04uPjsXjxYuj1ehw4cEDyqiRS83g8uHjxotAR5+fni5dNZ2FwcBCnT5+G3+/HvHnz5L6qq6vR29uLlJQUZGRkoL6+Hn19fcjLy8Pg4KDkZAOBgODzw+EwcnNzAQBNTU1oaWnBLbfcIrBF8rgsXLgQJ06cQE1NDTIyMpCRkQG73Q6Xy4XR0VH4fD6sWLECwEx3djAYREdHB1auXCkDI+jUXLhwAW63G0uXLhW2yGXLlsHn86GmpgYPP/ywDIXw+Xzo7u4WVAvx1qdPn8aCBQvkPJBgj9DLm266CZqmyUi7cHhm4IXBYIDVakVXVxf0ej1KSkok0mNuv6+vT5oVy8rKMDY2Jp/t6emRDlK1qKsS0wEQdExHRwf6+vowf/58uY7P58PY2BjOnTuHzMxM9Pf3y6AVs9ks09ZaWlpgNBrh8XhEyRKllZCQgM7OTqESUeHEbrcb4XAYc+bMQU9PDy5cuICCggJMTk5i//79KC0txdjYGBobG9HY2AiLxSJNZv39/UJ7nZqaCo/Hg9raWtjtdtm7iooKxMbGigNDWgXSOLS2toqx+HuvL0RahggN4HJahoqAP2OxQS04qSOyeDDV6rrqITPPOjIyItC1V199FYWFhfD7/ejo6BDIXDAYxG233YaSkhLMnz9/VthOnpZweIab+dZbb4Ver8fRo0fxwAMPYM2aNejs7MTw8DAMBgOqqqpw/vx5LFy4UDgvmAogUmNkZATHjh3D8PAw3njjDdx2222Ij4/HqVOnYDabsWHDBixZsgQOhwPr1q3DH/7wBzz11FO4/fbb8eyzz6K6uhqBQACHDx/GwYMHsWDBAqxduxalpaXQtJmBALfffjtKSkrQ0dEBh8OBG2+8EW63G8888ww0TUNLSwsefvhhLF26FHFxcTh58iT27t2L8vJybN++HZGRkdi2bZukgrZs2QIAQiugpnlIO3Dp0iXccMMNGBgYwPDwMKKionDhwgWsX78e1113Ha699lps27YNoVAIeXl5GBkZwX333QebzYbOzk48++yzqKyshMvlwpYtW6TxxuFw4Nprr8Xq1avx4IMPCnfPDTfcgIULF+Ls2bOIiopCX18f3nzzTWmVB2YikZ/85CdISkqC1WqF2+2WFAIV/BtvvIHvf//7WLJkCY4dO4Zjx47hueeew3e/+11EREQgISEBv/jFL3Dx4kXo9Xps374dSUlJKCsrw0svvSRTvJ5++mlJJwBAe3s7LBYL9u3bB7/fj2eeeQY7d+5EZmamyBSjT7vdjlAohFOnTuHkyZMyK6Cvrw9vvfWWeJ779u1DQ0MDqqursWfPHnz00UfYu3cvLly4gO7ubuzYsQMOhwO9vb04d+6cKCx2XL799tt48803UVpail27duH8+fPQ6XTwer0ScRQVFSEtLQ3nz58HMDNyb8+ePcJKyQHRbOCh53/06FHxvPkyGo3YunUr4uPjMW/ePKSkpODUqVOIj4/HwoULcdVVV2Hp0qUS8ZLf/oUXXkBGRgbC4TD8fj/C4TBycnKQnZ2N9PR0nDx5UuoBJ06cwPT0NLKzs6HT6YRSNzIyEmfOnBFlbbFYcPHiRdxwww1ChJadnY2cnBykpKSgubkZHo8H6enpiIyMhN/vh8PhQGdnp+yZw+FAT08P2traYLPZYLfb8e6776K6ulrGAQ4PD6OyshLZ2dnSNMVu1UAggK6uLly6dAnHjh1DWVkZ3nvvPdEHmqZhy5YtmDNnDsrLy+H3+9Hc3Ayn0ylUw3/v9YVQ7gxfPwvJY9jK8EQt9gGXc7tqwZGeteoVA5fhYmrePhAIwOVy4fTp0xgYGJC8HHN4P//5z/HSSy/JVHPm/6anp3HkyBGkp18eMnXu3DlcffXVGBoaQnNzs+SXt2/fDqPRiNraWkRFReHee++d1ekaFRWFwcFB2Gw2fP3rX8d3v/td1NXVIRwOIzs7G3V1dcjKypI0zOTkJLZu3So0usFgEAsWLIDJZMI111yDrq4upKenQ6fTYdGiRTh8+LB4OtPT09i5cyeWLFkiVKfFxcXQ6XTYuXMnLl26hKioKCxatAj3338/lixZglAohEWLFmFsbAxnz55FUlKSNHW88sor+OUvf4nKykqJUmhsd+3aJdfp6OjAnDlzAAB79uxBW1ubhKN5eXmCArJarVLAOn36NJxOJwyGmek7eXl5sFgs0Ol0gopgMayzsxP5+fnQ6XRCoMVUTUNDA371q1/JXNbGxkZJi3R3d2P+/PmzeiDC4Rl2xdTUVPT19eGKK65ASkoKPvroI6SkpIicMqQuLCyE3W6X8XC5ublCqWs0GrF37140NTXBYDDAbDajpaVFeHlSU1PR3t6Ojz76SNAjDocDFosFg4ODGBwcRE1NjfCbR0dHw+Px4MiRI8KbTjgkvVuen5SUFPT29srs0+bmZkRERKCjowNNTU2iVN5//33J109OTiIrK0vSk0zjpaenQ9M0lJaWwmw2o6enR1rm29vbBdURExODvr4+WafJyUkUFBQIYonYcXbY0lGjBz48PCwRjlo4np6ehtlsxltvvQWj0Sh0CBUVFYLcYa1hZGQEZ8+eFYQPYcXkRY+LixO6b9J3jI6Owu/3IzExEVarFbGxsdDr9bBYLKipqcGePXukmYtdr/39/cKpk5mZib6+PmiaJnlys9kMo9Eo3P+ssdXW1iIcDqO1tVXqGSkpKRgYGMDcuXMl5ZqXlycc8pQXk8mE9PR0HD16FACEf+fvvb4Qyh2AKHA2mqgIDHqEfA/zpvzDAoV6QFUMOnC56MqC4bZt2/C9730Pa9aswZo1azAxMSHzJf1+Px5++GH88Ic/RHt7u3TU0YBER0dj9+7deOCBB3Du3Dn09PRg586dSE5OFuV16dIldHR04
KOPPsIDDzyAm2++GXfddRcyMzMFNhcOz0x7/9nPfgaj0Sg0n8Te5uXl4f3338cDDzwAg8GA6upq/OhHP0JXVxduv/12TE5Oori4GBUVFYiIiMCcOXOkcYrIIX7+7Nmz6OnpwcaNG5Geno5gMIi9e/fioYceQkdHBzZt2oQVK1YgFAqJ0JaUlOC9997Drbfeivr6ehw8eFBC4NbWVvzxj3/EsmXLsH37dgCQ/LTJZMKGDRsk3XHgwAHcf//9cDgceOWVV7By5UpRVN/85jdF6a5Zs0YY8LZt24YFCxbAarXizJkzeOihh2RMG1kIydlz5MgR3HTTTQiHw9iyZQuKiopw/PhxbN68Ge+//z6+973v4fHHH8fk5CSOHz+Oxx9/HKtXrxYSOhr+kZERdHV1obm5GXl5eWhra5N6gMvlwsqVK0XJVFRUSE2Egzfq6+vx1a9+VVAmv/vd7/C9730PmzZtkk7b3bt346677sLAwABWrVqFp556Ct3d3Xj11Veh1+tx/PhxXH311UJQVVtbi0WLFmHhwoUoLi7G0aNH4XQ6hbPFZDKJcmMnLx2GCxcuiNw2NDRgenpaphtVV1ejvr4eXV1dYryzs7ORlZUFAIK2CYfDwleTkpKCQCCA2tpazJ07F16vFw0NDViyZImgpN577z2kpqYiMjISmZmZSE5OFqdsYmICx44dEwrfwcFB7N27F0uXLgUAnD59WhqyiJKy2Wy4dOkS1qxZg9WrV+PNN9+EpmloamqSjtITJ07gqquuwsjICKqrq+F2uxEXF4ehoSHU19djyZIliI6ORktLC66++mpYLBZR0DabTZw+m80mUUZPTw88Hg/Wr1+PZcuW4dy5c5iensa5c+dgs9mQk5OD4uJiGYAeHx8Pj8cDAFiyZAnKy8uRkpKCmpoaXHvttejs7ERUVBROnTol6VHSM9MY5OTkoLe3F0uWLEF2djaMxpmhMQsWLJB5yb29vTh27BiWLVuGuXPnfq5O/UKwQm7YsOHphx9+eBaZFENC5tBVDDZ/zoVRUR2sXtNzVw3B5OTMRPj9+/fj4MGDuPnmmxEKhXDmzBkJrRcuXIjR0VE0NjbKiK/Vq1dL6ojRxY4dO3DttdciIiIChw4dwtDQEG699VY899xzEtZVVFQgNzcXbW1t8Hq9QnPK3Jqmafjwww9RXV0tOPlTp07h8ccfl+LySy+9hIULF+LixYt44okncOWVV8LhcMBkMqG5uRnj4+Pw+XxYvHgxurq6YLPZBG0CANu3b8c111yD6OhoYcWbnp5Gc3MzWltbkZmZKURK3d3dwvQXCoWQmZkp0c2ZM2dkEMOiRYtQVVUl3tC6detmoT+4VhMTE3A4HGhpaUFOTg4qKiqkEKnT6WRQydVXXw2XyyW46LKyMiQlJWFsbEyoe/Py8lBRUYGYmBhUVVWJIv7a176G1NRU1NXVoaurS9bYbDYLra7T6cSaNWuQnZ2NgoIC7Nu3DxMTE2hra5vlMXs8HjidTkGPhMNhdHd3S76URcuCggLceOONiImJwalTp1BZWYnJyUns2LEDubm5s7pqh4eHRRFwPfLz85GQkIC+vj4ppK1evRpJSUk4cuQIUlNTJf2Tnp4Or9cr2O6CggKMj4/DYDDg0qVLMnSZSjQpKQnT09M4ffo0pqenhVfc4/GgtLQUExMTcLvdyM/PF2505tQHBweFJqC+vh5WqxWJiYmYmJjA0aNHsWzZMqSkpODkyZNYsGCB8MKnpKQIyog01PX19SgrK5MOVqZY7Ha7pC1bWlpQUFAg0duePXtw3XXXST2N9Qru4+TkJMrLy5GZmYn3338fc+fOhdFoRFVVFa688koZlMIemKmpKdjtdmRmZsJsNqO9vR2rV68WNsrh4WEZoqHT6YSyd3R0VGpSJpNJhmjEx8cjOTlZ9ElHR4c0ReXl5cFsNqOjo0NQQQkJCairq0POp5OhMjMzkZaWNovojUg1DlN3u92w2+1y1tLS0tDe3i4DQxISEpCbmwu320398XdZIb8Qyv2ll156+sEHHxQ4HvPe9KjogdPbZXGUaBoiBJgzpXdPoSLmNhQKiQCnp6fLRCWXy4WsrCzk5uaisLBQ0CQGgwHz5s2TsIgGg5Y2Li4OixcvhqZpKC8vR3Z2NpKSkmC327FkyRIkJSUhPT0dbrcbERERyM3NRc6n01PUZpmKigpo2gw5lM1mQ2FhocD+WlpacP311yMmJgbl5eXQNA1paWnSQMHJ7KQIJlUo1y8hIQEpKSlYvHgxjEYjMjIy4PV6YTabUVJSgpiYGJnIRCVWUFAg4TE798rLy5GXl4f58+fDarUKMobGhAqRxpdFLLPZjLy8PAmH7XY7LBYLkpKSUFFRgfHxcaEmDgaDMsUpOzsbCQkJsFgsMg2ooKBA0llJSUlYsGCB5E7dbjeKi4tleHJxcTGKiooQCARgMpmwYMECwWZz6LLFYpHOzO7ubqmD5OTkSFqA8LjMzExpwiGdKyO5rKwshEIz1Ah6/QyPu81mE5wz942yazabYbfbZbh2Tk6OdDiS6TE7O1uQHSzEpaamwmq1Ijo6WlgGExMThaiKKBKiR7KyspCVlQW9Xi/rRMRMSUkJAEjxNDU1VepSvA+DwSCY+OjoaBmgQWQR0TUkt9M0TdgWe3p6kJ+fPytVOjw8LE1MAJCZmYnU1FSJNI4cOYJly5YJTJEwYIvFImc9OTkZ0dHRYujoSMTExCApKQlDQ0NIS0sTZBkNJeGy6enpQm1MVFZ6erqkUMbGxiSNSTy6Xj9D7xsZGYnExESMjIzIQA0i4ZKSkmQoNteTBG2JiYmyfuQE0uv1sn6EN0dFRcnw88TEROmAJYzaZDKJTPT09HD4yxeb8nf+/Pnhw4cPC6SMXrja0aVCEal4qcgJo1Jx6fTmWW1XjYKKkQ8GgwIdpLKl5VXz9ioKh9/P7+F7WFRUibcAiNfHtVa7VdkAQ+w4MEOXMDIygg8//BDJycnilai8GnxeFUvNa6mdqvRi+Dfzsvw9P8PGMHbescDNyEeNlvh5tYCq1jRIysT3EnPP9WTOmkKtdiBzbbnfPDxqlyevzWIV15voKLVDkgV64txDoRniLA54Jp6YKAleh3BKdb+ZZ1ebbgg17OvrE9wzcJmagN/PPWZqxGQySeFVTfkRD01uEq6J2ojDXCxz2Ix4yIjK7spQaGYAxPDwsDg5lGPKEOGC5DMiFUR8fDzsdrukKtkQFB8fL3NIed9cs/HxcbS2tsJsNqO3txfLly8XueM1CZGljE1OTuKvf/0rcnNzEQwG8eUvf3lWU5hKacy0B1EtgUAAoVAIKSkpIhdEwxDS6Pf7Jf1BGfX5fNIDQGAGIYxerxfFxcVwOp2IiIhAcnIyRkZG4PP5YLFYZvWKsHkpGAwKfJGyz/PIISgDAwMSJaqOF/d3cHBQGjApN+p+8VwR7srO2/vvv///H8RhPLCqMlR/zg0kllZts6d3wHCM36u+KIgUHl6D76ci4zWpjJjXZyGWSpWoHrW1GLjM5wLMLvryc1QYaseqiiE3mUxoaWlBZ2cnAAjZ
EotnvFcqbT4L00YUIHrRqtFTm4h4f+rrs0r/sy9+jt/L9eIaqURpKpyVa0kDo6bbVENFI8DfszjOe+Wh5+9VpU8DqSp4vo/e7MTEhKTbqCg1TZPmM76Xa6b2XJCE7LNGRH1+7i2f57MySU+TTUZETlHOuDdqByedHt4Ta1M0GJQ9ngsWoakYjUYjBgYG5LyQf4cItMnJScH1qw1TTLMQTUTjTodKpVBgkxL3Yd68eRJBq/0rKoUH5dNiscBsNsugD71eLw1nlA8AYrzo/LCXhJE2nYvh4WG5LzI7jo2NSVMgodU8a5zpyj90cBITE0WhUk5ICcEicFpamuwzDRmNL1M6Ho9HjDiNP+VSlW0WgLmHJDzki3qGOpIsmX/v9YVQ7lQ49H7VLkm1pZsLoTIpElVDoafAEh9PAaTl48FRvVOVFlbdKAofFYzaBcrvUA8a74UVcypt1XuksKhQL+L11caf0tJSFBUVyXVVxU4vDphNlDYyMjKr41BVCPy32t1G70/1xicnJwX5QsNENAnJoaampkTJUSnxHtR9VENrGmO+l140MGO8enp6MG/ePHlG3guxzlxHGiNePzY2FsDlQRGMMKanZ4aP9Pb2CoUtgFleFpuvLBaLdFlSPthVyRF6xFhTTtThMeFwWMYQ0gkYGhqSepAqB9x7FrypiNQ1UuUQuIznZuGOskDK2qmpy0NU6FEODAxI4xDRH/SU6aXzfHEdBgYGJK3Bbk+mg6gAR0dHRYlPTEwIKoz9Dtddd90s3nYahZiYGPF4qYS51mvXrgVw2WFhdMMURjgclhSfTjfDGUNZYpMRh3szF11RUSF0x5qmSYTFwi+bojRNk27YiYkJpKamIiEhAampqZiamkJ7ezv0+pnZqPz+3NxcoRcYHx9HSkqKQG/ZcUolznWn1860GZ0j6hoafDocNKg8C3ypMqUq/r/1+kIodyoA1VNkaA5A2sXVVAgFBZjtvbEdXa/Xo7+/Xzo+qeRUnDz/zz/01KgM2eo7MjKCyclJaZlWvWF2ELJjk92cVOgUbnojasqB16CwqwRYajcu10eNDngQaMy4TlSan61X0OtUh3qo6Qx6MioElZ4gPRGVvoHrxYiBxSE+L4t0wAzyggZZ9aZphBMTE8XQUh64Pvwcm9XoAXFP1evRs+ru7kYgEBClTmVLRUTDRqWocuCoESHXjvtO71Dl6edzcL/9fr8ofnqrIyMjQoRGR4UdoeRFJ8kdX9xrhvmMNomq4hAJQkc5aIMeZm5urhgssjeqcN9QKCT7wzUxmUwCgQyFQrBarVKnUqMVonNodJkDHx0dlQ5vyhL3nilFne7ycBU1nUhZUg0gjXdfX58o53A4LHTASUlJQj3MTufIyEhhcwRmajPEtQ8ODiIUCkkum+c0MTEROt3MbOOenh709PQgMTFR7nloaEjy/oRlkg6A9Q+fzyfDU1gPodPIuiBpFRgZkLYZmHEwSa6nZiHU8wBA0sXquf97ry8MFFK1TsyRU3lRYao5ZBWzzv+reWV6MPRs6dnxEPI9akel6n3Su1SjAt6nisAJh8MStlNBsDBHD5BKg8pXTS8Bsxu3VIGnIeJ1aLXVWgD/VlMhNJQMQdkYw+9QU0VUtFxf/s1nVfOlvI6azlLrIWrdQx1aoq4tv5fUvVQCDN3V9acsMPWhRin8Tu5PKBSCx+OBy+WSoc9U4PS4+R3kPFG9IOByf0VERISw8NHAqKkiXp8duXyph5FKVv0c/03FT/kjtwqNgZrqIpKD8s6/AYhCJ6qDMs38MKcl8SzweSkDLOLRcSBnEfeZSpR8MKTV4J5S5rkH4+PjkrZRMfeqMlLTLmp6EMCsWQ2MrvgdlDmj0SiRAx0KNaK32+1Sq2AUy9+zUzkUCgkqhqmTvr4+9PT04OTJkwIb5efsdrsAMlj4p3PEebM0vJStUOhy4+VnnQVGSmraknJP3cFzxb2nzlE5kVRn4G+9vjDKXRU+HmZ6NPRgAcjv6DWonhyAWYqEgkTFTz7l6elpoTVVU0K8NhcXgNASqCkgek0TExMC66Ol/dd//Ve88MILIsAqWRg9KDV6CAaDkjKiceLGOZ1OuY/29na8/PLLorB5SAHMUrhcGzLHsVikKlYaEtUwqfzYNEKvvvqqNMoMDg6ivb191qg9pmkAiHdNAWVKhnUKtXYxMTGB7u5udHZ2Cs8814R5c+bluZc0tNz/sbExkY1gMIjW1lY0Nzejs7MTTqdT9onXJ848Pj5e9k/TNHi9XrS1tcm1uL7j4+P4/e9/L4pcrQUwbKb3S/4cnU6H2tpavPTSS+JRAxD4HMNoeqRqrYFRFH9GznM6Ccw3c/CD2WxGZmamKIzh4WEZLh0TE4O4uDhJSzL1wo5JRgtsjmpra4PP50NfXx+am5vFA6bsMxqmx8uUDJUsydx0Op10p5LXiB2m9EZVB0eN4igfk5OTcLlcOHTokIya4/lWQQeJiYkYGhoSyCD1h9/vlzF4ACQ6YVdrfHw8qqurZ+XbSQGRmZmJkpISpKenC2otLy9PBlVz8pTf7xeZIM2EyibKdeeQd2DGeUpOTpb8/+DgIILBoMCOaegYlXF2MteFhphEaz09PWhpaflcnfoPlbumaZmaplVpmtaoaVqDpmmPf/rzRE07QPHTAAAgAElEQVTTPtI0renTvy3KZ57SNK1Z07RLmqat/kfXACBejFpc5IFRW8PVQhvzZ/S8OIJNLVLQQ6EAAZfTOFwofl4tCtLiGwwGGaTB/1NhqLlDehWffPIJCgoKxJLzHqh46aGqBVF6sPxbp5uZdH7vvfeKog6FQpgzZ44cLlp+/pset5r/56EgZSuVrGr8KEBcF/U7582bJ2v6b//2b3jmmWfEK+P7RJB0lwenkKSNB0A9wDTITqcTfX19knvmGoVCIeERomIBIN4O14upkM7OTly8eBHd3d2zFAsVOTHgHEk4NTUlk+wJPcvNzRWjRqQDr0evVTXKqufO++S+nT9/XtJqXFve144dO2AwGEQZ0dtmhMF7IBKH5GIs4BF1QWXCa4+MjKCvrw8TExPo7e2F0WicNSqO1+BoNnU/srKykJGRgfHxmYHiZWVlAGaUJzH2NARnz56Fy+USxkXuMesQPFeU+02bNgnahs+RkJAgMgpcbi7kegaDQWzZsgXz58+Xe+S5o8EmZLe/v1/kkNS+9Ny5PpQri8WC0dFRdHd34+jRo9Dr9ZLTJ4ePTqdDcXEx5s6dC5/PB4/Hg5GREXHSSBLGnD+RMHq9XlIyer1e9ocvOmEejwcejweHDh0SSu+pqalZNQR1SE8gEBCdR/55RoXvvPMOMjMzP1en/nc89ykA3wuHwyUArgLwLU3TSgE8CeDjcDhcCODjT/+PT393D4AyADcCeEHTtM+fB8WbUdAtVHzA5bQFAMmxApiVxlAPGZUcPQF6XfQy+H1qSMhFVAtmvA9W0dXCJA+6WlCk1ea4LYaTanjFv5l64e9U6Fw4HMb58+fFm5uYmEB9fb0QgtHQfRYpw+9gyM8CpJpWYkhHZcWX2gzGP4sXLxaPqbGxEWVlZXK
Y+QxMIfD/NJZ8Tn5eVfKhUAgFBQWCyadxUfddTZeoz8zX+Pi4TA7y+/2iJCIiZvjgGamxhsFIj2vA71XfxzUlNDI3N1f2jLLCvWLUw5/xHru7u5Gfny9KmpSxbNVX033qulGeKcdU+AMDA7P2ioMhWNSm4SHnz2fXkQaeThDXkWk1pm1Y8COdLYeTM7JjmoNKjcqMKVNOaVJTUxERM9OMmB7lmaJR4M+5FtPT03J9Foi5X/Ra6XHTI+f9cwQmf8Z1MRqNgpLiXsXGxsLv9yMQCMjMWJXdMzo6Gl1dXcKQyuhHBSdQ3hgVq3qBSCI+L+WDe+l0OqUBkUgfyml8fDyio6NlfixlXk1TEz//j17/sKAaDoddAFyf/ntI07RGAOkAbgWw4tO3/RnAQQA/+PTnb4bD4QkAbZqmNQO4AsDxf3QtHggWZd59910cO3YM119/PTRNw9mzZ/HEE0/A4/Hg6NGjiImJgdPpxDXXXIOBgQFpGS4oKEBjYyPuvfdeNDU1wefzweFwIDU1FbfeeitCoRBOnz6NQCAgpP1ZWVniTaanp6OzsxP33Xcfmpub0d/fLymiG264AVVVVTh9+jSWL18uYe7y5csxMDCAnJwcOJ1ObNmyBUajEY899hgmJiZw6tQpaNoMI+Ftt90mYSo3l2kMg8GAhoYG/PGPf8SCBQtw8eJF5Ofn45VXXsGTTz4Jp9OJM2fO4NFHH4VOp8OpU6fgdruRmJiIuro6fP/735fwcXx8HC6XC5cuXcLRo0fxxBNPoKurSxqEvvWtb+H73/8+jh8/Lm3r+/btw5o1a+D1epGamooVK1Zg//79cDqdSEtLE47quro6oZyNjY1FSUkJPvjgA2mAOXToEH784x9j//79UvBas2aNhPTENZ86dQp//vOfcf/996OyshKbN2/GI488gvXr1+Puu+/GypUrsWvXLlitVpw9exYLFy5EMBjEBx98gIiICNhsNvT29mL+/Pno6+uDxWJBY2MjSktLUVhYKEqOxdaGhgbExsaiv79fGCEnJydx6tQpmcN5ww034JNPPsE111yD7u5uNDY2YunSpYiKioLD4RBlEhsbi4KCArS3twtjosFgwBVXXCGHnoXl7du3w+PxoKamBsXFxdDr9WhsbBRoZn5+vnDnqEo/FArB4XBA02YGm5SVlcHj8aCjo0PSHYsXL4bH48Hu3bvFQ+3s7MSCBQsQDofR29uLiIgIWK1WtLa2ShctG2I0TYPL5cKFCxcAzAzaZtonOztbFDUww2qp081QS5eXl2Nqagrd3d3o7+8XNk8yWBYVFcHj8QgOnYoqMjISPp9PmBtDoZBMR3r33XcRGRmJvr4+KYyysHnp0iVMTU2htbUVBQUFgpRqb2+HTqfDuXPnUFBQIAY/Pz9fokM6NR6PRwavAzNIpNbWVgAzqZuysjJcvHgRTqcTkZGRcLlcaG1txcqVK/HBBx8gJSUFiYmJ4kheffXVGB0dlT0ZHh7GwoULERERgZaWFnEACgoKkJ+fLzWW6Oho7NmzB7feeiump6dx9uxZ9Pb2orKyEj09PWhqasLVV18twz9o+Pv6+nDhwgXExsaitrb2c/Xp/6Ocu6ZpOQAWADgJwPqp4qcBSP30bekAupSPOT/92ed9r3idzClysT/88EO4XC4sWrQInZ2dCAaD2LFjB+rr61FZWYmBgQGMjo7i9OnTyMjIwHvvvYd58+bBYrGgrq4O27ZtQ3l5Od555x3U1tbKdf7617+ivLwcWVlZqK2txalTp5CWlob33nsPUVFRSEhIQGNjI7Zu3Yq5c+ciJycH586dQ0tLCwYHB7Fnzx709fWhsrISmzZtwujoqHTl2e12DA4OYuvWrTAYDKitrcULL7yA0tJSREdHCy+1WjhTm0IIu1q1apWQhnV1dSElJQXz58/Hli1bpIHj3XffhcFgQHFxMZqammZZe51Oh5qaGsTHx6Oqqgrj4+M4cuSIpE9IR2q1WtHR0YGCggIEg0E4nU4htTKZTCgrK8OcOXNw/fXXC7viG2+8IXBNh8OBpqYmWCwWtLa2isJsbGzEyy+/jPnz50uzklpv0Ov1aG9vx5EjR9Da2gqfz4dt27ZJLryrqwvHjh1DVFQU5syZg6amJjgcDjkI1dXVsNvtsj56/Ux3aG9vr/AB0eOdnJzEiRMn4HQ6hQmUstfV1QWTyYSSkhI4nU4AwKVLlxAREYH4+PhZY9qOHj2K9PR0ZGdno6OjQ35WVFSE5ORkpKenIyoqSuTMZDIJooJd0JGRkaipqUFiYiKys7PR0tKCqqoqKZxRNgwGA1pbWxEfH4+8vDzU1tYK0Z3NZkNubi4OHDgAj8cDt9uN/v5+NDY2wm6349ChQ+jt7RV+cL/fD03T0NjYKOk4cuBzLJzBYBDWTbbfM61nMBjEqcrOzhbPksYvNjYWaWlp8Pl8SE5Ohtlsxvz585Gfny9eOfPgXq8XJ06cgMViQX5+Pnbt2gXgcuNYbm6ucMerReQ9e/agpKRE6g+apuH06dOIiopCWloampqaZFwmGSzT0tKEmZNY+vz8fMTGxiIuLg4XL16EwWBAdnY22traEAwGZS4Du3o5yKejo0MQeHa7HW1tbZiamsKZM2ekwcnhcIjBI9KMeXdGKHReh4eHxbEjFUQoFEJ6ejpqamoQGRkpETidhdjYWCQkJKCsrAx5eXmfq6//28pd07RYAO8A+G44HP48ImHtb/zsf2uD1TTtf2maVqNpWg07z2gNWf2/8847UVFRgbvuukuIpvx+P1577TU88sgjiI6ORnNzM4qKinDPPffAZDLh8ccfR2xsLO677z48+uijSE9Ph91ux9jYGL7yla/AZDLh+PHjKC4uRnp6OoaHh/GVr3xF+FGeeOIJrFixAnfddRceeugh4ZQYHh7G7bffjvz8fNxxxx0oKyvDbbfdhsjISDQ2NmJkZASbN2/Gk08+iby8PNTV1WHx4sVS8JuYmMB3vvMd1NXVCe6YRVfmiVVE0Ny5c4WQKhgMory8HEVFRZiYmMDChQsRGRkp05VIZXr//feLN8ewcdWqVUhPT8dXv/pVZGRkYOvWrdKB+9BDD2HJkiXYu3cvHn30UWRnZ+PXv/411qxZg4MHD+KOO+7AyMgI6uvr8eCDD0pr+d69e2E0GnH+/HnU1tZi3bp1yMrKwsGDB/HNb34T+fn5+M///E/JoX7rW99CXV2d4JTViUxr165FSkoKbrjhhv+bujcPj7I+14DvWTLZk8m+LxASSEjYl0R2lF1xqwWtKKKtbT3dzmk9PbWnxdrj0XPsqd20ilJLLQhIVVAJEMKWAIEECCH7vk6WmUwmk5kkM5N5vz/G+8kbTvWc7zrfH35zXVzGLDPv+3t/v2e5n/u5Hxw7dkye/5YtW7BhwwY8/fTTMBgMuHr1Kh566CGsWLECM2fORH19PR555BGEhITgzjvvxOuvv46AgAA0NTXhvvvuQ35+vqTsTMmPHj2KLVu2wN/fHx0dHYiPjxfHevDgQbz88svYsGEDuru7UVJSgpKSEgQGBmLp0qUICwvDzZs34f
V6UVNTg7a2NsybNw+tra0ypcfpdGLdunVTYBrAB//U1NTgzjvvRExMDLRaLY4ePSoCbU6nEzdu3IBG42umCgsLE0rcuXPnkJycjICAADz88MN4++23ERoaivj4eISFhYm+SX5+Pjo6OrBmzRqEhYVhcHAQNpsNc+bMEZEts9mMjo4OUeDkmRsaGsK7776L9PR06blYunSpsGYcDod0hHo8Hhw4cACtra0wGAx44403oCiK0E/z8/MRGBiI8+fPY/r06UIaYKQcGBiIl156CZGRkaI709zcLE6/q6sLBQUFUyi2Op1OPvt3v/sdKioqEBISgsOHD+P06dMiKbJp0ybMmDEDiqLgrrvuQmhoKNrb25H+meRHbW0tjh8/jrlz56KqqgptbW0oLS2FwWBAb28vtm3bhvDwcDgcDmzcuFEyOUJRy5cvR3NzM5YuXYrIyEjce++9ePfdd1FaWoqOjg5MTExIJO7xeFBSUoKOjg6sWrVqSvZTUFCARx55BBqNRiDEkJAQtLS0IDk5Weom8fHxUl8gZBoSEiL6RIShPu/1vzLuGo3GDz7D/ldFUf722bf7NBpNwmc/TwDQ/9n3uwCokf5kAD23v6eiKG8qirJIUZRFUVFRgjsx4vF4fFKuu3btQmRkJA4ePIjOzk4YDAasXLkSMTExuHDhAi5fvowrV64gMjIS7777LpYtWyYR2+joKO6++26MjY1h6dKlSEhIgF6vR0VFBTZs2ACr1Yo33ngDubm5iI6Oxv79+5Gfny9G1uFwSDv0nj17kJaWJmPNtm/fjtDQUBw+fBgvvvgiYmNjUVRUJBvS4XDg2WefRVdXF86ePYv9+/fjRz/6Efbu3QuDwYC6ujoZ5KvRaKbwj0+fPo3HHntMjFBjYyN27twpcr8/+tGPcPXqVbS0tODxxx/H2rVrcffddyM1NfW/sS4iIyNx5MgRbNjgq2v39PRgYmICFosFd911F0JCQnDmzJkpnXOFhYUoKipCeXk5mpub8eqrr2LJkiUyOKO9vR07d+7E8uXLsWHDBtjtdsTExODMmTPiuBRFQWFhIfbt24fvf//72Lt3r1yXGq+32+0iSLV3714sW7YMExMTmDVrFlJSUqDT6bBixQoUFBTAbrejtrYWVqsVaWlpyMzMFDhJo9Fg/vz5mDdvnhRP3W43goODRWmSWjQ3btxAdXU1uru7UV5ejrKyMrz88svYtm0bysvLcezYMWRmZqKgoADXr1+XYnp3dzdWr16NBQsWYOHChdBoNLh69apIDB8/fhwRERGor6/nuZH7nTVrFrKzs8UxzJw5U6Kyvr4+PPXUU1OKyoCvrpCRkSETr8haogCb2+3GihUrJIpOT09HVlYWhoaGUFBQgIULF8LlcuHkyZNISkrC+fPnMXv2bKHskXtPrXKXyyWFVIqTkdVhMBjQ1taGJ554Aps3b8aZM2d4jnHXXXchNzcXs2fPFnE0Qjzt7e0SdVKLxeFwIDc3V7DvlStXYnR0FGNjY4iOjsa0adOkoE0efUNDA771rW/hrrvuwtmzZ+HxeNDa2orc3FxkZ2cjNjZW6KelpaUiznfhwgXodDrU19cjICAAPT09cDqdmDNnDsLCwpD+mchfZmamNGjV1NQI/HL27FksX74cH330ERoaGqYU871eL9ra2jBt2jSkpqaK2F1jYyPq6+vx9NNPo6ioCBUVFVOokKxbzZo1C1arFcHBwbh48SJSUlIwMjKC06dPY82aNeju7obRaBT9H46lrKurg5+fnwxN+bzX/4YtowHwNoBaRVH+S/WjowAe/+zrxwF8pPr+do1G46/RaKYByARw5Ys+g1Hc7YXO6upqeDwe2Gw2VFZW4vHHH5eNbLPZcPnyZQQEBODWrVsYGRlBdXW1FEUmJiZEorS8vBxJSUloa2uDx+NBdnY2hoaGUFxcjBs3bsBgMGBoaEgkUlnZj4mJgd1ux7Vr19De3o7a2looioK6ujoAPtjoxo0bWLx4MQBIgcVms0nKRGYIdSvy8vJgMBjwq1/9CpcuXZIClJo7bzKZEBcXJ9TDmpoazJo1Cy6XC+fOncPEhG/I74IFC9DU1ASz2YyGhgZUVFTIe9GwAJCGDGpOW61W3Lp1SzSn1XNDvV4vPv74Y6SkpKC5uRl6vR4mkwmjo6NoaGiQ1vLm5mYMDQ0JnMLag5qn3dbWJjK9c+bMEeaLmuNPuMZut2NsbAxjY2MCEXm9XiQlJaG/vx/d3d0yXYg1EjpHPz8/JCUliRBXU1OT8PpZ5CWDxW63y+Fob29HZ2en6OQ7nU7k5OSgvb0dGRkZCAwMhMViQW9vr6hBkmY3ODgoMrljY2Nobm5Gf38/Ojs7YbPZhLMOQBpl2KBD1s7o6Cja2tqwbt06pKenT+Hvcz+QSjg6Ogqz2Yy4uDjBq2/evIkVK1ZIDYHMrrq6OhQUFEjmEBISIjWptLQ0kRwgOyg8PByRkZHweHySshxDB0BqQaT98nymp6fD5XIhJiZGHADXn8O61R2i6mwmJiYGIyMjGBgYQHV1NVauXCm2IDExUXoMqN2i1/sUU8n4IRstNzd3CjOMhVMyh+icXC6XYOIs2gYGBgqV1Gw2Y3x8HAMDA1LQBXxQntVqRXx8vIyvZFbE7uc5c+ZMKZ7b7XaBisfHx5GRkSGZAwvcBoMB3d3dIr1NaDAtLU3qAvyZuoDPgJU4Pgdrf97rfxQO02g0ywFcAFAFgNy3n8CHux8CkAqgA8BDiqIMfvY3zwHYBR/T5vuKohz/os9YuHChcu7cOTkM3JTr16/Hq6++isHBQRnWQCaJ3W5Hbm4u2trakJqaCoPBgJs3b4oUKQA0Nzejvb0dOTk5Io+ak5MDALhx4wYKCwtx7tw5nDp1CoODg6iursby5cvF0bS0tEj6zcOVkJCAJ598Er/85S8xNDSERYsWCV2xqqpKhlKUlJTI5zU3N8NutyM0NBQJCQkwGAw4evQopk+fLhocwCRF0+PxoKysTGhqzc3NmD17Nmw2m7AYUlJS4Ofnh6qqKhk3FxkZOaURxOl0CkWM0278/f1hs9mQkpIinaFtbW3IzMwUI8KBIwsXLhSBK8qaer1eGSc4Pj4u4+YmJibQ3NyM7OxsuZ+amhppg+f1kqmh5ttfuXIFExO+QRdtbW1CT5yY8Il8lZeXT5mNywiLTSVkuHR1dSE8PFxGooWFhclh9vf3R2trK5xOp+ybuLg4hIeHo7e3F2azWQaINDU1YcaMGQgKCsLZs2eRmJgoHGXS11jMCw0NRX19vYyFGxkZQUJCwhQD7fV6UVdXJ89/dHQU3d3dIkRmNBqFRUMuN/dgf38/nE6nKDK6XC7BzwMCAgR/vn79OlJSUoSCl5SUBH9/f/ldi8WCjz/+GDt37hQGDzn7iuLr+hwaGkJUVBQ6OzuFPkpnxP3rdDphNBqFfUSuOADRk/d6vejv70dAQIA0EkVFRQnjY2BgQDB+Ki+OjY2ht7cXISEhmD59uvRnMJgjjTEmJgahoaFSo2IwMDo6ivj4eAQHB08Zt8ieBafTiczMTHR1dcHr9Uqtxmw2Q
6PRyHNhljB79mz09/djdHQUIyMjiI+PF9kGm80mOjiBgYEYGhqC3W6XoRyhoaGyH9PT0wVPJwRIx0UpA5fLherqamRmZsJgMKCpqQkajUZqV5QscLvdKCkpwfj4OB5++GFMTEzgiSee+FzhsC+FKuTChQuVoqIi2TCkD86ZMwdVVVVwu90YGRkRutbtTUqMJsj5ZaONWmWQDsNsNqOurg6rV6/Grl27kJSUhJ/+9KdTuuVYhCOfm3gXi6NPPfUUysvLBcsFIC3i/BzSzwAI55mR2/j4OLq6upCSkiKUL4/HI3RGNu1oPmtuUad0agokI2FmDGzOIr1MLSOgjp7UDUXk5jI6o6YKZUkZGfPnVM2kQVCLLbEgTIohG3DUNMPbo3d19yZ1dlhnYEfxwMCAGEMaODXHmAaAz4H3RmkI4qbcW8SVb6dEsoGN1EgaNZ4RFjoVRREYTe1MuW6kxDkcDrkXYHK6kMPhEOVF9XtzD6qdPY0qI1P12gIQ2t2JEyewbds2YcewDlBYWIjU1FRER0ejq6sLCxYskKyD/Gyv1yvDxdWdk9Qh4vlLTk6WngtGkmoxNTprPmu1gBfv1e12Y3h4WGiUvHd2Fs+dO1cMMjtkqXSp7odQNwh6PB709/fD4/HI6MSWlhYkJibK/zOj5P5Wa9dERUVJVOz1ekWL3Ww2IygoCDabTYIJSv6SMslzSFonJUh4jQxAeBbtdju8Xq9IhjscDuj1eiFZ8LkTniarqbGxEWazGS0tLcjKysKSJUvgcrnw9NNPf7lVIXngmN7odDrU1NRg3bp1MiyBBk6NSd6O36oxezULhWmbx+NBc3MziouL4Xa78YMf/ABZWVn/7YDRkKsV/XQ6HZxOp1AzOVAXwJQ2dDV3nhAPx4xRBImMGBofRVEkwlEbdV4XNwYNKY2Gej1ooFnEZFTFa1M3FtG4c8CGw+EQI0/HQiPHz+ffMfXX6XTisIjJ3t7qTUPIZ6zu9qQxYPRFWiijehqP7u5u3Lx5U96bQw2Y4tL4qQXP/Pz8RBCN7elkLvA50tjzcKq7JRlBqznbNOy8X75I7VTz1RVFkYiXh50GkU6QcBQpsfx7OnreExkk/HvubzrS4eFhmfV569Yt0RXnZ1Fjxu12Y8mSJQAgTBn2cNBx8F4ZHPFz+Gxo7NXYOdeRv8s1YaABQGAE1gwINzECd7lc6OjoQFRUlGj48HPUglvsluY+4PPX6XSIjIyUZsfAwEDk5OTIfYaFhQHwdZtTJ/3mzZtwOp2YOXMmrFarQDU8J6wRkN3CzI1ibdHR0SL5zGKv2+2G0+kUp8W9wXoiMz6eee5z9R5hwxnhZe5HMmfuuOMOxMfHi5jcF72+FJH7ggULlDNnzsiB5GJw49BrsylJvTiMXNjlRmhDfV9qo8IX2+bVBhGAFHH4d4wUaTC50Ri182saMn4+nZTaKKizA34mDQ4NML/P++PP+Z58v9sbjrhRAEjkwGibn88Uj0bi9oiUkYe6IYlGlvfPteVm5Zozeqeh4nswleV68DmonRGzHHWzEvU+Wlpa5MDwHpOSksSZEq5hFM89wyxQ3T/A5+V0OuV6KNhFA65W5aNTAyARLNdAfYgZxXPP0tlRa53Pi79P58jITh2NMtKnmBu7i91uN6xWq2jQBAcHSzZrsVjkffhcUlNTRdaYzk5NveM+YHZCZ0mGkcvlkoCDwYXRaBQ2Fp0SgwIqkrL1Xp3t8nMURZHMWl2P4H1zfzBoUDtbrVYr926322VuK3X5qVBJ7XrCjxydxzNCR9fd3S0sGEb1wGRjJLtfY2JipEGKWDy19j0eD+rq6pCSkgK32y38d9oLisjxfAcEBCAiIkLOWHh4uEA6tAts+mK2zeAyMDBQakwUHNPr9fjWt771uZH7l0Jbhp5efcDVhVEeDv7s9iYPGjguxu3pLQ02NzAwafD5d2o4iO+nNvJqA63uTCXkQviBURY/V+1Q1NAA75vvr3ZG/D122TKD4D8aHd4XDSXxXRoNGlF+n+umLlzze2oBLB5eHkBgEtq6HRZjtsTmDH4mAKFuqT+b98174f+zmYXfGx4eRm9vr2xo/qMh535RHwQ6Gjoytegbr1ndGcqioRriUGeI6kyQ981nT3iCn0UeN7FwGngebt6nWn6Az5bOh3+vprLyetSGjwdb7cxYqFNrgtNJcI/R8Kn/0SEzewIgOjQAhGbIgIOfe/tzVmd96v4COgx1QMJASg17co+q4UxmB+rzy31IR0rnwjUi7EF4j9xxAKJjRCfNWodOpxPdekbu1JOhUBmzKWYtlCgh9KTWzlHvS55Dfs2OeT57Xqefn59kOtwPtCtq3Syuu1p25fNeXwrjzgfFaEodVQOTg7H5UFgwUzcBcUMRN2XkRS/OKI4PgOmvWjeED4ROxul0ymAD9SGmlKcau1ZDOTzANAg8gDzIxBtDQkKmRIQ8ZMCkoeEh5KGlwiFfNHSMDHmoCEWojRHxYW4WGnDCRmpjyHWgsaWTovNSO9LbMx9uaDWsxbXhhuRnAZBZmWqox2w2ixYKBcrCw8Mlde7p6cGhQ4fEQHPgg9rhqzF+XgejJq1WK3xyDrVQZ3fEUdW1De4jqpGqawlcR0bsGo1G2Ehs3fd6vbBYLBLREzYgo4QZA88D9wsZHCwQ+vn5RgMaDAZhg5BWGhsbi8jISNFDIgPJ7XZL447RaJTzFBISgtjYWPmZWiSL60eohwVY1nzUZ4Lnj9kaMDlDQVEUDA8Pi+HVan0SAOzRmJjwteYDkOI9z6haG169Z+x2u2QsgYGBwpsnjEMHx2zY6/WKDC/HK2o0vo5xo9GImTNnitHk3ggODsbExKTMMhU0NZpJhc6cnBwEBwfLrNnBwUHZB3QM/v7+CAkJESloRVGmCPpxzQnZhIWFyf1y73Z3d0tNgOv6/x6YBQIAACAASURBVAvjToxSDVvQaPb09KC3txcABNflQaVxpUEDJg0XDTwdg9rI8vfpAJqbm9HV1SUqg/wcblhuUOpcA1OnFHV0dGBwcHAK9KKO4JkdMIqhQ6EBUhtrwktqY6mmNVLDw+12o7W1dQrUAUx6dzVerIYGGMF6PL4ZkuQhq/E7Rhk02mrpY0YSNODqyJKpr7qIyuhSHQGrGTNer1d0QwBftNbY2AiTyQSv14vQ0FApgvf09Ex5tjSOfMaEVW5/7urrolNXF5bdbjdMJhNaW1vlcPO58W/V66LOCGlsg4ODRUqCk3cYJTLaZvqvLpZxH9MRM3ggVEGnTsx7aGgIbrdbmBk87DwvlITQaDTiDHg/3Cu8ZtZPeI3ErNVdqyx0q2tJvG71PqMBVT9bNTzHa2WwxCh7cHBQ/oZGj2eLRdOhoSE5m2rI0OVyiS4MnaL6LNEhBQQEwOl0SkFanbHHxsYKRZYRNNeG8BFpjizIMhgiFMYzS7tFYgQnOanPLZ+1OotVF/FZm+O549minfB4PAIPqVGBv/f6Uhj32zFeRgajo6P42te+hj/96U9yGL/5zW9KAUyv18ui8X3UqbM6quTPaJy0Wi2OHDmC5557DkajEceOHcPDDz88
JdJUb16tVotvfOMbU9gqvKaHHnoIR44ckQOqrnqr4RwayO9///uSAqpTSjUOrjYqvJdXX30VNTU1ckC//vWvixFVsxV4vbfXH3joud7/+Z//iZKSEilKqddfzXqgY7i96KeGkggDqJ3v7caUz4iHixuf6bKi+JgepaWleOWVV0QBMC4uDpcvX5bCnlarRVxcHL761a9KKzcHqqiVOAEfHfbkyZOynsx+tFqfXOuFCxfwzjvvICIiAqGhofj1r389ZV/p9Xp5TlwDYNKJcu15L8TB9Xq9RJ0UORsfHxfq4u1MKzq64eFhVFZWCmWPES4DDHLAaWBpeCcmfEMoGLgQW6bj4dqQ78/Axev1SuZA1ozZbBYDxr+n8VVDUDS4ariQcAeNVkBAgEAdHHri9XrR1dWFlpYWvP/++1AU38CW0dFR1NTUwO12o7i4GFarFR6PB3v37hUSBJvSWFj3er1ITEzE2NiYzE+NiIiQoKG3txd79uxBdXU1KisrERUVBY/Hg7y8PAQGBsoUJtYVuH79/f3o6OiQZ8x6AxlR7CugIeYaqsdhhoSECIxDQ826B4vYfH6sGdHBsY5Ip8ih8jabDW+//bZMivqi15fCuAOT9ENGXB6PRwb3PvTQQ7J5XnvtNYFFqNp2+/vQwHAT00iqMWK9Xo/nn38e9957L+Li4rBz507s27dPIhB6SeLRTHt1Op1wVoFJbe577rlHDgswWYBjlE3DrigK3nrrLdhsNhm2wIeqZmEAk7UIFmOOHDkiWKiaGgdMzv3kzzhfktEjDwPhg8bGRmzduhU7d+6UVPb2CFWd6aijP94Prw+YVLcknEQK4d+Lkvk9r9cr8rFut0+TvaOjA52dPmmiY8eOyTSlpKQk0b7h+/GzSf9UQxHDw8NwuVz429/+JlQ4ZlXkxQcHB2Pfvn1YtGiRGLGf/OQnU+onVOrj/dIwezw+pUer1SoOk5h2UFAQBgcHhXVCo8oIm9fKfUh2Eo1mRUUFUlNTBbIgbn97HUSn06Gvr09E7ZjlGQwGMcgApFjLf+wXULPTWJjs7OycwioKDg6Wwd/cIxw9qMaLuee4Xxm5q58Fxxbq9XqcOHECc+fOxde//nWJwAMCArBoka82SPneiIgI2Gw2rFixYspwbc57ZfGTfQcBAQEIDQ0VaJfONC8vD7NmzRJ7QUepjrz7+vrE+EZGRiIzMxMejwcREREYGRkRlpr63qkuyQ5vr9crYyPNZrM0CLJOEhkZCQCSzRGeJVefTYWEgGivOKawsbFR3pO24PNeXwrjro4AGV1rtT66VGpqKhISEuDx+MR4iLFaLBYMDg7C6/XCZDIJK4BGaWxsTMSU1OkLF4zCSDqdDp2dnTIRnZGaxWIREX9GrykpKXKQR0dH0dvbi8HBQWRmZiIqKgqArwDV19c3haWiLugNDw9jYGBANkN7e7vocFssFgCThVZSJScmJtDa2ipNJexapCKiyWQCMCmNPDIygs7OTvT398s1qDONvr4+3LhxAzqdT2WOPNu+vj4ZW+bxeGAymWCz2WA2m9HV1QWXy4Wuri7YbDYMDw+jp6cHdrsdNpsNJpNJBvbyfnt6etDf3z9l3f38/ASvVGcGDodDjOP4+Djmz58vE3F4eOjAyP9mZEdn1N3dLV2FQ0NDsFgsaG9vF2YFjbWap8/op7u7G21tbbKOXq9XiomsCXi9XvT29k5hWPH5UImQtLjx8XHpzuVAF/4d/9FYqtPs0dFR9Pf3y/1ZrVbZ04qiyJxT9RAMBh2EEycmJiQAYYRvt9sl0uYzCg4OFu3ziYkJCTjoQMjZd7l8052YQaqhGHU3J1/MVKln393dLY6ezUq9vb2w2WwyuJvYONeLXawjIyOIjo6Gv7//FFiFgmx+fj7VTTpJ2gidTjdlqI7JZEJQUBDCw8OFccJrdjgcYvS53/i3Ho9H9gmhQ6fTKbAko3c6c8JDrHPRoaphYnVww+ZIOhgGiKxR0AmzK9lkMiEhIeG/Qal/7/WlMe6kbXGj+fn5oby8HDt37oTRaMR7772HlpYWvPfee2hoaMChQ4ewadMmvP766zAYDKLFoNFo8LOf/QxHjhzB2NgYfvazn0nxFYBE+4qiID8/XzTSf/GLXwjccuDAATQ3N6O+vh7PPPMM9Ho9Kisr8cQTTwgt6aWXXkJbWxt+//vf47HHHoNOp0NDQwOOHz8OjUaD3bt3S1s7o7bCwkJcuXIFmzdvxsTEhOjSPProo/B4PFi5ciX27dsn2DwNgL+/P9rb2/HEE0+I+lxnZyeSkpIQHByMP/zhD0JFq6+vx5EjRzA8PIwXX3xRIlYaRkbdRUVFUkTq7+/Hu+++C6fTicLCQjQ1NaGxsRGVlZW488470dfXh5/85Cf4zW9+g4qKCmzZsgVjY2M4fvw47rvvPtmcDzzwgDRjPPvsswCAsrIyDAwMyLOlsSGjh86mp6dHDlVcXBzWr1+P+Ph4KIqC2tpazJs3Twpb6kJqUVERvF4vqqurMTAwgJ/97GdwOp146623YDAYkJiYiHnz5snhUhfMtFotZs+ejeTkZBGiYiTa0tKCsrIyDA0N4Xvf+x7sdjsuX76Mf/qnf0JERASCgoLwt7/9DTExMSgtLcXhw4fxxhtvwGg0oqysDOPj47h48SI0Gg0KCwv/m6xzcHAwCgsLcejQIWi1Wpw9exZ6vR6XLl0SeqfdbkdNTY1g4UePHoXH49MtKioqkoagN998E9XV1XC73di7dy/MZjN++9vf4sSJEzh//jwOHz6MgIAAkbZVFEUMVXBwMEZHR1FSUgJFUQRmICb+/vvvY3BwUGQ8SMG8cuWK0FXfeOMN4eMTDtHpdKiursbx48dhNBpx9OhRlJSUwOv1oqOjQ2o9NIjkzAcEBODixYtSqKT8cFdXF/76178KT350dBQffPABenp60NfXh9deew0RERGy/1iYHR8fx1133YWYmBgEBQWhq6sL3d3dsFgsOHz4MPR6n7RBUVERTCYTNBoNLly4gI6ODvzlL39Bf3+//C4nJL344osSAHV1deH8+fNwOByorKwUEgAj+/b2dpjNZvzqV78SpzYx4WseLC0tlSCAk6JMJhPKy8sRGBiIEydOoKKiApWVlRgaGoK/vz8qKiqwatUqyUy+6PWlMO7AJHeaHt7r9eLatWsiF5CRkSFCP319fZg3bx4mJiYwd+5cYQcQ07p58ybmzZuHwcHBKSk1AOnM0+t9wj2JiYkAIJNfLly4gN/+9rdISUmBxWLBzJkz4fV6UVZWJmJPNTU1yM3NxdKlSxEdHY2srCzodDrRBHe73di0aZOkfYAvDRsdHcXixYuRkZEBm80Gg8EAi8WCuLg4pH+m1cFiKzAZwSuKIjII6mG+X/3qVxEZGSlRqcfjweuvv4758+fDz89P2u+BScVNYsTUrwkODsb+/fuxfv16pKenY+nSpTh79qx0MXq9XuTl5eGZZ54RffSJiQmkpqZK6j5jxgxERUUJxKIoCs6fPy+dfmQBMGIht53XxAxrZGQENptNKGpz5sxBb28vdDqddBHabDbU19d
LZhcTEwOHw4FZs2ZJxpaUlITt27cDADIzM5GcnCx7S1EU2QOBgYFIS0tDcnIygoKCJIsZHx/HyZMnsXjxYhiNRqHkRUdHy/McGxtDfHw8NBrfCDyHw4GkpCRERkZi0aJFKCoqQkZGBgCImBodEmG/kpISgXWSk5Ph8XjQ09ODvLw8hIWFSWOOVqsVrfrg4GDEx8fj0qVLcDqdwqyg/klfXx/i4uKwaNEiJCQkoLS0VKJuykczYmU/xI0bN0R8Kz09He3t7QgODkZmZibCwsIQGxuLwMBAwfFNJhNOnTolezo5ORnAJOzJCPTjjz8W+V8/Pz9UVFRIP0RwcDBiY2MFaiLkQj53TEwM3G43Ojs7sXjxYhEFozxGc3MzIiIiZA8lJCQIE41wYUBAAMbGxgT+6OvrQ2FhIUJDQwW7t9lswmWn8N7q1asRFRUFl8uF5ORk0ahiPUdtT8rKymAymYSZQ5E5Zq/j4+OIi4tDdHS0zJ8gy+jy5csSdKSmpiIoKAhFRUVITU0VBxAbG4tz584hNjYW4eHhiIqKEnkLdbb0915fGuPOBg81hLJnzx7Ex8fDZrNh1qxZePHFF7F69WpRoaMEbF1dHb773e8iLCwMe/fuxXe/+10kJiYiPT0du3fvnsKr5UG7ceOGKD7+9a9/xdatWzE2NoYf/vCHopz3wAMP4IUXXoDZbMbevXuRmpoKm82G73//+9iyZQsCAwNx9OhRSfPvuusulJWV4amnnoLb7ZZhw4ria4Hfvn07Tpw4gZ/+9KcICwvDjh070N3djWeffRYulwvp6em455575HpJxVMUBW+88YYIJlksFhw4cABr1qxBY2MjTp06JVzcjz/+WPRrdu/eLRtN3SVXVVWFwcFBKbj9+te/loNfWlqKjIwMrFq1CgcOHMC2bdswMTGBBQsWYOvWrfjzn/+MrVu3QlEUHDx4EI8/7tOOq66uxve+9z2Jql566SV8+9vfxne+8x1p2KEDByahG+q1UHxqYGAA4eHhsFqtWLFiBfbt24eMjAyhpZ04cQIPPPAA/Px8OuMZGRnCfz958iRWrlwJf39/JCcno6ioCBs2bJjSGxESEiIFxvr6eixevBhWqxVFRUUAfBS748eP46OPPkJISAiuXLkCg8GApKQkZGZmYsaMGdDpdCgvL0d+fj7Gx8cxb9489PX14d577xVphDNnzggP+sEHHxRYyul0wmw2Y3R0FNu3b0d2djZefvlllJaWSvr9wAMPwOPx4Ny5c1i7di1cLhdef/11kY7t6ekRTD46OhqJiYnIyMhAcXGxTIFKSEhAWloadu3ahenTp+OFF14Q50eao9vtxqVLl3DmzBmkpaWJ/g4dYW9vL+644w5MTEyI+uXo6Cj+9Kc/SRFz9uzZ2LRpk9RbgMnpSOHh4SgoKBD9l2nTpolwW3Z2NkZGRoStQ+y7vLwcZrNZ4MBz586JBn51dbXUjd58801kZ2fD7XZj5cqV2LZtG/z8fANJ/Px8E6BSUlJEi31sbAz79u1DWFiYqIY2NDTIfTKoYBdpf3+/nBFme4SOMjIyBEZ58MEHMX/+fLzzzjswGo0YHx9HWFiYQGSzZs3CwMAA1q5dC71ej9LSUsTFxcFut2PDhg04cuQIXnrpJWFTnT9/XqiumzdvFllo2oLly5fL6ER1Uf7vvb4Uxp2FNhaCAIhA0nvvvYf6+nr8+Mc/hlarxYEDB+Dv74/i4mI8/PDD0Gg0eOGFF7Bp0ya8++67yMnJEfU2ADh//vwU7jAr0B9++CHSP1Nro5zwvn37kJubK9FmU1MTjh07hg8++AA2mw379+9HaGioRAI9PT2wWCx455130Nraiu985zt44YUX8Pzzzwv9CpiUVyguLsZ//Md/yGQZu92OTZs2IT4+HqdOncK//uu/ygAIsoUYYfb29kpqevnyZbS1tcHr9eL5559HXl4e3nnnHZmWRFre6dOn5bM1Go3IDBw7dgxz584VzDguLk5wwytXrmDNmjWYmJhAZ2cnHnroIXEwrHts27ZNsPwHHngAw8PDeOGFF1BQUIDR0VH85S9/weLFi/GTn/wEiYmJwq//4x//CGCSgz86Oor6+nopjg0PD+Py5cvSMs7vZWdnS+E4OTkZiqKIBIHFYkFLS4vIoS5ZsgTV1dW4ePEijh8/jqCgIFRXV0On003RJVcUBWfOnEFsbCy0Wp/M8dy5c1FSUoLQ0FBERUVhbGwMR44cwerVq4XGFxMTg46ODtERJ4snPz8fSUlJ0oMRExODtLQ0xMfHo76+HkNDQ8L/Zo3DarVi9erVyM/Pl6Bj0aJFMBqNqKqqwtmzZ2GxWGTWq9frRWdnJwoLC3HPPfdIFkJY49q1a9LklZ2dLfj2+vXrkZ+fL4XG4OBgmM1mtLW1we12IywsDMPDw7h58yaqqqqkIF5cXCy67+Xl5bBYLGhqahItcaPRCKvVioaGBjidTtnv6oKq1WpFb28vFi5ciEcffRQxMTFob29HXl6ekB3UNL9z584hOzsbFotFlFSJt2dkZKC8vBxDQ0Mi7EbN9sbGRtkvNptNOOf19fXo6+sTKKqrqwu1tbUoLi7G/PnzERERgcuXL4uMMsXRysrKkJeXJ70jy5Ytg1arRUtLC5YvX47a2lqhaS9btgzz5s1DcHCwZJ0MpCYmJtDU1ASj0QibzYbR0VFcvnxZhmw/+uijghp4PL75vvzMrq4uWCwWUew8f/485s+fj4sXL8rw8i+0q/+fWOf/44scdGAql3jWrFnweDxIS0uDy+VCZmamjOzq6uqSr7Va30DptLQ0LFy4EFevXkVNTQ2uXr0qei6MGBkVt7W1ISwsDDqdDgsXLkRdXR2WLFmCHTt2wOv1ora2VrQ6UlNTkZWVJRX/r3zlK7h58yauXbuGWbNmISgoCImJiVi7di0qKipQVlaG5cuXS8MNXw6HA7GxsWhpaZGJTBxuwFoDR6Kp2TgAMHPmTNTU1GDevHno6urC3LlzZd2Sk5ORlJSEuLg4bNy4EXV1dWhqahLjTayXLeednZ3Izs4W5samTZtQX1+PK1eu4KmnnhKYZ+7cuVPU/DweD+bPny90siVLlohSZ0hICGpra6UhhHKu27dvx/j4OJxOJy5fvixsFzaJ0TgFBASgsbERnZ2dUxqPli5dKs5Jp9MhKysLnZ2dGBwcRFdXFyIjI9HX14eOjg5kZWWhr68PdrsdixYtQnZ2Ntrb2wUuYsGcmGh/f79w7HNycuDxeJCTk4P58+ejoKAALS0tsNvtmDlzpuDk3DuRkZEwmUwi00o1UDKrCgoKUFtbK3LBVD+k4/b394fRaER3dzdCQ0OxevVqWCwWpKenQ1F8Q7WNRiP6+/uh0+mwcuVKNDc3o7OzEykpKcJ7J5RhMpnESJjNZslQoqKi0NXVJTUGPlvABy1NmzYNixYtwvDwMLxer0zNIuWPsAGVDlNSUrB8+XIEBATAarViYGBgynsDECZHaGgoOjs70dXVhbVr18pe5PNm5E74wuPxYMaMGcJh7+npQWJiojjz0NBQREREICwsDMuWLRNtdq
vVKoM1AAiEwqYsGsHp06fDbrejqqoKsbGxyM/Pl+xx+vTpQsdlBD9jxgxxUJmZmVIUd7lcohdvNBrR2toq2Tv3ABlmWq0WWVlZsNlscDqdSEhIEK2amJgYmEwmxMbGSmS+ZMkS1NbWYmhoCA6HQ/oZ2NPS19eHkJCQKdz6z3t9abRlOOiCQwKIDZMiRgobC400gOqKMTcYq/o0bmSdaDS+4QpGoxHPPvssDh06BGCyUYielu/LIg8A2ZQ8HKyEs3GFxocPibQ7fs0oS439chPwIQUFBUkBlp6f18WJPSxC0amphzQTg2NNQW0kSUuzWCx47rnn8Ktf/Up0NYBJ0ScKKKnvmdASACmEktJGlhKzLwDyfMi0ACadNgBxYlarVcbaabVaST9Zq1DTEdU4PfVf+F7cG2yAIU+d2CuNK9e2pqYGSUlJ+N3vfodf/vKXwmigOBPnd7pcLhw4cACPPvqorCnvj9kHG8xYBKVh1uv1U9QEST/s7e0VRUu9Xi/fp+Ilu2YdDscUfjjZMi0tLdDpfNo5UVFRogBJyJHrxMyGe5pzSf39/TE2Ngaz2SxNYNHR0RgdHcXg4KCM5fPz80Nqaqo4bBY3qcfCvcVC6vj4uBg47inWbLg2lKtubW3F8uXLYTabpWdBrR/jdrthNpsF3uBzIbWZCqxcb9Zi2EjocDgwNjaGtLQ0nDt3DosWLRJYkFCGw+FATEwM/Pz8BHdXa9zwOZPCyGyGn8UOYvLk2TzHYIWjFQMDA0Wjh8Vh9puQN28wGGAymaTTlgw9SlzweZOdQ9YRgC+U/P1SRO7qJhw+SGJd/JpGnMaT0T152OyeBHyQjnpYg5q//corr+DkyZPYuHEjgKlt6Tz8LGwCkwJfZDuQQ0/IhJ/Lz+ImYiROrrK6o4/RmxoL5vcpgUDjfXs08Pc8Nt+DbfrqLkE6LtLoioqKsH79ehiNRll7bnquGaMTrg3fS+3EaDRpsNnApZYwYIMG740GnkU5wiOUlFB3MtIoMepXUwfVn817ZBBA48J0nw6XmZDH48GHH36Ic+fOYdmyZVIkZCDg9Xrx4YcfyhzWO+64Q96DDkctV3t7tybTfz8/P1ljh8MhzoBTo8LDw6cwHnhwXS6XMD7UHb7qTk021QAQCI5OmEwXaphz/9LBABAjSGiQ+1oNZ/IZELYiL5s/43PiWSF7BIBooJPa63Q6odVq0fbZ8BZq/nOdaKjIiVfbAXVfCvcHnbX6DPH58m/PnDmDzs5O5OXlyf2HhoYKFTI6Olr45+pGQlJ0aRu4Z9Q9M+rrodMn95wZGrNkNQ06JCREAjKKkamDPIraKYoi+0utN0ObODIyMqXT/nPt6pclcj937twUY0FvSeiARocvVqx1Op1U/bk49HY0TOpOTafTCbvdjsTERDkITJPJtwZ8cAhTRXpaemB19sC/pSfnS611QwyS2QQ3rtqIkf/NCJIPTr1xeT98Zoyc1REl75P3rdFo5PBptVqYzeYpuCDf3+VyyWHntbNww5ea8UL2AjMCYFIyQt3BC2CKge/s7ERnZ6e0cxPzVTf6cG0JafE5hISECB9Z3TDDtXK5JmV/2XBEo63uFGZDSmJiolwb6YZkMjidTmHHcJ+o71dRlCkNSOo14l6yWq0S5fKZ03EywuQ98hnycNNI0TGZzWZ5H64D1zQ8PFwMBg89IUH+Dp2P0+lEW1ubNMDExMRIhzCLiiMjIwgKCkJlZaVAPYQY+DX3vNlsFnYL9zvH5xGus1gs0l1psVgkC2VxVK2tHhsbC2BStZUZA88I+xwYTDCgcjgciIiIQGNjozhCOicaUj7HiIgIDAwMID4+XpwzHRA1YrhfOKKRNoYUVe5t3jOL9PweHbpO5xsTaLVa5VnTHtG+8Z/b7UZERIQ0ADIb4hkBIE163H+7du368kfujH7YxcZUX81RV3sqYqBMU8iTV0fyPGT0rnq9HqGhoTLVBoBAJcPDw5J2csPRwfB9AMjXjCjosRnpqw2NOpvgf2kseM28DzXGzkifm47RpToiV0fSxDJ5feromu9JwxweHi4/J/xEbjKvm9EiG0D4e/wMOjlgMlKi8VIrZLKITQdisVjQ3NwMh8OBkJAQGI1GhIWFISkpSdLx0dFRhIeHT9EI4RqoYTJGpTS6/Gy1FgvvSd0zoNPpEBwcLPrcNPiER3Q6HcLDw5GUlCRRF5tP+GzJhacjoCPVarUC8TAjURQFERERkoEyeOCwcLXxZaMMU3nuiZGREYFoXC6XNEUxmlQ3ZwUFBUnLPz+T0abNZoPNZhP8mu9lMplQV1cHnU4nFEOPx4NPPvlERk8yS+I6A5AisTqi1uv1UixkhkshNU4Ko5YLW+yNRiPi4+MRHR2N/v5+dHV1ScbB9aWj4/uSykjjSaYKYZGEhAQkJSXh/fffl7OsVnV0Op3o7e2Fw+GQ/aIODmlQ7Xa7XIsagmNAwEyMcCCNtFrKm818AwMDElioBcQ4nnBwcBBWq1Uyfzo8RVGkCxnw1Y5ot77o9aUw7jxAjFhooGmkaVy4oPwZb04NdVCrmxGZmk7IqJIGlsaHHpfaKswS1F8Dk7K3fIBM92j01FIHvC7eA6NtQjx0GsAkrMLfYdSg1oJhizeLaDSY/Ll6Pfg9XjujDH4WISZg0hnx0Py9iVIApLjGiIKQCfFiHmxemzorojKhyWSS6Gd8fFwq/pw9GRwcLOkon5vNZhOFPkaxbDWnwaEDp5EjnsmoXa09dPsa8fPUNRP1M1RrqHC91XuUho7QBZtXWFTkXlPvIf6XGQ/3uLr4zuyBn8n6DtUPgckZmoxm+Sx5jQyYiAkzmuUeIrbOgIdrS5x8+vTpcDgcAqdxf3KvMUqPiIgQw8aOWzpZvV4vBUqXyyXt9wy+uC4DAwNiUJl9UQqA+09dh+L98n6YCcXFxcn7ajQa6VVQkxsYlGm1WqHgMpCjwwMgHaoMMml0uR4M0G4/B2pbwmwrNDRUuoEJV6qDIAACEWm1WllTddDAgI8yBWopir/3+lJMYgIgBo9FDKb11PlmZMC0nBuL3u92nJWbgjAHH5LNZpNIjdGnuqDKxebGodMhHMEUkcUjPjx1AZdwCLFgbhxmFxS/4s94sHmYWcjki99jRMj3t9vtUthksZVrwohcDRkw0lU7Id4z783pdArOyu+rIQB1tMAon4aN6T35wDwIXq9PRmWSEgAAIABJREFUkKqjowMAptQsGGlyziSfIdeZa8sIlc+BjUMsIqqjZ3X7OZuH3G63NCRpNJMKnYyA1M6Oe0d98DhajVLO3Jfj4+NS0GQwwb+j41EbAf6O3W5HUlISFMXXA6EegkFsXq/Xy7xTrqPNZpM6TkREhET56sHJlDBmNsSsicVCZnVkwUREREjkSHjB5XLhkUcekX3mdruFnz0wMCAGkgMp2KJPCimFsiwWi+xrFhPpLCnrMDw8jISEBKE8ApMCbIQd2z6b88tmNj5jsp7YU0IjzFd6erpowNTX16OhoQE7d+6cYkgZMPJvCTkyMqc9IHTJZ8yf8exSo4kOnUV9nU435brp7CmMp3ZQP
NvU2lHvHwBCi3S73XKePu/1pYjc6YkZndLrarVaVFdXY//+/bh27RrOnDmD9957D9euXUNxcTEaGhpgtVrxxz/+Uf6ebAAukNpoM9WhCA9fjLDVER69OTApAqbVavHxxx/j0KFDUwqm9ORqOIWGnJAJI1l2zamLjOqCDTcWH6Bac0INLwGQgtPEhG849f79++V31feljupu17CgEaIz4Ibi+hNbpYqlupgETGYzzA6YXQGThSg2KVGTndg6B0Q0NjbKz2lc1XojzLBuz5J4D8zKqEXCA0joxmw2o6ysTLIhOlQAclCZPVLfRh1VMYICIHzuW7du4fr165LyqxtN1MWu22sufAY0gBy7x2ujEVfr6IyNjQn8wazAZrOhpqZGjDIdK+UD+vv7pahNKIABh5rFNTw8LPo/AASuIVWRbe/h4eFyJnhGHQ6HNPow2h4eHhbMnk4/NjZWIlm9Xi8dluHh4VAUBYmJiRL86PV6hIeHizIijRvhTzVUA0CgPK4vzxUHpPv5+cZSqqdVDQwMyB4kdHd79smsOSAgYEqwRkhV3QTI6yGczOekntZ1u7ibuoYwMTEh+5AZN8/l4OAgqqqqJGMm1DYyMiJ6Vp/3+lIYd2ByeIN6Af39/XHo0CFs3boVGzduRExMDFJTU7Fy5Ups3LgRQ0NDCAkJQXFxsUSjLB4SC6PRofcldMMDqGa6cHMwZeQ1UQnu6NGjmDt3rgxhoGFlVAZMzpzkZ9PB0DCziYSGlUMcuGnJhqDD4yZT1xv4tyyKMcV+7733JAoljs5oUY3Ds6bAA0QMUO04uC40rupiLj+fEAY3Ig+c1+vTDykuLoai+Oh7/f39ssHJFomPj0d4eDhaW1slO+MBZLpOOQcWt8hV7+vrg8VimUJ3jYqKEliE2LpaJZDRtsfjkaIVBaKY4dTX1wOYFIfyeDwyCo0G/7/+67/Q2dkpzUqs17DzkwdWbRC4VjTWBoMBJ06cwNtvvy1ZIot1hHQATKn/AJMDQ9555x1Mnz59ioMgC8Zms6GpqUm03VnUJeOHBU3+PmEbarvs2bMHXV1dYqiYEVmtVoEEdDodYmNjpcmJAYLH45FGIJvNBo1Gg4iICBw+fFhYUhUVFejr64NWqxWZ4s7OThliwoI99xrgG8bCAjkDM3LzObpOXZD3eDwYHh4WcbmAgACkpKRg48aNqK2txcSEr9N7ZGREajB08upzwLmtXCNG5OwoZT9CaGiodP/SKHPAtp+fH2JiYmRv6vV6RERESJBHFhSfXU1NjawlzwX1jEZGRmTAuJpg8vdeXwrjTq/Hoga98MjICL71rW8hLS0NVqsVb731FtavXy+RMTvZHnroISnQMLrk4VJH04ySyRNl5KvRaESNTo2zcxMTdz106BDS0tJw//33y4MhFkjHwQIMoyNyVZmGqTFEVuoZSTK1ZgRG2INpHgCBHajeSIO4b98+GdjNar3dbhfjQCM2MDAghk0NqdChqPHZkZER0cqgQST2R/61x+ORDTc6Ogqn0wmHw4EjR44gJycHAwMD0Gg0wo5gNhAQECCMl9WrVyMwMFAUBuls+KyYxTCCprGJjIwUXFbd+EUjyqipsLAQM2fOlBSfaTEjXmqdAMDs2bOn1GqYzRD+cDgc0kSWk5MzBdLjf1kwJDWOzyQoKEjYWiaTCaWlpVi4cKFAFmNjY5LaOxwOkSkgTENmi9VqRVJSkkz9YVaTmpqKmJgYREZGYtmyZQLXsMmG+06v92nRGI1GkVc2GAyS+YSEhGDGjBkStZL5EhUVJcwcrVYrWjw9PT2yjlwnPz8/REZGwmAwoKWlBemfaUL5+/sjKysLS5YskT3E5h4aUWLdAKQASqeoZtRRt0Y9SNtsNiMiImJK9h8ZGSkGWK/XY9myZdDpdIiOjkZoaCiGh4fR2dk5pUZG407YhgEhP5+wDo01bQZh0dLSUqSlpUm2zqh7dHRUsv3g4GBxUMwST548ibi4OHHY165dQ2pqqnTsBgQESN3i/4y5azSaFAD7AMQD8AJ4U1GU32g0mt0Avg5g4LNf/YmiKJ9+9jf/AuBJABMAvqsoyon/6XPUXHQurE6nQ3p6OsbHx2Gz2VBZWSkHlg+qoqICYWFhOHToEBRFwTe+8Q1hh1y9elU6Fh977DFotVpcvXoVbW1t0Ol0uO++++DxeNDe3o7r169jZGQEO3bsEL4xo2W9Xo/r16+jpqYGn3zyCRYuXIiYmBg0NTWhtrYWeXl5mDZtGoaGhnDs2DEsXLgQnZ2dcDqdWLlyJT799FPccccd6Ovrg8lkwp133omKigqYTCbce++9iIyMhMvlQkNDA3p6eqAoCu65554phk0NrZw9exZDQ0PIzs4W41BeXo45c+ZgbGwMDQ0NaG1thb+/PzZu3ChSCJ2dnbh27Rry8/NFaIkZwmfPDRMTE7h27RpMJhNSU1ORm5sLrVaL69evo7e3FwkJCcjLy4NGo8HHH38sQwTa29sRExODpUuX4tKlSzh79ixCQ0MRGxsrVMeWlhZJk6dPnw5/f3+cOXNGBNFqampgs9mQn5+P69evAwBWr14tUa/aabJ57OrVqzAYDMjKykJMTIxkLmporaWlBdnZ2TCbzZg9ezYiIyPR2tqKnp4eBAUFwW63Y86cOQgICMDp06dFm9/tdqOhoQF9fX1ISkrC9OnTcePGDbhcLty4cQN33XWXGH610+vv70dzczMWLlwoxre1tRV9fX3CDOG9ms1m2WuEA5jhaTQa9Pb2oq6uDunp6YiMjERPTw9qa2sxNjaGuro6WX91Q9mNGzeQmZkJPz8/tLe3w2QyYdmyZbh8+TJ0Oh1WrFghbJre3l50d3cjJycHPT090oB18+ZNzJo1C7W1tUhPT0dVVRWio6ORmpqKsrIyJCYmIjExEX19fbIvuc84UGbFihWw2Wy4ePGiGM3o6Gi0tLRg2rRpiI+PR3BwMKxWq0TRer0e0dHRgsfn5eWhsrISDocDycnJyMrKwvj4uOzv1NRUqXk4nU5cvHgRmzdvlppVf3+/ZBN0zAsWLEBAQIDIXaxbtw5msxlmsxkJCQnwen3a7t3d3YiJiUFsbCzq6uoQGxuLgIAAVFZWIjc3F2FhYZiYmBD2EuCDSi9evChdsMuXL4fXOznIe2xsDLm5uVOeN+ALaLq7u6esc3h4uDR+Xb16FTk5OVJjc7vdaG9v/0Kb+r+J3D0A/klRlGwA+QCe0Wg0OZ/97NeKosz77B8New6A7QBmA9gI4DWNRvOFbHtGPAAkLVJDJW63GwcOHMDw8LBgeTqdDoODg9izZw+2bNmC/Px8/PznP5do+l/+5V8QHx+PO++8E1euXBFscs+ePVi3bp1MQSkpKcGlS5ewdu1ahIWFif6HOopVFEUO6le+8hWkpqaKrs3XvvY1PPjggzh8+DA++OADnD9/Hr/5zW+wbds2LFq0CB9++CEuXLiAF198ERs2bMCWLVvw0ksvYcOGDVi5ciX+8pe/SHSwb98+rF27FjabTbBXpojj4+Nob2/HV77yFdx///3YuXMnnnnmGcHnOjo6sHPn
TrhcLuzduxdr1qyR6N7r9WLHjh3w9/fHQw89JNfLSJgRqqIo+MEPfoBly5Zh27ZteO+99zA+Po7HH38cc+bMwd13340TJ07g/vvvx4ULF5CamornnnsOS5cuxfbt2/GP//iP0Ol8OhxDQ0MoKChAUFAQBgYG8PbbbyM3Nxf9/f343e9+J3IDeXl5uHXrFqxWK7KyslBTU4OJiQnMnj0be/bsEfog4SnWMoqKivDKK69g1apVyMrKwtGjR6W+oXZWVVVVqKurw/z589HY2Ihnn30WN2/elHXKyclBbm4uWltb0djYiPfff19gpp/+9KfIy8vDqlWrcPLkSdjtdixbtgzLli3D3XffPUWvm1nNL37xC3zyySeYPXs2uru7MTg4iJ///OeIi4tDTk4OLl68CL1ejzvuuAN33HEHli9fLvADoYKenh709PRgz549MBqNmDt3LioqKkRBNCYmBg899BAeeOABBAYGoqmpCYODgwgICEBbWxsSEhJkQEl0dDS6uroQEhKCzMxMHDp0CKOjo+jp6cErr7yC/Px8LF26FL29vcjOzkZYWJjs3dbWVrS0tODo0aNITEzE6OgoqqqqMDw8jKKiIng8Hhw4cAAajUYYTxcuXMCCBQsQHByMgQFf3Ge1WrFy5UrMmjULBoMBJSUlaG5uBuCby0BJ5zVr1uDIkSOC2R88eBC1tbXIyclBR0cH/vCHP8Bms+Hll1/GjBkz0Nvbi6GhIYHNEhIS8Mgjj8BgMODMmTN46623sGDBAhw6dAi3bt2Cx+NBZmYmOjs7MTAwAIfDgbNnz8JgMCAkJAQmkwlarRa//vWvER8fj+zsbFy9ehV2ux3Tp0/HJ598Aq/Xi7S0NJSUlEg9gvNRAV8GFB8fj/HxcSxfvlwCIz8/P8ydOxdJSUkoKSmRTJx71t/fHxkZGRgfH8eWLVuQmJiI5uZm1NbWIiwsDENDQ/jtb38rzuS1115DUlLS/824K4piUhTl2mdf2wHUAviid70XwHuKoowritIKoAnAkv/hM8SYElOjUSWWd+PGDaSkpAgGpSgKBgcH0dDQgLCwMLS1tQm8UVlZif3796OkpATvv/8+Fi9eLAXBU6dO4Yc//CFWrlwJr9eLQ4cOYWRkBEeOHEFzc7Pg6WSWkEvLVFaj8anyzZ8/X/RmHA4HTCYTNm7ciObmZtx9992YmJhAbm4uNm/ejKamJskSuru7sXXrVmi1PtnemTNnilzxqVOn8Oyzz+LOO++cwkxhevn++++jtbVVinFZWVliFJKSkhAVFQWNRoPTp0/jueeew5o1awAAnZ2dyM3NRUpKCgBflECteTJqiMneunVLjNuPfvQjhIWFobq6Wpwmh0lkZWWhrKxMZICdTidyc3PFWcTFxcHl8ikDVlZWoqOjQ2Ch9PR06PV6xMTEoKqqStQux8bG0NraKpor7Clg4YusBoPBgCNHjmDatGnSbLZkyRIZwKzunKyoqEBsbKxcu9PpRFxcHDIyMpCamgpF8U3kycjIwMyZMxEXFyd7sLu7G4WFhSgtLcXdd98tHZQzZ84Udg4jMDVEUl5ejitXriAjIwOK4htacvXqVVRXV4sWt8lkwowZM6QGwboGC8mVlZXo7OzExMQEIiMjBabU6XRobGwU5oXBYEBUVBSio6OF1tfc3CwNWoAveqV8MPnXGo2vKe/w4cO4fPmy6AU1NjZK63x6ejq6urpQUFAgCpzTpk1DZ2cnMjMzYTAYEB0dLc+8qqpKipiLFy9Gb2+vsDoIuaampiIiIgLR0dFoa2vDtWvXMHv2bGFHKYpP2yUyMhJJSUlITk6WqDwrK0sojfv27UNWVhYSExOF18/6GACcOXNG9GBGR0eRk5ODnJwcUacMCgpCSkrKlKHxVHvs7+9HaWkpampqsGrVKmkkosCby+XTuSKriww+BmK1tbWIioqCn58fBgcH8emnnyIyMlKKzf39/VKL497mGkZFRUmwVVVVhaSkJFitVoyMjMj8hZ6eHvj7++PmzZtfaLv/X2HuGo0mHcB8AGWffesfNBrNTY1Gs1ej0UR89r0kAJ2qP+vCFzsDAJN6KOTgcjOw8l1UVISnn35aOJ9erxdHjhxBbGwsNBoN3n77bdx///24fPkyioqKMDExgYcffhjbt2/H/fffj9HRUXz88cdobW3Fv//7v2Pz5s0YGhpCYWEh/uEf/gFPPvkkfvCDHwiuR5oTcfOBgQFs3rwZiqLg3Xffxa5du6R7bvbs2di1axeysrIQHx+PrVu3Stt8WloaYmNjsWnTJsHGCwoKMDQ0hD179mDJkiVoa2vDyZMncfPmTfz4xz/Ghg0bxFCwaOLv7489e/Zg06ZN8Hp9wmY7duzAwMAArl27hieffBJOpxMnT57ErVu3sHv3bvndgwcPCiylKAqys7OxY8cO4coSvujt7cXatWtFmyMqKgrd3d3YvHmzrMvJkyfxz//8z0hJScGxY8ewY8cOaLVanD59Gj/84Q9hsVhQU1ODDRs2oLu7Gz09Pfjb3/6GefPmITQ0FBUVFSgoKEB/fz9iYmJw5MgRWeuysjLRvT5x4gTWrl0Ls9ksuKzX65UoiTKqTqcTn376KTIyMuSQDw0NoaOjA/7+/igsLMT06dOhKApKSkrw5JNPStp/3333ISIiAuHh4YiMjERHRwe++c1vwuFwwGazYc2aNVi1ahVWrVqFxMREREZGoqysDPPnzxdcnE6YMy137NiBN954A01NTbBYLOjt7UVBQQGWLVuGvLw8aZ46fvw48vLyRI+e9QY6jMLCQsybNw8OhwO3bt1CRUWFFMAHBgaEAkqIisPDQ0NDcerUKRH2amhoQExMDFwuFz766CPMnj0bfX196Ovrw65du7BlyxY0NDSgt7cXxcXFKCkpQVtbG6xWK6KiomA0GjFr1qwpGDbVNxVFwaJFi4StcurUKSxdulRqMSzaJicnC3R1+PBhrF+/HgkJCTAajSguLhYaZENDA+6++26kpqYCANasWYPExETYbDYMDAzgscceQ21tLR555BGR4+bEKRbWPR4PIiMjYTabsWDBAvj5+cm5bGtrw6lTp6DT6WSwR2pqqmguBQcHw2KxYOnSpVi2bBkKCgpE+fLSpUvIzMyEv78/zp49K4JxasdMu1RSUoJVq1ZhYGAAn376qUyKc7t9s2HXrFkjDlav18vYPpvNhtWrVwt76dKlSygoKBDYc8uWLfB6fXMuHnnkEaxcufILber/2rhrNJoQAEcAfF9RlGEArwPIADAPgAnAr/irf+fP/5vGgUaj+YZGoynXaDTlZrPZdzHayfmSDodDoqeysjIEBgYiPj5eOvx0Oh0+/PBDPPnkkxgdHUVdXR02bdqEiIgI7Ny5E+vWrUNtbS1qa2tx8OBB2O12lJWVobGxEaWlpXjzzTcRFRWFF198ESUlJSgvL8ef//xnwesZqZKad/PmTWzatAn+/v74+te/jpMnT6KmpgYvvviiYM8mkwlPPvmk6FdoNBp0d3dj165dMBqNqK+vx4ULF2AwGPDhhx+ip6cHR48ehdFolOi2qqo
Kb7zxhtAEGX15vV58+9vfxujoKE6ePIn9+/cLE6K9vR0hISE4ePAgrl+/jra2NlRUVOD3v/89FEXBzp07cfr0aTQ3N+OFF17Ahx9+KPxaOrKxsTGEhoZicHAQ7e3tqKysxJUrVxAfH4+enh40NTXh3XffRXFxMR577DEp4OXn58NkMuH111+HxWKBn58fqqqqoNfr0dXVBavViq1bt0Kj0aC1tVWUPA0GgwymaGhoQHBwMEpKSrBu3TqMj4+jqalJFCEpcMYozO12Y/78+ejs7ERRURG6urpQUlIiBigiIgKxsbHQ6XR48MEH4Xa7UVZWhldffVUGP5w5cwYLFiyQteXgYUbPsbGxcDgc6O/vl9mu/v7+qKqqEjiGGR7Tehbc2trahCAQHR0t0rUDAwPo7u6GXu+bf2uxWFBZWYnAwECEhYVJk5LRaMQDDzwAm82G3t5emM1m/Nu//RtycnLQ0tIirBIAMj4vNTUVISEhsNvtSElJwbVr1xAeHo6ysjKsXbsWbrdboBfK7nK0IhuXSB/UaHwNdna7HYsXL5bOVhb/udeLi4sxMeGThvZ6vdiyZYt0o1ZWViIrKwuhoaEiYUzYKSoqStgy69evR3FxMc6ePYuGhgYsWrQIQ0NDuHjxIhYvXgyNRoMPPvgAc+bMkfVj5vntb39b6IAsUrIQnp+fj+7ubrz//vtYt24dgoODcfDgQWRkZKC+vh7+/v7S4U6+uNFoRHJyMsbGxjAwMID+/n60tbXB398ftbW1ACBYd319vRTJiYMzMCUde2hoCPfccw+WLl0qU6vWrl2LqKgoCazYg2Oz2dDc3CznhoJuJALMnTtX+mvuv/9+1NXViXbP5710u3fv/t8Ydj8AHwH4QFGUvQCwe/dux+7du5Xdu3crzz//fAuAH+3evfu1559/fi6A8N27d5cAwPPPP/8MgGO7d+/uUr/n7t27K3bv3v3m7t3/T3vXFhvXVUXX9tjjx4xnxvXY4/EjdqZ2qhYaP6RA0qYkRREhAYX0r2lB+YgEUvng8YFaKqHwCVIjviuoRAUBIYWqbb6oCih9KCqJmvcDp5aVErt+zziT8Thu5vBx79o+DkkV2qQzvT5Lsjy+Y3vuPvecffY5Z6+197/48ssv79+7d+8KtiSj5sOHD2N0dBT9/f1YXFzUpQmXYNu2bUNDQ4NqSGzevBk1NTXYunUr3nrrLZRKJezatQuJRAJ9fX04evQoqqurVTiM+6BNTU3YuHGjNjaX/1zmvf7663jssceUhTg+Po7R0VHs2bNHI89isYjOzk6lSnOwsxCGMQatra3o7+9HT08P2tvbsWXLFqTTaTz44IM4evQoampqsGXLFgDLeiNMSxwYGNDtjCeeeAInT57UMnGnT5/Gpk2bMDg4iGPHjiEc9koPcj9vYmICIyMjeOqpp1YoPTKbiOmH7e3tGB4eRiQSUUnUVCqFS5cuYXBwUKNgwMtW4MFob2+vEmdCoRDOnDmDDRs2oLGxUSs+xWIxZDIZfPzxx1pUIZ/PY9OmTRrFcLmfSCRQKBTw8MMPa4oZDy8LhQIymQwmJibQ39+PwcFBhMNeST0evnLLoq2tTSO1VCqln0PdEZs4kslkMD09jUwmowzN0dFRJJNJdHV1YWlpCa+++iq2bdumAUAkElEORH19va5W2JcAoLu7G6O+TDBzvNetW4eZmRn09fVpJsv8/LwWJWlublYNlMHBQa2fe/z4cVRVVeHxxx9HJBJBsVjE3NwcmpqaUCgUlAzW0dGhUg7sN0yJbGlpQVdXF8bGxlBV5ZUaBIC1a9cik8kgHo8jnU4jFPIEyHj2k0wm1ZnNz8+jt7dXa/lGo1E0NDSoZDNz18lHyGQyCIfDGBgY0MmIWSf5fF4LciQSCSQSCZ2kAWifGRgYQFdXl9YM7uvr03RTRtA8m3nggQfw0UcfAfBqACQSCdx///0olUoYGhrS8T0/P4+uri6t1sWUVGqwp9NpPZtas2aNrgx6e3u1WhTPtegrmpubsbi4iA0bNqiPOXv2rJ578G8ol8Esv/b2dly4cAHr169XeYVUKoW6ujrkcjlEo1G0trau4I68++674/v373/xln7bZnPdxrELgN8DmDXG/Ni6njbGjPuvfwLgq8aYJ0XkSwAOwttnbwfwJoA+Y8yN//3vHoaGhsyRI0d0C4SHqIwmuBdnn0jzQbJBbUIAsCz/a0e+zA2lQ7NzvpnGxOUd070OHTqETCaDl156CS+88MIKVqOtOcJsH5KBKA96c+44nQnv0ZYYYG4/t2Lse6cwGXP56aAAqCYJI31OLHaapS1BYKcM8vnz1J6sPLYtV1AAdAnJzBXaysFVLBZx/vx5LXxsqydy8qivr0djY6NqbzNjgAdTpVIJ165d0/zqc+fOoaenR5+PTQhin7Dbjcw+m3DD/U2SSshcZapidXW1MjgpDVwqlRCPx1Uk6sCBA7qdxkIZ5GXYjFl7i4B55lVVVSqgFYvFtD3JkWDiwMjICFKpFOLxuKZCUvLWGIODBw8iFothx44dOjFxYhkbG8O6deswPj6OQqGA9vZ21NTUYGxsDDdu3FAnRf7DlStXVN+GhdFbWlpQX1+/QguGVYri8bhqs1AqgExy3iNZ43Rk3CZjGuzExAS6u7vVqV2+fBn5fB6xWAzFYlGrJF28eFFrFLDfk/zHn0n4s/sAxxJL2XE1u2/fPu33/OyGhgY9gymVSpienkYqldLgYHp6Wsch0yyZnsjJGFhWQ+UY5lYNt2xJwOP4Z+owuRrkYFDpkmObAQlTV5nowb6dTCYxNTWFffv23VY47E7kBx4F8D0Ap0XkhH/t5wD2iMgAvC2XUQA/8AfrWRH5C4Bz8DJtfvhJjp2gM+FS146w7CiTTC3+roisUFjksox/y5x3Omz+H+4/09naOiR01PX19QiHw3jnnXewe/duJWcwP5xEJ87EdDikRfOh84FTE4RsQd4HH77tiGmzLQPANmG0mMvlUFtbqzRzm+5uE0DoTDhJMOokA5AkIX4eJwKmFJKBRyYil59M4aMtZEJevXpVl51sZ67KqN3BQ3SmdXG/lKQp2sRBDCxLVNCZ2GQ06nUwN7u62hOwolY5ByYHCydkinJxtVUsFjUiJi+Bz+7DDz/E9u3btX9x245MUDoXewKnU1paWkJbW5vupzKKZv/IZrPo7u5GNpvVA1Ruk/A51tXV4ZFHHkE6nUZNTY1ug9Gp2oxOPqPm5mZ11uwHCwsLiEajuHHjhlb+4sRHe8m5qK2t1bxxe/VE5u7k5KQ6bH4G99e5kmOBDQZcTJjgRFIqlfQZU6ee/ZSTJyNjrqqz2aw6OhIK2V/m5uYwMjKCZDKJnTt36v/nRMtAg5PrwsKCSijbEhF8BjbPgpOQzUjnVgzvmYELnw0jc7Y/A9ZEIqEpm7wXbtXYLHMASpZioGhzZm6HipH8ffPNNzUSInGBg8ZWhmRHt8FHGZSnAAAHC0lEQVRGBZa1ZOgsOPDJBLXz122yAh0ZI0Zbl50dlw6TzFE6Il6PRCI68/L3+QAikQhmZ2dXUN3pTHk/hU
JBVy6hUEgdEnVHgGVnwUHICYERh012YiTLCYT2UFeEbcFlNwfwrSjqbJPOzk79HC4tc7kcjDE4deqUUtbJXKQUAJ2NLadqfGYtSVt0dGTyMSLP5XI6+G1CFwBtR4o3UYGRjiGfz69YsbEPcaDTYTU3NyObzSopBcAKqQL78JRbT5Qq4CrJnkxJNqNWSzabVZ1v2snV1pUrVwBAy911dHRgcnJS+x77JNNC29raVEKWTm1xcXFFxE/OAx1TKBTC1atXNepk37p+3Sv4vmbNGi23yGibOfvRaBRzc3M66dC5cwVUV1eHqakpbedoNIpwOKxVqOjIeODY2tqqh6QAdMXE/082LCNmPm8WwwiFQujo6FBNHlYm4sTJDBRO/lNTU0p0YtaKzS6fnZ3F9evXlenK55LL5VaMb64UqGPDyJzjhZW2ZmZmNMLn+wwo8vm87iLYyrMknJHAZbNYSfZjAMOAb2FhAc8888xtI/eKcO4DAwPm7bff1p85G5LtxUHF/T8OXEblANSpctkFLFeJ4XKO+fOcSelAqIXC9wlGwuyU/EwOmKqqKqUEc2a2i3NwIM/NzWl+vu2IGHmQmUiHzEmJSzVgWRPdHnh0dOw0rO3KaMSO0mtrvTqb3HLh5GOzXLmdxE7K94tFr75qLBbTbBXaCACXL1/WepXJZFLbAYCmZ5IKnkwm1dFwL7GpqUmjXUZYnJxZyYjtAXjRjC2sdbNgGtmAtJ9txUmfg2NyclL3uDlRkdzCZ8HnHIlEdBlPJ80Jnk6dfYVtS+lXPjv2N9qQzWaVcUtZDE5uU1NTKBaLmjnCfs+cfwYTvG+uQlh7lltOXFFQs4h9j8+CUe/w8LDKHDc0NGB8fFzbhjbOzc0pKY2OkXbzmdfW1mJmZgbXrl3TCT0ej+t4YJ8tFouYmJhANpvVlFaumqgBxfMiZrZEIhEda3xmxhid/LnlxkPxUqmEWCymssOtra3KuAaWtdgPHTqEmZkZPP3004hGo5idndXtXlvAj4EPgxI7eOTWKPs2GddsQ65clpa8IvCFQgE9PT3a73lOxFVksVhUoTsqqNpaS0zDrHjnLiJTAK4BmC73vZQBSTi7VxOc3asL99rubmNMy63eqAjnDgAicux2M1CQ4exeXXB2ry6U0+6KEA5zcHBwcLi7cM7dwcHBIYCoJOd+y0T8VQBn9+qCs3t1oWx2V8yeu4ODg4PD3UMlRe4ODg4ODncJZXfuIvJNEbkoIpdE5Nly38/dhK+WOSkiZ6xr94nIGyIy7H9vst57zm+HiyKyvTx3/dkhIl0i8g8ROS8iZ0XkR/71QNsuInUi8p6InPTt/qV/PdB2EyISEpH3ReSw/3Pg7RaRURE5LSInROSYf60y7CZZpRxfAEIAPgCQARAGcBLAQ+W8p7ts39cADAE4Y137NYBn/dfPAviV//oh3/5aAGv9dgmV24ZPaXcawJD/uhHAv337Am07PEXUqP+6Bp409sag223Z/1N4ulKH/Z8Dbzc86ZXkTdcqwu5yR+5fAXDJGDNijLkO4M/win0EAsaYIwBmb7r8HXhCbPC/77au/19FTioV5vYFXgJtu/GQ93+s8b8MAm43AIhIJ4BvAfitdTnwdt8GFWF3uZ37pyrs8QVHyvhqmv73Vv96INtCVhZ4Cbzt/tbECQCTAN4wxqwKuwH8BsDP4NVZJlaD3QbA30TkuIh8379WEXbfiSrkvcQdFfZYJQhcW8hNBV6o6XGrX73FtS+k7cZTQB0QkQSAV0Tky5/w64GwW0S+DWDSGHNcRLbeyZ/c4toXzm4fjxpjxkSkFcAbInLhE373c7W73JH7fwB0WT93Ahgr0718XpgQkTTgaeLDi/CAgLWFeAVeDgH4ozHmr/7lVWE7ABhjsgD+Ca9IfNDtfhTALhEZhbe1+nUR+QOCbzeMMWP+90kAr8DbZqkIu8vt3P8FoE9E1opIGMCTAF4r8z3da7wGYK//ei+8Cle8/qSI1IrIWgB9AN4rw/19ZogXov8OwHljzAHrrUDbLiItfsQOEakHsA3ABQTcbmPMc8aYTmNMD7wx/HdjzHcRcLtFJCIijXwN4BsAzqBS7K6A0+ad8LIpPgDwfLnv5y7b9id49WWX4M3a+wA0w6tONex/v8/6/ef9drgIYEe57/8z2L0Z3nLzFIAT/tfOoNsOYD2A9327zwD4hX890Hbf1AZbsZwtE2i74WX5nfS/ztJ/VYrdjqHq4ODgEECUe1vGwcHBweEewDl3BwcHhwDCOXcHBweHAMI5dwcHB4cAwjl3BwcHhwDCOXcHBweHAMI5dwcHB4cAwjl3BwcHhwDiv52BLX3m0/pAAAAAAElFTkSuQmCC\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "imshow(img, cmap='gray')" - ] - }, - { - "cell_type": "code", - "execution_count": 70, - "metadata": {}, - "outputs": [], - "source": [ - "def make_prediction(img):\n", - " processed = img / 255.0\n", - " processed = np.expand_dims(processed, 0)\n", - " processed = np.expand_dims(processed, 3)\n", - " pred = model.predict(processed)\n", - " pred = np.squeeze(pred, 3)\n", - " pred = np.squeeze(pred, 0)\n", - " out_img = pred * 255\n", - " out_img[out_img > 255.0] = 255.0\n", - " out_img = out_img.astype(np.uint8)\n", - " return out_img\n", - "\n", - "def path_leaf(path):\n", - " head, tail = ntpath.split(path)\n", - " return tail or ntpath.basename(head)" - ] - }, - { - "cell_type": "code", - "execution_count": 65, - "metadata": {}, - "outputs": [], - "source": [ - "pred = make_prediction(img)" - ] - }, - { - "cell_type": "code", - "execution_count": 66, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 66, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAADECAYAAABk6WGRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOydd3zURf7/X7vJpmx62TTSSSMECBA6AlGUEwRF9BQUhZ8eHnZOVDj14MATDhVFREFABTT0KjUQUiGkkt4TkmyyyWaTbHY3m+37/v2R2/kmJKFjudvn45FH9tNm5vOZmffnPfOZeQ2HiGDGjBkzZv674P7WCTBjxowZM/ces3E3Y8aMmf9CzMbdjBkzZv4LMRt3M2bMmPkvxGzczZgxY+a/ELNxN2PGjJn/Qu6bcedwOH/icDjlHA6nisPhrLhf8ZgxY8aMmb5w7sc4dw6HYwGgAsDDABoAZAGYT0Ql9zwyM2bMmDHTh/vluY8FUEVENUSkBbAPwOP3KS4zZsyYMXMd98u4DwIg7LHd8J99ZsyYMWPmV8DyPoXL6Wdfr/4fDoezBMASALCzsxsdERFxn5JixowZM78fdDodOBwOLC0todfrYTQaYWFhAYPBAB6PB41GAysrK3C5N/e9c3JyWolI0N+x+2XcGwD49dj2BSDqeQIRfQfgOwCIiYmh7Ozs+5QUM2bMmPn9IJFIoNfr4e3tDQDo6uoCn88HAHR2dkKr1cLV1fWWwuJwOHUDHbtf3TJZAEI5HE4Qh8OxAvAsgBP3KS4zZsyY+cNgZWUFDw8Pts3h/F9HB4/Hg5OT0z2J574YdyLSA3gdwDkApQAOEFHx/YjLjBkz/WMwGH7rJJjpB6VSCdMoRaPRCFtbW3aMx+PBwsLinsRz38a5E9FpIgojosFE9K/7Fc+9QK1WQyQS3fzE3wiJRIKKigq23dDQgNLS0vsSl16vh1gshkqluudht7W1QafT3fH1arUav7ZEtcFggFQqhUajueuwdDodGhoa0NbWdg9SdmOkUuk9Da+zsxNNTU13FYZcLkdJSfdo6K6urnuRrFvGYDBALBbfsJ4TEbq6uu5JXg+ETCaDSqViL16j0djruE6nu6s60hPzDFUA+/fvx4kTv69eI1PmP/3008jKysKlS5ewd+9evPfee/D19UVgYCAaGhrY+WVlZRg/fvxdx2tpaYlZs2YhLi7unhvSL774Ap2dnXd0rdFoxJYtWzB37ly27/Dhw2htbQXQ/bw+/vhjSCSSe5JWExYWFti0adNNvalz587hs88+G/B4V1cXeDwe5s6de9/L2qpVq7B79278/e9/v6twTNe3tbVBoVBg9+7ddxQOEWHlypXIycmBi4sLRo4ciSlTptxV2nqi1+vR2dmJp59+esD4LSws8Oyzz2L//v0DhsPhcNDZ2Ynt27ffMwN7PQqFAoMHD4a1tTX0ej0AQKPRQCaTwWAwQK1Wg8fj3ZO4/ueNu0QigYWFBXJzc28pQ2/X4N2JB6zRaJgxcXV1xcyZM/Hiiy8iMzMTdnZ2AABbW1ssWbKEXRMREYErV67cdlz9YWtri7Fjx/bqC7wXpKWlwcXF5Y6uJSK88847OHbsGPN2Dh8+DHd3d7Zta2sLgaDfgQN3THNzM86fPw9LS8s+eW+qnACwY8cOtn19d4hWq2VptLOzw4QJE+5pGk2Y4t23bx/eeust/Pvf/76r8LKysgB0v/ATExMxe/bsOwpn7dq1WLZsGWJjY+Ht7Y158+bh448/vi0DeqN6Z2FhAXt7exw8eBBqtbrPcVM51uv1mDZt2g3juXDhAubMmXPPDOz1mD6YGgwGWFpawtLSElZWVnB0dASHw7mtFo1cLr/h8f95415VVYXp06fDwsJiwAxtampCR0cHAAzYZdHe3t6rKdzW1obOzk7Y2tqySt/S0nLDZmF7ezvkcjmsra0hl8tRWlqKtrY2NDU1obm5GSkpKQCAa9euQS6XQyaTsWvr6+uhUCgAgHmzRDRg81yhUPQyTiaICG5ubvD29mbh9KSzs7OP9216HmKxuN9rKisrodFoYG1tDaD75dXS0tLnvOrqahBRr2PNzc3st1gsZmk/efIkqqur2f3GxcWBz+f3Cbe9vZ09J4lEwjx7kUjUryHoSVVVFQoLC+Hp6Qng/4yE6TlbWnYPNqutrUVeXh57nqYXc3NzM+RyOaysrGBvbw+1Wg1HR0fY2tqivb29V1wDGS+5XN7nZSGVSvvt2uns7IRMJmPp69m90PO5EBHLs7a2tj7luaGhAZWVlSwv7e3t0djYCBsbG0gkkn4NUFtbG+rq+h+4weFwenXpTJ06FWPGjIFcLmdx92yFmuj5jDgcTq
9zetYjmUyGmpoadHZ2wsbGBkC3Ib/+GUdFRYHH4/Ubl4m8vDz4+PiwbbVa3ac7yvTsBypDDQ0NrI4QUa90lJeXg8PhsLIkkUhQWVmJxsbGPnktlUr71DWNRgOJRAKFQsHK34AQ0W/+N3r0aPotSE1NpcOHDxMR0Z/+9Kd+z2lubqbs7Gx6+eWX6eLFi9TY2EjDhw9nx+Pj4+nnn38mIqLXXnuNZDIZnTt3jpYtW0axsbFERDRv3jwSi8W0ceNGmjZtWp84ampq6NChQ6RUKik+Pp5ycnJY2PX19ey82NhYMhgMpFQqSafT0SeffEJSqZS2b99Ob7zxBg0dOpRKSkro7NmzNHHiRFIoFLRmzRqKiopiYXzxxRdUXFxMRETz588nsVjcKy2FhYW0dOlSamlpobfeeotMeaPT6Wjz5s1UUlJCRESOjo6kVqvphx9+oEWLFlFkZCQREUVHR9OGDRuIiKigoIDWrl1LXV1d1NHRQYsWLaKUlBTKycmhyMhI+vTTT0ksFtOOHTuIiGjs2LH0zTff0IkTJyg8PJxWrFhB2dnZtHTpUkpJSSFfX1/au3cvyeVyWrx4MS1cuJCUSiURET3//POkUCioq6uL5e2+ffuoqamJFi5cSHK5nHJycujll1+m5ORkIiIaMWIEEREZjcZezyAnJ4fWrFlDREQxMTE0f/589gzWrl1LRESbN2+m6upqds3TTz9NCoWCba9du5Z0Oh01NzfTypUriYiooqKCPvroI1IqlWwfEdGGDRuosrKSNmzYQBUVFUREVF5eTn/9619ZWFKplJKSkmjPnj1kMBjogw8+oLi4uD5lSafT0bvvvktarZaIiJYvX047duwgg8FAsbGxVF5eTjt27KDXXnuNvL29SavV0ueff04SiaRXOImJiZSUlMTKn7u7O7u/qKgoUiqVpNfraefOnfTBBx8QEdHly5dJKBT2SZPBYKDJkyfT8OHDKSYmhoiI1Go1lZWV0fz586myspIuX75M48ePZ3mXmppKLS0tLI8kEgnl5+fT1KlTKSkpiWQyGW3bto2IiC5dukRHjx6l/fv3ExFRRkYGlZeXExGxtF27do1WrlxJMpmMVq5cSVKptE86jx8/TuHh4Wx7z549VFhYSEREwcHBpFAoaM+ePfTCCy9QYmIiERFFRETQrl27WLpff/11Ft+XX35JxcXFdOXKFdq4cSMRET3yyCNUXl5O9fX1lJqaStu3b6fk5GQaMWIEzZo1i5qamig7O5v+/e9/E1G3bXj//ffJaDTS5s2bqaysjHQ6Hb3zzjtUWlpKALJpALv6P+25FxUVwcfHBwqFAjqdjnk8PVGr1fD09ISFhQUmTJgAhUKB2tpadvzbb79FREQENBoNNBoNuFwuLC0t4eLiwjwAX19fCAQCxMbGYurUqezarq4uaLVabNu2DdOnTwefz0dUVBRMY/4VCkWvbgwbGxtwuVzw+XzI5XJERESgoaEBoaGh8PDwwLRp08DlcmFjYwOBQAB7e3tIpdJeHvrevXvh7OwMsViM0NDQPt0Y1dXVmDlzJgQCAWxsbKBUKgEAV65cwblz52CabObv7w+DwQALCwtYWVmxPtTW1lbmjX377bdoaGiAra0tHB0d8eijj0IikYDP54PP58PX1xfFxcUYNmwYAOD111/HpEmT4O3tjeDgYDz99NMYPXo0ZsyYAX9/f4SGhsLX1xcODg6orKxEWFgY89QqKythb28PW1tbSCQSbN68GWFhYSgpKYG1tTVUKhV8fX1ha2uLmJgYdq8A+nQ/7dixg3mGfD4fc+bMAQAkJSUhLCwMAODg4MDGKQNAeHg47O3tez1HS0tLODk5ISQkBABw9epVPP300+Dz+cy7LykpQWhoKEJCQmBjYwN/f38AQGZmJo4cOQIAePbZZ+Hs7IytW7ciMjKSlbXBgwfjetrb2xEcHMxaoQ0NDZgxYwa4XC5ycnJQUFAAR0dH8Hg8zJo1CzweDy+88ALc3d17hVNbW4shQ4bAz697usqwYcPY/bW2tqKrqwsGgwHvvPMOHB0dUVVVhfLyctbK6QmXy8WxY8dw/Phx2NjYoLy8HEqlEh4eHhAIBAgJCcGgQYOQk5MDANizZw+GDx8OZ2dnFkZrayu8vLwwduxYTJ06FR0dHSzNXl5e8PPzY2PFKyoqUFVVBQB47bXXAHR75C+88AIcHR0B9N9Sqq2tZa3LsrIybNmyBVFRUQC6u/wsLS3h4+MDPp+PBx54AABQV1fHwvrpp59Y2be0tERwcDBsbGzg5uaG4OBgAEBNTQ2srKxgY2OD7du3IyQkhOV5aGgovLy88NVXX0Eo7J7g7+DgAF9fX+zbtw8fffQRXFxcUFVVBTs7u5t/4B7I6v+af7+2567Vaik/P59EIhEREWk0GvLz86Pa2tp+z//++++Zx7127VqaPXs2EXV7SVZWVkREfTyW6OhoSk1NpYsXL9LFixeJqNuzSU9P75WOdevWEY/HY/u+/fZbunLlCkmlUnr++efZfqVSSQcOHGDbJo/YRGxsLEvD3LlzadOmTSwdptZJV1cXLV26lORyOfPsDAZDr3Q//vjj7HdUVBTzhgIDA2nu3LlERCQWi2n79u3svMmTJ1NdXR0RET3xxBOkUCjIYDCQnZ0dJSUlERHR3r17qaOjg13zwgsvsP8mr/HAgQMkEolILpfTl19+2Std7e3t9M9//pNtjxo1ihobG0mj0RARUUhICBF1t7RWrVrF8sWEwWCgAwcO0EMPPcT2TZ8+nfR6PV0Pn8+nhIQEIiKaMmUKicViEolEzItsbW3t9ZzWrVtHzc3NbHvLli0UHx9PRN3en0QiIZFIRA899BCp1WoiIho3bhw1NTXRI488wq574oknWL40NzdTfn4+vfDCC7Rz504i6m4tEXV7hQMRHx9PVVVVLO7U1FR2TCAQUEZGBhF1t0jEYnGf+zcYDFReXs48bI1GQxKJhLVYiouLac+ePUTU7VW+9957REQkl8v7TU9rayvl5uZSV1cXyWQyamxspO+//56IiM6fP89at5s3byZ7e3v68ccfqdss9a53SqWS9u7dy+rsp59+yloSOp2OZs2axc6Ty+V04sQJevzxxykuLo5aW1vpiSeeYGkaOnQoERF71iZCQkLYfQ4fPpzVP4lEQtu2bWPxmdJMRPToo49SU1MTERHxeDxKSUkhIqJDhw5Rc3MzyWQyWrhwITt/+PDhlJubS2VlZWRnZ0ddXV1UV1dH48ePpytXrhAREZfLZfl25MgRam9vJ3d3d1q9ejW1tLRQa2srEXW3BGH23P8PlUoFsViMpKQkeHt7o7OzE1ZWVrCwsGBvy+vZt28fZsyYAQDYuXMn3nnnHcTFxcHS0hKBgYEAuj+U9RxNUF5eDh8fH3zxxRcICAgA0N0XO2bMGHYOj8dDZGQkRowYwcKura3FuHHjIBKJUFZWBqB7eFR8fDz7ECeRSHDw4EFcvXoVAHDy5Em89dZbLFyhUIhFixahoaEBY8aMwYQJEyCVSmFrawsulwuNRoPKykpkZ2f3meLc2NgIoNtTG
j16NCZOnIjW1laEhYVh8uTJ0Gq1WLduHSZOnMg8h7lz58Lf3x+bNm3Chx9+iJaWFnC5XAwdOhQRERE4dOgQNm3axPpk1Wo1G9v7008/wc3NDZWVlZgwYQK8vb1x8OBBvPjiiyxNEokEM2fOxF/+8hdcuHABYrEYY8eOhUAgQGZmJkpKSjBp0iRcvHgR6enpeOCBBzB8+HAAQH5+Pn744QdwuVzs2LEDkydPBgB88803+OCDD3Dq1Kk++R0TE4PQ0FAcPnwYjz32GCoqKmBnZ8fybvXq1aiqqkJKSgqEQiG++uoreHp6IikpCUB3Cys8PBy7d+/G2rVrUVpaCmtra/j6+sLa2hrXrl3DM888Ay8vL9byMPXtXr16FTU1NXjqqacQFBSEOXPmYPTo0QDA7snDwwO7du1CTU1Nr3RLpVIcPnyYefRubm4s/M8++wy5ubkYO3Ys2tvb8dJLL8HDw6PPKCAul4ujR4+iqqoKzc3NICLEx8fjueeeQ01NDZYvX44//elPSEpKQlBQEDQaDYgIBoMBFy9ehFar7RVefn4+KioqYGlpyT56mvL2008/RWRkJKRSKerq6pCWlgZXV1eMGjUKALBt2zbMmDEDBw4cAJ/Px65du1hr6euvv0Z9fT0SEhIwe/ZsyGQyZGVl4eDBg1i2bBlmz56N+fPnIygoCCkpKayfXSgUIjIyEhkZGX365JVKJR555BEUFhaCz+dDIBBAo9Fg/fr1iIyMZH3tptb39u3bsWzZMvZtIigoCD4+Pjh58iQ+/vhjeHp6wmg0IjU1FcD/tS5NLZShQ4eCiHDw4EFWRwHgwQcfhI+PD3bv3o3169fDxcUFM2fOhEwmg0AggJWVFU6ePMk8/oGwWL169Q1P+DX47rvvVvcc+XE/EYlEOHToELKzszFnzhxYW1sjPT0dP/30E1xcXBAbG9vnmqNHj+Lhhx9GWFgYUlJSWLPM3d0dRASFQoHm5mYIBAIMGjSINYG1Wi28vLwgFAohEokwZMiQPrPPPDw8wOVy0dzcDA6HgyeffBJOTk44evQoMjMz8de//hVGoxEXLlzAqFGjYGNjA3t7e1RWVkKpVGL8+PEQCoWoq6tDV1cXwsLCkJ2djVmzZsHa2ho7duxAQEAA6/q4fPkyLC0tIRQKMWrUKNaUNZGeno4nnngCXC4X33zzDQICAhAZGQknJyeUl5fD2toaTU1NaG1txQMPPMAqSHBwMFpaWlBXVwej0Yjg4GC0tbWhtrYWHA4HLi4ukEqlGDNmDCwtLXH69GkQEezs7CCVSqFUKhEeHg5ra2ucP38eEydOZF0LHA6HVQw+n48hQ4YgOTkZlpaWGDt2LLy9vXH+/HlEREQgMjISwcHB4HA4UKlUaGpqgp+fH/z9/XH48GHMnDkTISEhUCgUKC8vx5QpU/rkSXt7OyoqKmBjY4Nr166hvb0dsbGx4PP5KCgoQGdnJ1paWjBx4kSEhoZCIpHAysoKBoMBwcHB8PT0xOHDh2FrawsPDw8oFArExsbC3d0dWVlZKCwsxAsvvABra2vw+XxkZ2dDoVCgoKAAo0ePxpAhQ2AwGKDX63H58mXMnTuX6ZC0t7ejtbUVAoGAGXsTnZ2d2LNnDxYsWAAAEAgEOHToEKRSKXQ6HWJjYyGVSiEWi2Fpadlvtw7Q/ZIwxREVFYXk5GSMGDECVlZWuHbtGqysrNhH9+LiYlRXV7PuCFMXlIm0tDSUlpZCIpFAKpXCzs4Ofn5+4PF4WL16NcLDwyEWizFjxgxERERAIBBAqVRCpVKxbsWJEyfC19cXp06dwlNPPcXKsb+/P2JiYlBcXAxXV1eMHDkSAQEB6OzshEgkQnV1NZ599lkkJiZCp9PhiSeegEKhwKVLlzBhwgSEhIT06pJLTU0Fl8vF5MmT4e7ujvb2dvB4POh0OjQ1NWHq1Kmor6+Hq6srAgMDIZVKUV5eDgAICwsDh8OBSCRCWloaiouL8eabb8LGxgY2NjZobm5GS0sL0tPT8cwzzyAiIgIymQwajQYcDgeNjY2wt7fHyJEjweVyIRQKkZaWhqqqKrz22msQCAQQCoXg8XiQy+Xw9/eHTCbD5s2bm1avXv1df/l4X/Tcb5c/graMVquFlZXVrxJXUVERzp49C0tLSzz22GMICQnBjBkzcO7cOXaOXq9nX8t7/r4XmPrS+2OguIxG402Fjm7lnPvNje7tRuj1enC53Du+x577e/6+F3mnUqmQkZEBmUyGffv2Ye/evXcVXn/odDr2su35G+ju2r3RsNmeI6V6smjRIvz4448sPIPBAA6HAy6Xe8f5ZEKlUvWa+Xm/qa2thZ+fHywsLPDqq6/C3d0da9as6XWO0WhEXV0dBAIBLCwsYGNjA6lUykaoAd0j81xdXWFtbY2//e1vUKlU+Pbbb/uNs7S0FJGRkTlEFNPf8f+5bpk75dcy7ACwdOlSBAQEYPz48bhw4QKkUilrCppexj0Nwr007ABuWKkGiutWjPZvbdg7Ojr6dGXcKpaWlnd1jz339/x9L/Kuuroan3zyCcRiMZ555pm7Dq8/ehrz64cM32w+xPWGnYiQl5cHW1tbaDQaFp6FhQV7Nnc7Bf/XNOxA95yLrKwsiMVieHh44LnnnutzjkKhAJ/Ph7W1NWxtbcHhcODs7NzLtmRmZuLSpUvQaDRwcHDAwoULB4zzZmXH7LmbMWPmV8f0rUupVN7xxLbfE2q1mo1K6zlq6k7DksvlvcTF+kMkEmHQoEEDeu73S/LXjBkzZgbEZAB/zRbx/cT04fpehXUvwjN3y5gxY8bMH5CbqX7+7o270Wjso5zWE71e/6srzP0eUalUveQI/sgYDIbbFhi7U0GyO0WlUt1Tdcfr5STuFRqNpt/JebfKneRFf8jl8n4lJ/7oGI3GW34+Wq32ps9AoVDcVBbDxM261H/3xr26uhqXLl0a8LhMJsMnn3zyq6XnXkup3isqKiqwadOm3zoZd4xWq2VCUkqlEh9//DGqq6tv+GLvydmzZ9m8gF8DIsLq1avxyy+/3JPw9Ho91q9ff0/C6gmXy0VFRQVOnjx5W2kxvWgUCgVOnz7NZnzeCRUVFTh16hT279+Pffv2DTif5HYxGo1M8+m3wmAwoKSkBOfPn7/heXq9Ho2NjTeVE66trb2hcmVPbvaB/3dv3Ds6OnppmffEYDDAzc0Nubm5v1p6XnnllfsmB3o3ODk5IT4+/rdOxh3zySefsBaYnZ0d6uvr4enpecvKlCUlJWw42f1Gr9eDz+ejuLj4nn0MdHJyui9rCvB4PHA4nD7zGW7Ehx9+yBRHnZycUFRU1K+swK1QVFSE+Ph4zJ8/H2+88QZOnjwJPz+/u5aTNhqNKC8vx5///Oe7CuduMQ3htLa2HrCbxPSBdMmSJX1GuOh0OhgMBhARdDod+Hz+LY/0uZmH/7s37oMGDcJLL73ECkNnZyckEgkbB1tVVYU33ngDDQ0N/aoc9sSkKtgTk5KeCYPBwCZkiEQi9luv1yMtLQ0PPvhgr2FaWq0W7e3t7EHrdDomxSmVSvttavenOX59xdbp
dH1m0N2IEydOICQkpN9nYDAY+u26am9vHzCOnh7z9eeIxWLmgZhedO3t7ewaU17J5XLWZO0ZRktLCzQaDfR6PVpaWlBWVobjx4/DyckJTU1NaGpqwrPPPgt7e3umbQN0dzFcn4emCvXLL79AIBD0aVk1NDT0UQFsaWlhHp9MJuvl/d1KE9t0nzExMZg8eTK7X1MFNaFQKNixlpaWXsfa2tp6pauurg7z5s3rFY9UKu2VDz21RHruN/2+XgLWdC/nzp3Dgw8+2Etutj9UKhW0Wi0SEhIwatQoSCQSdHR0IC0tDQ4ODujo6OgzA7Wzs/OG3aI///wzpkyZwp6xaWif0Whk1/XUajJxfUvBlK9isRhtbW3o6OjAsWPHYGlp2et+Ojs7+63nQHcZNKXDaDT2uheDwdCr9SiRSG5qPE11Oy0tDVOmTGF2oaOjg9Vxo9EIvV6PH3/8sY/KY0NDA9RqNdPp4fF4OHbsGKZOndqvEqhJ892ESSdnIH7Xo2UqKiqwceNGvPzyy0zsKSUlhckFxMbGor6+Hh0dHYiPj4dGo8HSpUv7DSs1NRVGoxHx8fGYMmUKAgIC0NTUhNLSUhARwsLC4Ofnh4SEBLS0tCA8PBzXrl1DTU0NVqxYAalUiv3792P48OHIy8tjU6QzMjKgUqlQW1uLJUuWIDk5GUKhENHR0dDpdEhJScHy5ct73ZNMJkNeXh57UeTm5kKr1aKoqAgPPPAAbG1tceHCBQgEAhQWFmLMmDE39bxSU1OhUChQVFSEiooK5tGIRCKUlJTAysoKzc3NbL9MJkNRURGICIGBgUwioWc6c3NzwePx4O3tDZFIhCeffBKZmZnQ6/VoaGjAggULwOPxUFJSgvb2dlRVVWHRokXgcDjIycmBTCaDUCjEk08+iaSkJKjVagwbNgytra2oq6vDokWLwOPxsHfvXrS3t6O6uho8Hg+ZmZmYOHEiuy+BQICYmBjk5eWhpaWll664VquFra0tysvLkZaWhqSkJHz44YcA/m/lH71eD7VajZCQEMjlcpSXl7PZpJmZmejo6EBERAQcHR2RkJCAZcuWDfic8/PzoVQq0dDQwGb9cjgcXLlyBZ2dnWhtbYVGo8Fzzz2HCxcuoKKiAu+//z5Wr16Nxx57DDNnzkR1dTUrW1OnTkV4eDhyc3OZSBXQPQPTVCZef/11AN1a4z4+PnBxcUF5eTlGjRqF8PBw5OTkQKPRoLa2FlwuFwsWLEBRUREaGxuhUChw9uxZrFy5Evb29ti6dSumTZvGBOB6otfrUVlZCYlEAkdHRzYeva6uDkVFRThz5gxmz57Nrs3IyICFhQWqq6sxb968fsddDx48GB9++CHWr18PJycnTJs2DQ0NDSgvL0dSUhKmTp2K9vZ26HQ6hIaGsrInEomQmZmJ2bNnQyKRoKCgAFqtFj4+PsjKysKkSZMQFxeH0NBQVFdXIywsDESElJQU2NnZ4eLFi5g1axYzgFVVVVAoFCguLsbzzz+PkydPIi8vDx988AEUCgUyMjKYxEh+fj5UKhXkcjkmT57cb90rLCxkq2qdOXMG7777bq+84/P5EAqFWLhwITo7O3HhwgX4+/ujtrYWAQEBrP6ZXiJ/+ctfAHRr6I8bNw6lpTp3UXEAACAASURBVKXw8fFBWFgYW3nNysoK1tbWbFGegSSWGQOJzvyafwMJh9XX11N4eDiT19y+fTudOnWKsrKymBTnc889Ry0tLVRdXU0uLi59wtDpdFRdXU2PPvooSSQSSk5OJrFYTHK5nObNm0cJCQmUmppKBQUF1NraSo2NjfTGG2+QVqulmpoasrW1ZeJajz76KAtXqVTSihUr6PTp05SQkEBLliyh5uZmqq+vp9dff51J0To4OLBr3n77bSZvu2nTJjIajTR58mRau3Yt1dfX0+eff04Gg4E2bNhAp06dIiLqJTR2IyZOnMgEyiIiIkihUJBGo6H33nuPhEIhyWQymjx5MhER1dbW0rhx46i1tZVkMhnV1NT0CqumpobOnz9PK1eupLfffpuIiPLy8mjZsmV05swZIuqWkiUi+vnnnyk/P5+IiL788ksyGAy0fv16SkxMpPPnz9OKFSuYBGtPwSUfHx8mJDZ79mwmE0xELJ2pqamUlZVFhw4dIiKiWbNmsefXk66uLiahKxKJmDCVSdwrMzOTkpKS6IsvvmB5mJqaSufPn6fi4mKKi4tjAmfLli3rI39r4vLly0xI7fvvv2fpz87Ops8++4xEIhFt3ryZ1q5dSyUlJZSRkUGPPfYYERGtWrWK9u7dS8uWLaNHH32UFAoFrVu3jk6fPk0ikYimTJnC4vnkk0+ovr6ejEYjNTc30/79+6mgoIB++eUXVu5XrlxJISEhVFFRQVu2bKGOjg7as2cPvfnmmySRSOjEiRNERLR7926aNGkSC/v8+fP93lvPZ9kzLxYtWkTLli0jom7Z32+++YaIiLKysmjDhg3U0tJC3377LRNw64/MzEx66aWXKCQkhJqamuj06dNUX19P06dPp0uXLhHR/4l5zZ07l6XRtO/UqVNUV1dHr7zyChERE0aLjo6miooKVj9ffvll0ul07JhGoyGNRkNLlixh8sDnz5+n8vJyqquro6+++ooaGxtp27ZtFB0dTUTdQmn4j3CZSTyuv/vZvXs3GQwG+vHHH9m1lZWVNH/+fJYek9AcUbdQW21tLRUWFtKhQ4do6tSpVFdXR8uXL6fw8HBmL3qWg4iICKqvr6eXXnqJiLrF5EwickTd9QF/VOEwLy8vREZGMsGsBx98EGvWrMHVq1cRHh6OqqoqdHZ2QiAQoKSkBA4ODn3CMBqN8PDwQHZ2NpYvXw4ulwsPDw9cvXoVHh4ecHd3Z14Yn8+H0WiEWCwGj8fD1atX4ePjw5q9165dA9DdhCstLUVcXBy8vLzQ3NyM2bNnw9XVFUSExsZG9rY3SaaqVCrk5uYy72DBggVsXcrQ0FAkJSXh8ccfB5fLxYwZM7Bq1Sq8+uqrTFTsZnh4eGDo0KHo7OzEtGnTYG9vj5qaGnh4eMDX1xdlZWXMswoICMDw4cPx7rvv4rPPPkNQUFCvsNzd3REREYGCggJMnToVer0eer0eP/30E6ytrXH8+HGMHz8eRUVFWLduHdM4mTdvHnJzc/Hll1+y+GfOnIlRo0bBw8MDXl5eALqbmHPmzGGaLiUlJb3SYBJiGjx4MKqrqxEZGQmgu5tj6NChfe69rKyMnSMSicDj8ZCbm4vQ0FCcPHkSRUVFGD16NDZu3Ah/f3+cOXMGpaWlmD59Otzd3VFWVoYhQ4YA6O4+uV7+1sSRI0cwcuRIAN3CcKb079+/HwKBAKWlpXB3d8czzzwDPz8/FBYWsm8GPj4+CAkJwYEDBzB48GAkJSVh/PjxGDNmDMrLy9koCqFQiP3798PPz48tzNDV1YWoqChcvXqVabfU1tZCp9Nh79698PT0RHFxMTw8PPD888/j4MGDLJ3FxcXMIwaA6dOn37Ac5eTk9NKcqaurY7OjXV1d2SIbu3btgr+/P1JTU+Hp6dnvePWWlhaoVCq
oD+2djhw4cHhZd6enqorq5mz549ZGZm4unpyWOPPcbPfvYzAgICCA4OJjQ0lIqKCtzc3FiyZAm9vb2sXr1a8smbmpoIDw+npqaGiRMnUlBQIBk/fn5+EndtNBr59a9/TWpqKkFBQbhcLmbMmDHofJTjdXZ2snr1anbv3i2fF6vVyrlz53C5XMyZM4e2tjaysrIkHmKgVCoVd911Fx4eHmg0GgICAsjOzpbgLrPZPIzUqWTPOBwOFi1axObNm4mLi+M///M/+ctf/oLFYqGmpoZbb72VtLQ02eaCgoK4cOECkydPxt3dnY8++og33niDpUuXkpuby8aNG3n77bfR6/UUFBRw3333MWbMGBobG5k1axYRERF0dXXJkv+f/vSntLS0sGLFCsaPH88vfvELHn30UTZs2IDVaiU4OJh169bR2dmJh4cHGzZskDA0m80mn581a9bwl7/8hdjYWMaMGSOpkHa7nVmzZsnUy7i4OFmUNXDD1mg0Mn36dJKSkoY9TwN1WZ27SqVS09+xbxFCKEDyJpVKFfHV7yMABZ1oBAZ6fkUDw7a9hRBvCSEyhRCZoxnhKtRAlUrFe++9R0FBAX19fdxyyy3odDqOHz9OYWGhzOL4r//6L1asWCFnpvn5+Vx55ZUYjUaWLVuGTqfDbrdz4MAB5syZg8FgYPPmzZJd8eabb3LNNdfIz79w4QILFiygtraWHTt2yOWUwWDgqquuQqfTsWXLFl588UV0Oh3FxcW4ubnJooinnnqKyspKTp48yeeff87zzz/Pxo0bgf6Y3mhOQNu2beOBBx4A+pekJ0+e5KabbsJut7NlyxZ++ctfUllZybFjx1i3bh1Tp06VJLmhUhq1QkU8d+4cAKdPn2b69OnodDpJe1RWFBs3bqS0tJQzZ85QW1vL73//e6ZOncr58+eBfvTvAw88wPz587nttttobGxEpVJx6NChQVZnBw4c4MMPP+Q3v/nNIBjaQPX09JCamkpYWBhvvvkmCQkJdHV1STZIV1cXCQkJXHHFFYSGhtLQ0EB9fT0xMTHSZAT645UZGRnMmDGD1NTUYZ1Ke3s7er1ews8Uh6fPPvuMrVu3UlJSwuOPP87evXt56623CAwMZNGiRXzyySdYrVZUKhUlJSWsXbuWa665hsWLFyOE4NixY6SlpVFRUcH7779PcnIytbW1gz67ra2NiRMnStbNiRMniImJkdWZLS0t/PrXvx5WadnZ2SnZIxqNBg8PD8rKygZNTFavXk1iYiJ1dXWkpKSQlJTE6dOnWbFiBddeey1dXV28+eabzJkzh1OnTpGWljbIeFoxuD537hyhoaG0tLQQFBREUFAQbm5uOJ1O6QFaWlrKk08+yYMPPig55h0dHaxcuZJZs2axdOlSnE4nWVlZHD58mICAAHJycgZdC6vVSkVFBTNmzECv11NTU8OaNWtklktgYOCw2b6bmxu7du3i6aef5sYbb+TkyZN0dXVx6NAh0tLSmD59OhkZGej1eqxWK1988QUzZswgJiaGDz74AH9/f/bu3YvZbKa0tBS73c6CBQuIjIwkKioKrVbLFVdcQVtbm7TRmz17NgkJCezcuZOlS5fy61//mh07dpCbm4uXl5e05Dt37hzXXnstKSkpjBs3Dm9vb/bu3cupU6fYu3cvn3/+OR999JG8t8oeiRL+bW5uxs/Pj9OnT5OcnIzT6WTLli0sX74cg8EgK4iVwrSTJ0+Sn5/PmDFjhu3HDNXlZMuogI1AkRDijwN+9XdAyfm6A/h0wM9vUalUWpVKlQCkAIPv8GXKYDBIr0yj0cj9998vnchdLhdffvklbW1tlJeX09vbKzsfZZRLS0vDZrORm5uLw+GgpaUFrVZLamoqDQ0NnDp1itzcXKA/zl1WVjZog0jZbDlz5gw5OTmUlZUB/fFAJTWsoqKCJUuWAP+wwVNAYdAfqzxy5IiMM86ePRuAxx57bFTP1aamJoKDg6mvr5ffKyUlRS4BlY27a665RnYkra2tMvY4VIqDDPxjg81oNEr0r7I6Ub77wYMHcTqdmEwm1Gq1vK5NTU0ATJo0SRINq6qqZCNTjqPM+JR9hqqqqmEPrCLFWxX64+9OpxOj0ShnJQqG2Gq1cvDgQcrKymhrayM+Ph6bzSY3wSZMmCDt75T7NFBarRa9Xo/JZKKoqEguhysrKykuLqa7u5vGxkbS09Opra2VRMSSkhLy8/Pp7e0lMzNTDsiKRVx8fDwul4vz589TU1ODwWAYtunX1taGp6cnbm5utLa24uXlJUM1+fn5rFq1igULFgw6X8Ws2tfXV5rDtLW1MXnyZJqammhvbyc/P5/rr79eHle59iUlJSxZsgRPT0+52Wo0GsnNzSUmJmZYhktAQAAzZsygpaVFGlkoPqPKvVXsBBXbwVmzZuHj4yONpZWQXFdXFx0dHWi1Wrq7u4dt+rm7u5Oenk5XVxcGg4HTp09zww03oNVq6enpITk5eRiGQOGi22w2XC4XixYtQqVSsXDhQjnhUqlUtLe309TURFNTk5xkmUwmVCoV+fn5nDhxgpaWFtra2iS4LCAggNbWVqxWK1VVVXR3d2MymXB3d5fuV4GBgRw9ehRfX1/pOKVgnRctWiTTGFtaWujq6qKlpYXW1la6urpYtGgREyZMkM+Gh4cHLpdLeiMoufZK+E2xikxKShr0PCkb8EM3uy+mS+IHVCrVHOAYkA8o67Sn6I+7fwDEArXAj4UQ7V+95/8D7qI/0+YhIcTotveMjh+4+uqree+992hpaSEuLk5WllVXV9PT08O4cePkAx0aGsrp06fJzPxHJW5TUxPNzc1MmDCBsrIymb8L/TPn3//+9xQUFHDq1CnsdruMxypqaGigsbGRyZMny4df2Wj6z//8T4xGozTogP6ZlMJrNxqNBAQEoNPpMJlMGI1GSdyDfrefSZMmSaOLgers7KS0tJTExESCgoKoqqqSS/jGxkaCg4PlAFZZWSlNL0aTEEI6yihL3PDwcBk/H7h5Bf2uTC0tLWRkZKDRaGhubqa3t5fQ0FD5uWVlZbhcLoKCgqSLUm1t7aB0zYaGBmnBplgRjiQlVpqamkptbS0ajUYaHAghqKmpQQiBv78/Go2GpqYmSSZUNgTr6+vp6+tDr9fLdjJQLpeLkpISzGYzEydOpKioiJSUFHQ6nUzxCwwMJCEhgSNHjpCRkUFAQAD79u1j7NixxMfHYzabqa+vJyAgAK1Wi5eXF0IIcnJyyMjIkLCugWYigDRJVmLMVquVgoICvLy88PT0HHHFBf2ZMcrrFCRAU1MTHR0dhIaG4nK55AD96quvSvqgv78/CQkJaDQaysrKqK6uxmKxsGHDBl566SXGjh2Ll5eXTL8D5KaiYhAdFxcnaYoqlYry8nLZ4el0OnQ6Hd7e3nR3d9PU1ERUVJQcwJSJUlRUFDExMcPuR0FBARaLhfDwcNzd3QkJCaGxsZGDBw+SmJg4bKCD/sG9rKxMrr4G2tW1t7fT19dHQkICPT097N27l9DQUEJCQmT2jqenJ/Hx8RQUFODh4UF6erqcOAUHB5OY
mEhPTw+BgYFs27aNn/zkJxgMBgwGA729vUycOJHS0lICAwPR6/WD9qSqqqrw8vIiIiKC3t5evLy8pB9uenq67HOUimKz2SxDSZmZmbi7u7Nt2zYmT55MUlISH374ISEhISxYsEA+b7W1tfT09PDnP/+Zjo4ONm3aRE1NDfHx8aPiB77XbJlp06Zx6tQpOjs7JQZU+bIXK0G+mBoaGqiqqmL27Nn8/Oc/JyIigqeeeuqy33/27FnWrVsnB4TLBQEpLjkqlQqn0yln5wONCYa+/nLKxy+XQtjT0yNd7y9Hl7q+l8vLuNxrpMxSRquCtNls8tydTueIgCb49uc99HparVbUavWo11455qWqN5WsImXAgssrSx/KFlL+r6T/Qf+D//zzz/PUU08RGRk5iH3/1FNPMWXKFHx9fWlpaeHqq6+WtQsDpXDbR7tXSshE6exHuo7Kz5RrOFrbVGbT/v7+uFwuOjs7ycvLk3z60WB5A/nuQz9XWTW7u7vLAS05OZmkpCTq6+sJCgqSqxCtVktTUxNqtRovLy/Cw8PRaDT09vbicDhkiKu2thZPT0/pENXa2ioHWZvNJs9FMUxxd3eX98fhcGCxWPD19R3UdpQq4qFFYZ2dnRJzbLfbh71v165dtLa2StexX/3qV/+6VMjGxkbmzp2Ly+UacSPumwKMysrK2LVrFzabjQceeGDUWdNIcrlcHDhwgBkzZlyUJTGSBjZytVo9zHD7Yq+H0b/v5TI/RprNXkyXur6Xy8u43MHvUp3jwEFptI4dvv15D72eFysx/zq8lYHn/HVYI0Ovi/J/BQtst9s5deoUDQ0NtLW1odfr5cpB2Yfo6ekhPDycq64avZ7wUm156L7YSN9B+dmlcrUHpvYpnPVz584xderUi1JQR7sXiqWfouTkZMLDw2VVqZJVonBlFEMUp9MpV5omkwkfHx+5ulaY7QpkbCCuV+mclcFL2ZQfOBBrNJoR0b8eHh7D2qhKpRp0TUZ6VjUaDSaTiR//+Meyz7pk1OX7PHP/QT/oB11aZrN5UMUl/N8FzvX19dHT0yO580PrSZTN+vr6ejw9PdHr9Xh4eNDb2ztoAtHZ2SndlBQMstVqxeVyDTqmUrfg7+//jXC+30ZlZWWMGTNm1Jn7D2yZH/SD/sXl6+srV7dOp/Nrd+wXq/B1uVyDcAzfhf6ZE8ru7m5sNpvMLhpYzatUl3Z0dNDb2ytNSqxW6zDCore3N1qtVvoau1wuWaQ2UJ2dnTJMNlT/bIDcpSqzf+jcf9D3UkpJd1dX14idy5EjR3j88cf/B87s+yklJHK5eyoDdbHwktVq/UbHHE0KMuBiGthpfd2BQKPRyPPt6OgYdO7u7u50dXUREREh9/Camprw9fUd5keqVqtRq9XyXEYK9/X29sp8+u/yGo2kka7DJUOM/6yT+a7U3t4uH+6Bm0jfpdra2mhra7tko1NSAYeqvr7+azNovgt9W0qj3W4fNdf+f1KlpaWUlJRQV1eHm5sb586dIysri5KSEtkWfH19v/Y+wqU+Mz8//2tTDL9rDTQK/+8ImY72PCnPwsX2N76ulI69r69Ppsgq5txKWl9XV9egSmur1SpTV0fTQEibVquVg5Wfn5+sdC4uLqa2tlbeX2VjMyYmZsRjKrrYwDe0wx96v77LMI0yyWloaCA/P5+enp5Lto/vfef+ox/9iNdffx3o3yC69957v7Njb968mQceeAAvLy+2bNnC7bffPuLrlMZz8803j/j7ZcuWjVqkM5KG8l8uV2+//bYsOAJGPd/L1W9/+1teffXVUQet/wkJIcjKymLlypXU1dVJU+k33ngDo9EoO6MpU6ZwOebuu3btYvPmzSP+zmazyapiZca3Zs2a7+Q7fFM1NTXx4YcfcuLEicta1g/tnIfWGlxKI3VeA9+rZMZ8F5OX7u5uCQg7fPgw/+///T9effVVTCYTvr6+9Pb28vnnn9PY2Mh//dd/YTQasVgsPPLII6xfv37UyZdaraajo0MCwLy9vXG5XHh6etLc3Cy5TB988IHk4sTHx+Pr6zts8FLunZKvP1QDB5Kh5zOaC9PA114OMG6oHA4H/v7+CCE4c+YMP/rRj4BLh32+9527w+Hg2muvlUS7t94a0ej7G+mVV17hjjvuwNvbmzvuuENCi4ZKiWEqcbmhN0ij0Ui0weXohRdeuGzi4UC98cYb6HQ6iY39NmxwpYrxmWeeGbYk/Z9Ub28vWq2W6Oho3nzzTZxOJ2azmfnz53PllVdedLaudEBWqxWn04nD4eD999+XRVhD5enpyR/+8AcyMjKAfqLnn//858s6z9EeLIW5MvD3l+oYnU6nNI1oaGjg6NGjJCcn4+bmdsl4d21trUwDVJC7gPQNVXSpuDr0149UVVWhUqlwc3Oju7tbmlsondPXGbgGfqbdbpepzB0dHezbt48777yTZ555Bjc3N5nCOm/ePFpbW6moqCAoKAgvLy/a29tZvnz5sPY+EGM8FHPs7u4uKZbV1dWsXbuWlStXDnKVGuikJISQ8fru7m58fHzw9fWV119BHw8cDDw8PCgoKJD9gcJlHyiVSoXNZhuEDxjaHoQQw6qaB0rJunE4HJw+fZqEhATc3Nwu6jgF/wKde2RkpHRTV8rcgUGlt62trYNA9oDEh15spFQalNFoxGw2D/JONJvN1NbW0tnZKRuDkjalXOza2lr6+vpIS0uTqUwKdGo02Ww2bDYb3t7e9PX1yQFDIciN9BAqBMCgoKBhQK/u7m4MBsOg1/f09NDW1nZRdvj58+eHbbx1dXXJSkdAdpBKg4d/dCAKkU95XXNz86DXKNfnUiXSQyWEoLm5mVmzZpGVlSVRtUqZvPKwdXR0yMIlRUqRkslkkqS+0tJS9Hr9iJ9lt9ul+1BhYSEFBQVERETIjlm5jorMZrMsqLLb7RKH29fXR319PS0tLbJzt1qt9Pb20tzcPAxiNlTFxcU0NjbK6kjFJUgJJQxUR0fHoJWW1Wrl/Pnzsg3Y7XZaW1upra0d1C5KS0tHHGT6+vqwWq00NjaSn58vy/N7enpoamqiu7sbq9XKmTNnhrVPs9ksq8NHkvLcFBcXy0I4h8NBWVkZlZWVuFwurFYrvr6+8rWtra1UVlai1+tpamqSOeYhISGyvQ2t9jQYDNhsNhobGwe1387OTqqrq7FardTX15OYmCgL7np6emRbamxspKysTN7v+vp6mpqaaGtro6enB6PRSHt7uyzoKysro7u7m97eXtRqNQaDgYqKCtzd3SWyYaCam5sxGo1y89ZiscjBvKamhvLycmw226D3KRXQys/a2towmUy0tbURFxd3WQDC73XnfujQIe655x40Gg11dXUcO3aMd999ly+++IKcnBzmz5/P7373O7RaLfPmzcNut+NyuXjhhRd49913aWxs5JFHHhnx2ApvJjg4GK1Wy4svvih/t3PnTgoLCzl69CgPP/wwAJ999pnkvQC89NJLGI1GHn30UR577DE8PT2pqKhg165dJCQk8Itf/EI2RkWfffYZJ06cYNq0aRKA9sgjj7Bo0SL
0ej0ZGRm8/fbbw85Vo9Fw4sQJHnvsMcLCwtDpdJSXl0v395deekk+dCUlJWzZsgWn0znqhmNpaSkff/wxkZGRGI1G6uvreeutt6iqquLDDz+U5g8ff/wx48ePx2Kx8Jvf/IZ7772Xv/3tb6SmptLZ2ckHH3zAjBkzZPhEwTAA3HbbbXh5eclKusuVy+UiKiqKVatWkZmZicViIScnh0mTJuFwOHB3d+dXv/oVBoOBxsZGXn31VTo6Oti/f7+sLM3Pz+fBBx/EarUSGxs7iEEzUBUVFcyfP5+kpCRCQkJ4/vnncXNzQ6vVkp2dzdatWyksLGTu3LkcOnSIkydPcuWVV0oi4VNPPYVKpWLr1q088MADrFixAuhfYe3du5fNmzfj5ubG+vXrh5WK22w2Hn30UR544AFqamrYtm0b3t7ebNu2jerqatzd3SkoKOCDDz6QE43nnnuO6upqiouLeeqpp6ivr6ehoYGbb76ZV155hePHj3PrrbfS3NzM8uXLefHFF3nyySe59957aWho4OTJk4POQWG3Hz16lDfeeAOtVkteXp4E0h0/fpwHH3yQAwcOUFRUxCeffILT6aS4uJgNGzaQk5PDrl27uOuuu4Zd256eHv7yl7/w8ssv43Q6ee655/jd736HyWQiOzubc+fOIYTAy8tLdtLQP2AcOHCAm2++mfDwcBoaGggICKCjo4OHH35Yvq+trY0XXniBU6dOcejQIR599FGCg4MloqCmpoba2lqqq6tZt24dqampWK1WPvvsMw4fPsz+/fu56667OHv2LJ9//jnPPvss+/bto7m5meeff54DBw7w05/+lIMHD8rQ0JdffklTUxNz5syRGTabNm3iueee49y5c5w4cWLY3l1WVhZHjx5l+fLl7N27l48//ljehyeffJKSkhIuXLgg0Q0ff/wxf/3rX7FYLLzyyiu88sorvPHGG5w7d47m5ma2b9/OggUL0Ol0l9wj/F537rm5uRInEBoaio+PDwkJCdTX18tS4yVLluDr6yvZE93d3WRlZREbG4vJZBqxAMrpdKLT6Zg4cSIxMTFoNBqJHTh+/DgbN27Ey8sLh8Mh2Sa5ubmMGTMGu91OaWkpISEhZGZmEhsbKzkoZ86cwdvbGzc3NwkqGyhfX18mTpxIRESELJLw9vZmwoQJeHp60tTUhIeHx7DVRl9fHwaDgfT0dLlyyc3N5bbbbiMwMBCNRiPDPBs3biQmJga1Wj0ifVJJAVOyBqKjo9m6dSuLFi0iLS2NzMxMdu/eTXd3N97e3gQGBhIUFMTPfvYzrr76aul0ExISglqtRqPREB8fj16vlzNUd3d3/vznPxMcHMyYMWO+lslIS0sLoaGhaLVapkyZQnFxMU6nk5CQEIk4OHz4sAQqxcTEYLFYiIuLkxWWS5cu5f7778fb25vJkyczYcKEYZ/T19eHTqcjOTmZ5ORk/Pz8qKmpQaVSYTKZ+Mtf/sLcuXPx9PRECIGfnx8ajQYvLy/sdjsGg0GGAhTI1tixY0lMTGT16tW88847BAQE0NfXR2RkpIwDK1Kr1bz33nucOnUKtVpNXFwcdruduro6Fi9eTHJysuTn2Gw2ioqK0Gg0hISEEB8fz86dO2loaMDT01NWWp49e5bm5mbCw8O5+eabmTZtGlu3buX8+fN4eHgQEhIyLJ5utVo5duwY48ePZ8yYMSQkJFBYWIjNZiMhIQEfHx9CQ0Px8vLCZDJJzO9nn30m878HeowCctXy2muv4e3tTXR0NCaTid27d0uXqqCgIMaMGSOfEQU/rNFo5GerVCrOnj3LNddcQ2RkJE6nUw7qx44do6mpCXd3d9RqNaGhoWg0mkEr2+DgYImD0Ov1HDt2jG3btqFSqYiIiMBsNlNVVSXj/ampqcTHx3PjjTfKauLk5GSioqIoKirCy8sLjUaD2WyWufTbt2/nwIEDMrTW3Nw8KPau1WqJiIggKCiI8PBwDAaDnJTt379fmoRERUXh5eXF7t27pbWhh4cH4eHh7NmzhzFjxhAREUF8fLxEnFxSSsrZ/+SfqVOnioHq6+sTQggRHx8vhBDCarUKs9ksli5dOuh1L774ohBCiNzcXLFx40YhhBCvv/66KCwsFEII0d7eLnp6esRI2rt3rygvLxdCCPH000+LkpISIYQQU6ZMEf/2b/8mWltbhdlsFna7XTQ3N4vk5GThdDpFe3u7mDp1qnA6nUIIMeiczGazeP7550V8fLzIyckZ8XM3b94sSkpKRFdXlxBCiJUrV4qKigrR2dkpFixYIIqLiwe93mQyCSGEmDhxohBCiNbWVtHT0yOuu+46IYQQOTk5Ij09XV63wMBAIYQQ+fn5wmw2D/v8vr4+sXv3bjF79mz5s+DgYHnsP/3pT+L48eNCCCHuvfdesWHDBnk/hBBiyZIl4g9/+IMQQogrr7xSvPvuu0IIIU6ePCn/LYQQf//730VMTIyIiooa9D0upsbGRvHll1+K8vJyYTAYxNmzZ8Xy5cvFkSNHRE9Pj7Db7WL9+vXi4MGDIjs7W9TV1Yn6+np5fsuWLROPP/64sNlsorGxUTz99NPDrqcih8MhPvzwQ7Fz505htVrF7373O7Fw4UJRX18vfvOb3wi9Xi8sFovYuHGjWLZsmbDZbCIvL09cf/31oqurSzz77LPi0KFDwuFwiPb2djFnzhxx4MABUVdXJwwGg/D39xdlZWUiOzt72H2w2WyioaFBvPPOO+LZZ58VoaGh4tZbbxXV1dXi+uuvF2fOnBEXLlwQDz74oDhw4IDIyckRmZmZwmKxiPz8fLF7927xox/9SBQVFYmcnBwxZ84ccfr0aXHTTTeJNWvWiI6ODrF7925x9OhR8cknn4h///d/F4GBgaKyslJ0dHQIm80mhBDCYrGIP/7xjyIyMlKYTCZRUVEhnnnmGfHuu+8Kg8Egtm7dKt566y1x4cIFcf/994u9e/eK1tZWkZycLFauXCmOHTsmKisrRWdn56DvZ7FYRF5enli1apVoamoSlZWVYsaMGeKWW24RBoNB3HrrreKxxx4bdk+cTqd48sknxaRJk0R7e7s4cuSIuO6660R1dbXo6OgQSUlJoqqqSrS1tYlx48aJ/Px8cezYMSGEGHYOTqdT9Pb2inXr1onc3FzR0tIiJk2aJJYtWyZqampEVlaWmDlzpti/f7/405/+JJYsWSLq6+tFQ0ODqK+vFy+++KJYuHChKCwsFOfOnRMrV64Uzc3NYvfu3WLt2rXCZDKJpqYmsWXLFvH000+LgIAAcezYMZGfnz+sTb/00kvipZdeEgaDQVx55ZWiuLhYFBYWiueff17MnDlTBAQEiJKSEtHZ2SlCQkJEXl6e2LlzpzzPO++8U9jtdpGdnS02bNggrFarEEKI8+fPC+C0GKVf/V7O3JWRz+FwcOLECcrKylizZg1+fn6S4rh161aJsf3DH/7AsmXL+Pjjj0lPT5dLYMVhZyQdOnRIzup37NhBU1MTmzdvZsaMGTgcDoKCguSS7f3335d0SA8PD4KCgvDw8MBut9PV1cXJkyfJzs5m3MWbAA
AAIABJREFU9erVPPHEE7z77rsjptR9+umn/PGPf5TFEL29vcyePZu4uDj27NnDU089JTfHFCnnWFhYiMlk4r333mP37t3SPPs//uM/mDlzJu+//z6VlZXy9bGxsSM6tahUKumSo8wklb+VEMisWbOAfpjU4sWLB804e3t7ufrqqzGbzXR2drJ48WJqamp45plnmDNnDhUVFbz66qvMmTOHF154QdIghRD89re/HfFeWK1WDAYDJSUl7NmzR5o9REVFcf78edLT02XpuaenJxaLhcTERGlCsWvXLiorK/n888+ZO3cu77//PiUlJWzfvp3AwEBOnjw5LD6pVqs5ePAgY8eORavV8t5775Gamkp2djaBgYF4eHjg7e3NG2+8wZIlSyQ3xdPTk+PHj/PJJ5/g5+fH/v37aWtrY/HixcyfP5+IiAgiIyPx9/cnOjpaUiYHSvEhqK6u5pZbbmHFihWSjxIbG4tOp+PQoUP8/e9/R6fT4eHhQUREhHRv2r59O4888ghBQUE4nU5p/vDpp5+i0+mor69n2rRpdHd3U1xczK233srq1atlmbtaraarq4vKykoqKirkfoZiRhEQEIDT6eSTTz5h4cKF+Pn5cfDgQTQaDUVFRUyePJmgoCDS0tKwWq3D0hSVNEOLxUJTUxMXLlzgqquu4rnnnkOv11NVVcW8efMGhRXsdjvu7u5s3bqVtLQ08vPzqa2tlbC+3NxcMjIyqKiooLKykpSUFLy8vKRb2UD6ZFtbG319fbS0tHDkyBHa29vlnlpHRwfHjx8nKyuLZcuWMWfOHI4cOcLcuXMJCwsjPDwcu93O/v37mTt3LvHx8fT09DBjxgxsNhs5OTksWLCAoqIiTp48SWVlJTfddBM33XSTNP1Q5HQ6cTqdXLhwgeTkZBwOh6Synjhxgr6+PtavX8/KlSvp6uqSBirR0dEkJydTWVlJaWkpGo2Gjo4Odu3aRXp6uqSeXmqj/nvZuStSGk9aWpqsOFNUV1cnSYMKEzssLIzp06fLRnvhwoVRswTOnDkjN0EVJOfkyZNZtWoVoaGhFBcXYzQa8fPzIzw8nOjoaLq6uvD19eWGG27gyJEj5ObmkpKSgtPpZNasWcyfP5/S0lIZmx0qhWSoYIybm5uZNm2arHBTdulH0lVXXUV1dTVXXnkldXV1kgLp7e2N0+kkNDSUhIQE1q1bJ+N4o+XolpSUMGbMGBn+ue+++6R14EDDj/HjxxMVFTXouo8bN46IiAh6enpYsGCBXOoHBgbS0tJCUlISV155JU1NTRQVFcn00YaGBg4dOjTi+Sh0xYMHD/LZZ59JiFV3dze33nqrzJDRaDRcffXVlJeXU1dXR1VVFf7+/jQ1NVFYWMjChQupq6sjKiqK1NRUJk6cSE1NDQEBAYOWygq2tqioCH9/f/r6+pg6dSo2m43ExESmTp3KTTfdRHZ2NuXl5cTFxeHu7i7xvgqlVDFeMZvNTJo0SS7l3dzcuPPOO8nOzqa2tnZYipxWq8Xf35/ExESamppITU1l1apVlJWVMWHCBPz8/NBqtTJkkZyczLXXXsvp06cl9jchIQEvLy98fHzw8vIiNzeXhQsX4ubmhtlsRq/Xk5ycTGJiIvX19ZK3Av0bnRqNBovFwrXXXsuaNWvIy8sjMjKSpUuX4u/vj4+PzyA4mPLdkpKSuO2220hMTJROQ0MzmLRaLSkpKcTGxpKfn4/JZGLFihUEBwfLbBPFaUqRw+Ggs7OTzMxMVCoVkZGR1NXVyTCL8j5vb2+ioqK4/vrrKSoqoqSkRBpK9/b2ys1o5TwiIyMlrXLmzJnU19eze/duoqOjuemmm1CpVDI8p2zeBwYGotPpSEtLo62tjZqaGsaPH4+npydhYWHY7Xb0ej0hISFERUVRXl5Oamoqer1+UHaTYjQ+ZcoUKisrKSsrIzk5GZPJxIwZMxg3bhxtbW1MmTJFmqTceOON7N27l8rKSlQqFZMmTSIoKIjKyko8PT1lda3yPFxM/5JsmYtRAS+lkydPMnnyZFavXs327du/0TG+qYYSCweSDr+JLpccqUip2Lv77rtZv3498SMYXg98oIcSCbu6uvDy8kKtVst7MNK9UAbUgSbNyp+RGqSSNaTkJg/cqxjp+AOvm/JQO51OfH19aWtrk9aEo2nXrl1kZGTwy1/+ku3bt+N0OiXN0N3dndLSUmkf+PLLL/PCCy9IzK+7u/tl3Ufx1WalTqcb8Tt3dXXJQiWdTofVasXDwwO1Wi3TXRW2iVJO73A4MBgMeHp6Eh4ejlqtpqysjJCQEDo7OyVrXDlP6N+gtFqtEiam0Enb29tRq9USpaykPBoMBmJjY1Gr1TIDJTAwUKbeKbHg0e6NooFZVYqhdm5uLh0dHZw/f5577rlHWs8pg4NimalQUxUEsY+PDyqVisbGRiIiIuR9H9rWFcJkZ2enHHi3bt3KI488gsvlkqmminesr68vvr6+VFVVDdqX6evro7GxEb1ej1qtRqVSSaqqMgjp9Xq5d9fc3IynpyfBwcGDron4quhSpVLR3Nws94QUnIFiwqJMZJTno6GhgZCQEHx8fORg4ePjMyy7zWg0EhMT86+J/P1naPXq1WRmZqJWq/n5z3/+3/KZ3xedO3eOAwcO4Ovry+rVq78W1fL7rMvFHiu6/fbbiYuLIyYmhnvuuUeaUStkv7vvvptVq1bJ4y5evPiis6Rvip/+JqqurpYrQBg+AF+O6uvr5Yx/4MxbSU1UQo7+/v60tLTg5eX1jdqKgug1Go14enry17/+Fa1Wy5gxY7jiiiuGcVoGDkyKT8NIeOKLqb29nZaWFl5//XWmTZsmVxAajYagoKBBOeeAZM1cakKg6Ou0tYu9duB9U667MtjC5WEk2traCA4O/qFzV6TE40az9vvfrtbW1q/9wPxvk9FoxGazjYp7zsnJwdvbm7CwsO9NOxm4ovq2MhqNBAUFyVJ9u92OWq2mpaUFb29venp6cHd3Z9OmTdx+++0EBgZ+7ZXywPPt6OjAx8eH0tJSGhsb///2vjy6qev6el/JloRH2fI8YMAx2GEmFAyYBBvCWAKZCk3JQAvJWs3wNV9o2vSXpqZdWemvSb6VliTN0IQm4JSQMiYQZgzYzKaewDY2eJCRPEiyrNGafL4/5Hsrj0AmHEd7LS1Lz3p697zh3HvPPWdv3HHHHejs7BSc5TwWbTKZEBkZed1ww0DQaDRwuVwoKipCWFgYUlNT0dnZKYSleVz7q86av8nr8HXRlVnmd+5++PF9h8iC6BoN3qymAAdPOeQsic3NzWhsbBQatWPHjkVTUxPGjx+PjRs3YsmSJQgICOiTT51LHQ7k8Jqbm8VMw2g0wu12o729HbGxsRg2bJhYpwC8YSSXy9Vv4dn1oFarxQKzwWDA6tWr8dxzz/USTG9raxOL5t8FPTKfxfTXOXyVTkOr1SIhIcFP+evH4MRgGFx8X8CdII+Pf1UqXj4y5qpKwcHBiIqKQnBwsEhcsNvtmDp1qtDx7S8zoy/H3vOa+taaKJVK2O126HQ6yGSyXqpSvjH4m4XH4
0FcXBxCQ0MFJ87SpUuhUql6VXQGBQWJ+PZ3AV+myr7wVWYD13t2Bq0SU0+0tbUhMDAQQUFBQrrNd8HuZmKuXwf9SX3dalgsFhBRrzjmYIVvnPHbFDngTtBut3+jIRYu/ehwOL7TtQuHwwGn0wmbzSZS7b4ucyNfWHS5XIiLiwMRISIiAlu3boVUKoVSqYTBYOjTTrvdLqg0+DPIuV6oiwWypaUFycnJwoEZDAa0tLRAo9GAiOB0OkUKIS9K8gUXoPbVfO0LUqlUjMIlEgmUSiVWrFiBgIAAaDQa5OXloaioCG+99RakUinCwsL6LHL8NsDbfbMzErfbLXRre84wrkdCNuhH7uXl5Xj33XeFevurr76K6upqHDhwAJcvXwbgJeL6rnD8+HHs2rXrOzvejaK1tfUbJVX7urheabRv6tqhQ4e+EpHajUAikUCr1WLnzp039P2amppuOdP9fefs2bP45z//+ZUZNT0eDywWy03bzXV4z5w5g6NHj6KiogLAfzMzqEuPs6CgoBsvTs/6iZ5tAbwdR0xMDGpra0WYgI/u+xpN+2Y5ccfOs0A4URgRCf4Vl8sFh8MBiUQCg8Egfjc+Ph5Op7NbyMl3pmCz2b4SK6VMJoNSqRSdIc86qq6uhkKhEBkzX+W3fffhI2hf5kedTnfd+6gvWCwWwY/T0dGByspKbNmyBU6nsxdZ3fc6zx3wFistWLAA48ePR2dnJ2bNmoUJEyYgJydHnIQTJ04AuHGa06+Kt99+G3K5HCNHjvxWj3OzcDgccLvd2L17961uCgDvdSgtLcWOHTsA/Ld9vpDL5VAqlVAoFKioqPjWeOU5u+GN0Od2dHSgtLRUlM/3BY/Hg3Xr1qGoqAjx8fFfmVGTiJCbm4s1a9Zcd3rd3t4upvO82On1118H4HUozc3NIt3OaDRCrVajuLgY165dE5z/Op2um4P3HfVxTiaTyQSbzYY//vGP2Lx5M6RSqZil8nixL/OhRCIRGTccUqkUMplMtFmv10Mul+PPf/4ztFotqqurcfjwYdTX14vzx6kffPl3fEepXL/0RkMXvrYZjUaRb75o0SK89NJLOHPmDEwmk+hwBoq59+VAPR5Pt07ZtxaAp2MWFhYiPz+/1303EH++TqfDkSNHsGXLFhgMBsjlctFJcII74L+dyfUGBoPeuT/22GMiH3vTpk2YOnUqiAg2mw3p6enQ6XR4/vnn+w3N9Pfg9Nw+0Eizs7MTZrMZx48fx/jx4wVF7M38Pj8GZ07sjwGys7PzulS+LperG+OjXC7Hxx9/3KsAit/kvm3pKSfWH/pic+y5L38YHQ5Ht1hiS0sLTpw4gczMTNFeq9UKjUbTy4m3trZi1apVSExMhFqtRmtrK4gIarV6wNEm17Nsamoa8Ca3WCzYsWOHWCTk4Cx8vAqQx7F5YRbX1XS5XNBqtWIU1tjYiDNnziApKQnz58/vM1RBRLh69ar47PF44PF4RKWkXq9HeXk5zpw5gxkzZoiitr7abjKZcPr0aZjNZhARmpqacPr0acEpk5ycjPDwcLjdboSEhCAiIgKhoaF47LHHMHr0aOj1ehQUFAgyrfr6esGGSUQoKChATU0NAG9hYFVVFa5cuYL09HQMGzYMWq1WOGWr1SpsM5vN0Ol0aG9vR0lJCVwuF4xGozhOY2OjKBLct28f4uPjYTKZEB4ejkWLFuHnP/+5yIFvbm6G2+1GZWUlmpubodFooFarYTab4Xa7UVdXh6amJtjtdpHtxul5fdHQ0IDTp0/D6XRCr9fDbrfjypUromgtIiICbrcba9asER1Tc3MzqqurcfbsWTEI6OzsFDUFvMNTq9XQ6XRoaWlBXV0dHA5Hn2seHo8HjY2NOHnyJGJiYkTojI/mez5DnICMz3gOHDiAjIwMMSjhny9duiSeecaYCE0PhOvG3BljyQA+BhAHoBPAe0T0V8ZYLoC1AHjt8e+IaG/XPi8A+AUAD4BniGj/9Y7TH7gaul6vR0lJiViY4DdOSUkJDAYDduzYAZvNhocffljse+jQIcHtfd999wEADhw4AIvFAoVCgcWLF4tKxYqKCrjdbtx///29cpYlEglaW1tx6tQpXLp0CYmJiQgPDxdUn0lJSZg+fToqKytRVVWF2NhYaDQamM1mZGdn49ixY5g8eTKuXbuG1tZWzJ49G5cuXYJer8fSpUtF3O/gwYMwmUwgIkHI3xf27dsHh8OBMWPGICMjAwEBAaiursbEiRMBeMnP1Go1ZDIZ7r33XjDGYDabcenSJVRWVmLKlCl9kmlx7N27FzabDUlJSZg8eTLkcjm++OIL2O12qFQqTJgwASEhIdizZ4/IeKipqUFQUBBmz56N8vJy7Nq1CxMmTIBer0daWhra2tpQUlKC1tZWJCYmYv78+bBYLDh8+DDS0tIwbdo0HDt2DDabDdOmTUN+fj7kcjnWrFnTZwyWixzn5+dDJpNh8uTJ4l7h4KPckpISzJo1C59++inmz5+P5ORklJWVoaioCBkZGaioqMBdd92F6OhovPnmm1i/fj06Ojpgs9mwf/9+mM1mqFQq3H333Th37hykUinMZjMsFku3GCp3VBUVFWhuboZMJkNSUhJaWlpQVVUlCmNSUlJQU1ODhoYGaLVaEVP1BafHra+vx6FDh9DR0YHhw4fj2rVrOHbsGIKDgwUbJ38mZDIZLBYLTp06hbFjxyItLQ2XLl1CRUUF7rnnHhw6dAiMMSEW09LSgoKCAiQlJSEgIADt7e1oaGhAeHg4DAYDqqurcfnyZcjlcpjNZlGp/cknn2DMmDGYOXOmoLo1GAyQSqWora1FYWEhHA4H4uPj0djYiKNHj0KhUKCxsRFutxvvvfceli9fjkmTJkGhUKChoQEmk0mEXsePH4+mpiaYzWZMnjwZx44dQ1NTE6ZOnYrs7GxoNBqcOnUKwcHBmDBhApKSkoQ9W7duRWRkJFJTU2E2m0W1emxsLKRSKY4fP4577rkHMTExaGlpwTvvvIPc3FxUVFSIinFOgXz16lUkJiYiNTUV+fn5iIqKQnR0NI4cOYJFixb1+Qy1t7djz5492LlzJ6ZNm4b09HSEhITg5MmTsNvtqK2txYoVK8TamFwuF6EYjUaDo0ePYtKkSSAiTJw4ETU1NZg/fz42bdqEBQsWICsrC3K5HBqNBrW1tf0+w+IBGOgFIB7AlK73oQAuA7gdQC6AdX18/3YAJQDkAEYCuAJAOtAxehKH9YW3336b0tPTu22zWq20cuVKam9vp7q6OoqOjiYiIofDQU8++SSVlJQQEdHzzz9PbrebmpubadGiRURElJ+fT0RER44coTfeeIOIiD744IN+icaIiBYsWEBEXlKi+fPnC4KtmJgY2rdvH23cuJGefvppWrZsGRF5SYN2795NK1asoNWrV4vfWbduHRERXb16ld59910i8hIfPf7446IdfaGiooJ+9rOfkcPhICIvyRnH9OnTqaysjOrr6yk7O5uIiPbu3Sv+/+CDDwoyszFjxtD777/f7bc9Hg8REWVnZ5PBYCAiolWrVpHZbKa5c+eSTqcj
IqLVq1fTsmXLqKioiD7//PNuxGmc6I2IaOrUqUREVF1dTTt37qT777+fLl++TC+88AJFRUWR0+mkL7/8krZu3UobNmyg4uJiKi4upgcffJB2795NhYWFFBoa2if5mclkoldeeUWcr3PnztG6det6fdfhcNCOHTvEdVuzZg1NnTqVzpw5Q0eOHKE5c+aQXq+nsrIy2rBhAxUVFdGUKVNIr9dTa2srpaamUktLC7W1tdEjjzxChYWFdOXKFXrhhRcEeROH3W4nq9VKkyZNoqysLCouLqZ9+/aRVqulefPm0blz5+jixYv00ksv0fnz56m4uJieffZZunbtWjdiNrPZTDU1NbRr1y5avnw5lZaW0qlTp+gnP/kJnT9/ni5fvkx/+MMf6OjRo9Te3k5Op5OuXbtG7e3t5Ha76cSJE7Rx40Y6ePAgqdVq2rVrF91///2CjEomk1FpaSkdOHCApkyZQmVlZVRdXU0nT56k1tZWysvLo8LCQiLyPhsPPvgg3XvvvXT27Fl655136NNPP6UVK1ZQVlYWXbhwgWbNmkX79++n6upqOnHiBD355JNUVVVFGzZsoLy8PKqvr6dly5ZRZWUlabVaKi0tFURpx48fp927d9MLL7xAZrOZGhoaKCQkhN5++206ceIEzZw5kz766CM6f/48/exnP6PW1lbatGkTPfDAA1RfX0//+Mc/BFGXx+Mht9tNNpuNNBoNPfvss/TYY4+RwWCguXPn0l/+8hcqKiqiLVu20MaNG+nkyZNUWVlJU6dOJa1WSzt27KBXX32Vqqurad68eXThwgW6cOECvfLKK7Rt2zbas2cP3XvvvbRr1y7Ky8ujZ599lqqrq3s9Q21tbfTKK6/QmDFjyGw2U319PeXm5tLBgwdJq9XSvn376PXXXyer1Sr2U6vV1NzcTGq1mqZPn05ERA0NDfTZZ5/RwoULSa1W07333ksjRowgg8FAxcXFtHz5ctq3b9/XIw4jIi0RXeh6bwZQASBxgF2WAdhCRA4iqgVQA2Da9Y5zPVRWVnbTO+Sr8G1tbQgLC0NxcbFY3Lp8+TI+//xzdHR0YNOmTZg5cyakUiliYmJQXl6OdevWCW6Wzz77DPHx8XjzzTfhdDoHzIThYZ+ysjJkZWUJgi3AWzyxePFiNDc344knnoDT6URsbCyWLl2KtrY2QXJWXl6OZcuWAYDgdge8ccV//etfeOaZZ3DXXXf1OjYR4dNPPxXxN5fLJfhrPB4P0tLSkJaWhvDwcFRWVuLpp5/G9OnTAQDV1dWYMGGCOJZare4lZMJtU6vViIiIQGdnJ1577TWEhISgtrYWKpUKbW1tkEgkgpvmypUrYvTCZyGAN+TBR1M2mw0nTpxAfX09IiMjYbFYsHjxYnR2dmLKlCkwmUwYP348VCoV2tvbYbFYMHv2bFRWVvaZIQB447F5eXmIjIyEw+FAY2Mjli5d2mua7Ha7cerUKYwaNQqdnZ1oa2uDwWBATEwMkpKSBE1weHg4VqxYAZVKhdjYWERGRoopNF9A/+Uvf4nY2Fg0Njbi9ttv71UAwyXh4uPjce7cOXz00UeYMWMGdDodamtrcfnyZRw/fhxLlizBiBEjBG2ATqfrFks2Go0ICAjAnj17UF5ejoSEBCiVSoSHh4uS/itXriAoKAhEBKlUiqCgIISFhaGtrQ1BQUFobGxEVFQUQkJCREiMU2VHR0cjIiICwcHBCA8PR25uLt59911ERkbCYDCgvLxccKSkpqaira0Ny5YtQ2BgIJYvX46pU6dCIpFg7ty5cLvdCA4OFlqle/bsQWJiIhhjuPvuuyGRSET8mZfPR0REID09HZmZmdBqtfjyyy8xbdo0kTkVExOD0NBQyGQyZGRkYPLkyWhvb4fNZsNTTz2F1NRUwYUfHR0t6LZ5hTEv5z927BhGjRoFiUQChUKBMWPGiNH6xIkTERoaimHDhiE0NBRhYWGIiopCZmam0LAtKipCfn4+5s2bh8mTJ8PhcGDevHlIS0uDTqfD9OnTe90DEokEISEhaGhoEKG91tZW7N69G1OmTIHdbofT6URra2s3LVepVAq5XA69Xi9m8Vw7NiUlRYRqMzMzxUzUbDYPKAoE3GTMnTE2AsBkAJz1/ynGWClj7EPGGJcxSgTgKw3UiIE7gwHBHdD27dvxyCOPdFMj37x5s3gwPvzwQ5FJc/DgQTidTkybNg0PPPCAcIKff/45Ghoa8Nprr2HOnDkAgG3btuEnP/kJnnrqqQH1WZubm3Hfffeho6MDO3fuxNq1awF446IzZszAggULoFKpEB0djUWLFnWrsktJScHChQths9mwefNmZGRkQKPR4P3338fw4cNhs9lQXl4Ok8mEN954o0/SMcYYNmzYILicL168KNgKz549ixUrVkAul+P8+fPQaDTYsGEDZs6cCcC7VvH4448D8DreCRMm4IEHHuhVCVhfX4/77rsPZrMZEokEYWFhKC8vx5o1a9De3o6IiAgcOHAAv//97xESEoItW7Zg1qxZALyhnIceegjAf4VQ8vPzERYWhr/97W8YOXIkQkJCUFpaiszMTFE0s2nTJowcORJutxsXLlxAXFwclEoltm/fjrVr1/aZh2y1WlFeXo67774b9fX1OHnyJJKTk7vx13O1q08//VRkdRQXF+P5559HbGws9Ho95s6di+DgYCQnJ0OlUqGoqAhr166FwWCA3W7Hww8/jOzsbDz66KNIS0tDWFgYTp8+LWz2RXJyMvR6PZ566ilcunQJLS0tqKmpgdFoxJw5c7Bw4UKsXLkSw4cPh1wuR15eHpYuXYrk5GQhZ8edd0hICLZv346ZM2eira0Ne/bswcmTJ0W83mg0Yty4cUL5Z9iwYULJKyEhAV988QUiIyMF7zl3Hh9++CFycnKg1WphsVjw4osv4plnnkFxcTH279+PP/3pT3j//fdRX1+PEydOQKFQIDIyEpmZmVAqlYJXpqKiAnPnzkVVVRXmzJkjMp8++ugjrF69GkSEkSNHIjAwECUlJRgzZgwCAwNhMBiwbt06/PjHP0Z6erqogp06dSpCQkJw6NAhPPfcc5g7dy6MRiMmTZokONCJCC+++CJ0Oh2eeOIJrFu3Dk888QSOHz/e7TpIJBKEhobi2rVryM7ORkNDg7in8vPzxbkZPnw4dDodgoKCBKvliBEjoNVqkZ2djYULF+IXv/iFuDcPHz4sCNsKCgqQkpLSKxWxubkZBoMBe/bswerVq1FaWor3338flZWVYIzBarXigw8+wH333SfCygEBAVAqlQgNDUV9fT2WLl2KsrIyJCUlYfv27RgxYgSIvNJ8y5Ytg1qtxt69e/GHP/xhwNAqcBPOnTEWAmAbgF8RkQnA3wGkApgEQAvgdf7VPnbvtbrIGHucMXaeMXZ+IGXzlpYWlJeXo6OjA4mJid1GZ4WFhXjsscfgdruh0WiQnp4uRhqLFi1CZWUlSkpKsHnzZlgsFhw5cgTt7e344IMPhOLRm2++iWPHjuHw4cPYtm1bv+04cuQIcnJyEBAQgDVr1mD37t2oqqrCyy+/LFR0Ll6
8iMWLF4t9HA4H/vOf/4gRbXFxMU6fPg2VSoV///vfsNvtuHDhAnQ6HbZt24aamhps3bq1X7HtX/3qV5DJZDh16hTeeustGI1GGI1GIS/G0+PMZjM2b94s9EAfeughfPLJJzh9+jRee+01nDhxQggscLjdbiQkJECv16O5uRmnTp1CcXExxo0bB7VajZKSEvz1r3/F6dOn8dBDD4HIK2Axc+ZMXL58Ge+99x4CAwPhcDiEZqnRaER9fT3WrVuHESNGoK6uDlarVeRlV+0lAAAOfUlEQVRmNzY2Ijo6GqWlpZDL5fjss89EnL6yshLjxo3rpWYFANHR0ViyZAkaGhqwd+9eVFVV4fjx4906K4VCgc7OTjz66KMidfDvf/87Vq5cCalUim3btonRJeDlbNmwYQOSkpJgsVhEJ9rU1IRz587h4sWLCA0NFdev52I4L+bRaDSCsZQLbigUCrS2tuLKlSuorKwU3CkajQalpaWCXEwqlQrn/7vf/Q6BgYGorKyEXC7Hv//9b0ycOBGnT58WTslqtaKxsRFNTU1ISEhAaGgozp8/j4SEBJw7dw7BwcG4ePEifv7zn8NoNMJsNmP8+PGIiopCUVGRINiaPXs20tPTkZycjOHDhyM8PBzDhg0T7KOcAIzLyslkMpSVlaGwsBBBQUGoq6tDaGgonnjiCVRXV4uOetKkSUhOTsa4ceNw8eJF2Gw2uN1uZGVlwe12IzY2Fs899xw2bdqEzZs3o7a2Fj/96U8hk8mwZcsWLFy4ENHR0di5c6cQySgrK0NdXR2qqqrw6quv9prl8lTRRx55BEVFRdiwYQNWrlyJO++8E3l5eWCMoba2FsHBwfB4PAgMDERTUxNkMhmioqIwceJEBAcHw2az4erVqzh16hQCAgJw9epVZGRk4Nq1a6iurhb1Br7gNA1ut1vI5D300ENYtWoV6urqUFdXhxUrVojOjIPIKy1ZWFgo1i8MBgMSExORlZUFtVoNlUoFpVKJ2267DS+//DKKioquK2EpvREFecZYIIBdAHYQ0YcAkJuba83NzaXc3Fxav379VQC/zs3NfXv9+vUTAYTn5uYWAMD69eufBPB5bm5uo+9v5ubmFuXm5r6Xm5v7Xl5eXi4fWfbEnj17oFarkZSUBIVCgVGjRonQSUdHB+666y5ERkYiKioKUVFRmDFjBiIiIjBt2jQUFhZCIpFgzpw5YpHl4sWLCAsLEzfF2LFjUVBQAKVSiUmTJvVbBPTll19i7ty5oqflmQdLly5FbGysUJC57bbbRPs4b8aYMWNE5V1AQADuuOMOxMTEYMSIEUhPT8eoUaOQkpKC2tpauN1uLFq0qM82jB07FoGBgYiKisLy5cvR1NQklJ3a2toQFRWFnJwcnDt3DkFBQZg3bx4AryINz0S58847BbOeL/i0lo8gVCqVUKcKCwtDTU0NpkyZIpSpODf46NGjERsbK85/QkICAgICcO3aNSxZsgSpqakIDw8XCk6jRo1CTEyMWGjiNK9xcXEwGAy48847MXLkSDQ3NyMiIgLz5s3rtcBtsVgQFxcHp9OJOXPmIDMzE/Hx8YLSloMxhoiICFgsFowYMQJZWVnweDwICgqC0+lEamqqmFpz6leZTIYJEyYgMDBQLPZFREQgJiYG4eHh2LBhA9auXdsnWRhXuGpoaEBWVhaSkpIEkyJfTB01ahTCwsKEStPo0aOFdq/dbodMJhP52XK5HB0dHZg2bRrGjBkDt9uN/fv3o729HatWrYJCoYBOp4PFYkFERATMZrMI1yQkJEClUsFoNGLu3LlISUkR6lo5OTlCMYwrXnV0dCA7O1uoL2VkZMBkMkEikSA2NlYIRoeGhqKtrQ0ymQzp6elISkpCYmIiwsLCIJVK0dTUBI/HI3RPOXVwSkoK4uLikJiYCIVCgZiYGAwfPhxxcXGw2+0YPXo0VCoVIiMjMWzYMLhcLsTGxiI0NBRyuRxxcXHIycnByJEjYbVaIZFIkJOTg/Dw8G5hLbvdLgqYWltb4XK5xKIrJ1z70Y9+BKVSibCwMGg0Gtx+++1ISUlBWFgYhg0bBr1ej7KyMiQkJCAjIwMqlQparRYzZsyATCaD2+1GRkZGr5EzdRVtcZWv+++/H+Hh4UhMTBRaC7Nnz+5VM8C57JOTk1FbW4ucnByoVCoQEZKTkxEfH4/29naMGjVKzIJ0Oh30ej12796tzc3N7bPA5brcMsx75j4CYCCiX/lsjycibdf7ZwFMJ6KVjLGxAD6BN86eAOAwgDQi6jfjfjBzy7z55psijrd69WqRjnQzVYk2m+0rl1T3BV9GObfbLYpNeuKbqF68HvqiHebMgryT4+3tj6ZVKpUKvhOg+/lqbGwU8fv+cKOsjDfDxcLbwLNWHn30UUyZMgUBAQH49a9/fUO/AXhnL9SVhserm/tL262rqxOc8VKpFDqdTjhHi8WC3/zmN7j99tsxe/ZskWbq+92oqChYLBa4XC7RYfAUVV/xdwDdzjcXb05KSoJMJhOFRgEBAWKdpbOzU2QGcbFoDrlcLtKFo6KicOXKFcjlcqGBwPPUe15LvvbDOyG+tnD48GHEx8fjjjvuQEBAwE3dxy6XC4cPH4bFYsG4ceOwceNG5ObmCrIw3+vvex348Tl4+mrP0OX17rW+6K4Hqp73vX5ctpFLfPKOhLN08tRH/pstLS2IjY39WtwyswA8DCCHMVbc9VoM4C+MsTLGWCmAbADPAgARXQSwFcAlAPsAPDmQYx/skMvlqKurw/DhwwF4nfrNlpt/k44d6F7gERAQ0G8Rxrft2AH0unF5IYfvwjQfQPT1UPC2+z5EvuerP/GSnse8EdzMdeNtcLlc8Hg8YkbjKwJ+I1Aqld0KgZxOp9Db9K2t4E7QV7WJh0d4B2M0GpGZmSmSAQD06hglEkm3vHmu5NQTPKPi6tWrMBqNsNlsIn/a9/+RkZFCNYw7Lj67DQkJEfvK5XLBNso7xZ7Vqz076cDAQJjNZlitVoSHh4u2qlQqBAcHi3b3dR/3V9tgt9uh1WpRUFCAgoICEUrli52+4Bqy/Dr5gi/O9sT17jWJRNLtmRjIsfNZhsvlEvcbv1f4sfnxuPC3729ej37Azwo5AHg5N+C92N8lh833GS0tLUIcYrBjoFGh7/Vua2sToagbhe/+Op1OhBB6jkZbW1vhcDhESGD06NEixg14H2a+IBkYGIjIyEhBk6tQKOBwOEBEUCgUsFgsMBqN3RxpX5zvvNy/ra1N1EiEhYV1G5maTCaYTCZER0eLzsfj8Qgn6TsT0mq1IsuKO6iWlhYx+uSCJE6nE3FxcdDr9aiqqhLEYWlpad041X1H+TxsERAQAJPJBMZYn+FTzijJj8c7LKvVisjIyG4Vt/wcBAYGwmg0wul0dlvkZIyJIit+rbiICg9p9ecPTCZTn+L0PWG32wUfDr8+7e3t4n1ISIiYmfMqW25naGgon/UMbsrfO+64g7g2qh/fTxARDAaDWDQLCQkR3CEKhUJUaPqGarhD4tN6/lBxeDwe8fmrdKrkQ6Pa14PIY8qAd8TUnwPk6x
H88/WI4zhnTmdnJ+RyOex2u3g4uSADH2Hzka1EIhFVt1yZx2QyISkpqRvVLx/xc9uCgoKEA+M2c2ZHuVwOj8cjOonAwEB4PB5YrVbhgBQKhajojI6OFnH45ubmbhWQDocDdrsdSqUSERER8Hg8YjTJbeROT6/Xw2aziUXisLAwUZEdHh4uJPbq6upEyEqpVIp7gtvJOy4u4BEcHAyr1QrGmBjZBgYGCkUrqVQKi8UiMoh4J2W1WtHR0SFmA9TFw8NfH3/8MYxGIx555BHExcXBbDbD6XQiKCgIcrlcZKvw7/PzztvqcDjgcrkEXw2/Fowx4ZD5sTkjptVqRXJycrd70m63w+FwQKFQiH35rMNsNovwq8vlQlRUFKxWK5RK5eB27oyxVgBWALpb3ZZbgCj47f4hwW/3Dwvftt0pRNQn3emgcO4AwBg7318PNJTht/uHBb/dPyzcSrv9AWQ//PDDjyEIv3P3ww8//BiCGEzOffAoTXy38Nv9w4Lf7h8Wbpndgybm7ocffvjhxzeHwTRy98MPP/zw4xvCLXfujLGFjLEqxlgNY+y3t7o93yS62DJbGGPlPtsiGWMHGWPVXX8jfP73Qtd5qGKMLbg1rf76YIwlM8aOMsYqGGMXGWP/p2v7kLadMaZgjJ1ljJV02b2+a/uQtpuDMSZljP2HMfZF1+chbzdjrK6rUr+YMXa+a9vgsLs/ovfv4gVACq+YxygAMnhFPm6/lW36hu27E8AUAOU+2/4C4Ldd738L4H+73t+0yMlgfaF/gZchbTu8jKghXe8D4aXGzhzqdvvY/3/h5ZX6ouvzkLcbQB2AqB7bBoXdt3rkPg1ADRFdJSIngC3win0MCRDRcQCGHpuXwUvEhq6/y322f+MiJ7cC1L/Ay5C2nbzg4raBXS/CELcbABhjSQCWAPiHz+Yhb3c/GBR232rn/o0Ke3xPEEtdbJpdfzkJy5A8F6y7wMuQt70rNFEMoAXAQSL6QdgN4A0Az8Ors8zxQ7CbABxgjBUxxjhv+aCw+8bo9L493JCwxw8EQ+5c9BR48eWN6fnVPrZ9L20nLwPqJMaYEsAOxti4Ab4+JOxmjP0YQAsRFTHG5tzILn1s+97Z3YVZRKRhjMUAOMgYqxzgu9+p3bd65N4IINnncxIAzS1qy3eFZsZYPODlxId3hAcMsXPRJfCyDUAeEW3v2vyDsB0AiMgIIB/AQgx9u2cBuIcxVgdvaDWHMbYZQ99uEJGm628LgB3whlkGhd232rmfA5DGGBvJGJMBWAlg9y1u07eN3QAe7Xr/KLwKV3z7SsaYnDE2EkAagLO3oH1fG8w7RP8AQAUR/T+ffw1p2xlj0V0jdjDGhgGYB6ASQ9xuInqBiJKIaAS8z/ARIlqFIW43YyyYMRbK3wOYD6Acg8XuQbDavBjebIorAP7nVrfnG7btX/Dqy7rg7bV/AUAFrzpVddffSJ/v/0/XeagCsOhWt/9r2J0F73SzFEBx12vxULcdwAQA/+myuxzAS13bh7TdPc7BHPw3W2ZI2w1vll9J1+si91+DxW5/haoffvjhxxDErQ7L+OGHH3748S3A79z98MMPP4Yg/M7dDz/88GMIwu/c/fDDDz+GIPzO3Q8//PBjCMLv3P3www8/hiD8zt0PP/zwYwjC79z98MMPP4Yg/j8FjSEjUKJl8AAAAABJRU5ErkJggg==\n", - "text/plain": [ - "
" - ] - }, - "metadata": { - "needs_background": "light" - }, - "output_type": "display_data" - } - ], - "source": [ - "imshow(pred, cmap='gray')" - ] - }, - { - "cell_type": "code", - "execution_count": 73, - "metadata": {}, - "outputs": [], - "source": [ - "output_dir = 'test_preds'\n", - "if not os.path.exists(output_dir):\n", - " os.makedirs(output_dir)\n", - "for x_test_file in x_test:\n", - " img = cv2.imread(x_test_file, cv2.IMREAD_GRAYSCALE)\n", - " img = cv2.resize(img, input_shape[::-1], interpolation=cv2.INTER_AREA)\n", - " pred = make_prediction(img)\n", - " filename = path_leaf(x_test_file)\n", - " filepath = os.path.join(output_dir, filename)\n", - " cv2.imwrite(filepath, pred)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "lightweight-gpu-kernel", - "language": "python", - "name": "lightweight-gpu-kernel" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.1" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/live-reloading/onnx/README.md b/examples/live-reloading/onnx/README.md index 77456896ee..e8ec367b01 100644 --- a/examples/live-reloading/onnx/README.md +++ b/examples/live-reloading/onnx/README.md @@ -2,6 +2,4 @@ _WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ -The model live-reloading feature is automatically enabled for the ONNX predictors. This means that any ONNX examples found in the [examples](../..) directory will already have this running. - -The live-reloading is a feature that reloads models at run-time from (a) specified S3 bucket(s) in the `cortex.yaml` config of each API. Models are added/removed from the API when the said models are added/removed from the S3 bucket(s) or reloaded when the models are edited. More on this in the [docs](insert-link). +Model live-reloading is automatically enabled for ONNX predictors. diff --git a/examples/live-reloading/tensorflow/README.md b/examples/live-reloading/tensorflow/README.md index 2444484b77..46f4111a4f 100644 --- a/examples/live-reloading/tensorflow/README.md +++ b/examples/live-reloading/tensorflow/README.md @@ -2,10 +2,4 @@ _WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ -The model live-reloading feature is automatically enabled 1 for the TensorFlow predictors. This means that any TensorFLow examples found in the [examples](../..) directory will already have this running. - -The live-reloading is a feature that reloads models at run-time from (a) specified S3 bucket(s) in the `cortex.yaml` config of each API. Models are added/removed from the API when the said models are added/removed from the S3 bucket(s) or reloaded when the models are edited. More on this in the [docs](insert-link). - ---- - -*1: The live-reloading feature for the TensorFlow predictor is disabled when Inferentia resources (`compute.inf`) are added to the API and `processes_per_replica` > 1.* +Model live-reloading is automatically enabled for TensorFlow predictors unless using Inferentia resources (`compute.inf`) and `processes_per_replica` > 1. 
diff --git a/examples/onnx/multi-model-classifier/README.md b/examples/multi-model/onnx/README.md similarity index 100% rename from examples/onnx/multi-model-classifier/README.md rename to examples/multi-model/onnx/README.md diff --git a/examples/onnx/multi-model-classifier/cortex.yaml b/examples/multi-model/onnx/cortex.yaml similarity index 100% rename from examples/onnx/multi-model-classifier/cortex.yaml rename to examples/multi-model/onnx/cortex.yaml diff --git a/examples/onnx/multi-model-classifier/predictor.py b/examples/multi-model/onnx/predictor.py similarity index 100% rename from examples/onnx/multi-model-classifier/predictor.py rename to examples/multi-model/onnx/predictor.py diff --git a/examples/onnx/multi-model-classifier/requirements.txt b/examples/multi-model/onnx/requirements.txt similarity index 100% rename from examples/onnx/multi-model-classifier/requirements.txt rename to examples/multi-model/onnx/requirements.txt diff --git a/examples/tensorflow/image-classifier-resnet50/sample.json b/examples/multi-model/onnx/sample.json similarity index 100% rename from examples/tensorflow/image-classifier-resnet50/sample.json rename to examples/multi-model/onnx/sample.json diff --git a/examples/pytorch/multi-model-text-analyzer/README.md b/examples/multi-model/python/README.md similarity index 100% rename from examples/pytorch/multi-model-text-analyzer/README.md rename to examples/multi-model/python/README.md diff --git a/examples/pytorch/multi-model-text-analyzer/cortex.yaml b/examples/multi-model/python/cortex.yaml similarity index 100% rename from examples/pytorch/multi-model-text-analyzer/cortex.yaml rename to examples/multi-model/python/cortex.yaml diff --git a/examples/pytorch/multi-model-text-analyzer/predictor.py b/examples/multi-model/python/predictor.py similarity index 100% rename from examples/pytorch/multi-model-text-analyzer/predictor.py rename to examples/multi-model/python/predictor.py diff --git a/examples/pytorch/multi-model-text-analyzer/requirements.txt b/examples/multi-model/python/requirements.txt similarity index 100% rename from examples/pytorch/multi-model-text-analyzer/requirements.txt rename to examples/multi-model/python/requirements.txt diff --git a/examples/pytorch/multi-model-text-analyzer/sample-sentiment.json b/examples/multi-model/python/sample-sentiment.json similarity index 100% rename from examples/pytorch/multi-model-text-analyzer/sample-sentiment.json rename to examples/multi-model/python/sample-sentiment.json diff --git a/examples/pytorch/multi-model-text-analyzer/sample-summarizer.json b/examples/multi-model/python/sample-summarizer.json similarity index 100% rename from examples/pytorch/multi-model-text-analyzer/sample-summarizer.json rename to examples/multi-model/python/sample-summarizer.json diff --git a/examples/tensorflow/multi-model-classifier/README.md b/examples/multi-model/tensorflow/README.md similarity index 100% rename from examples/tensorflow/multi-model-classifier/README.md rename to examples/multi-model/tensorflow/README.md diff --git a/examples/tensorflow/multi-model-classifier/cortex.yaml b/examples/multi-model/tensorflow/cortex.yaml similarity index 84% rename from examples/tensorflow/multi-model-classifier/cortex.yaml rename to examples/multi-model/tensorflow/cortex.yaml index ef99bc941e..2c0e39bea8 100644 --- a/examples/tensorflow/multi-model-classifier/cortex.yaml +++ b/examples/multi-model/tensorflow/cortex.yaml @@ -9,14 +9,10 @@ paths: - name: inception model_path: 
s3://cortex-examples/tensorflow/image-classifier/inception/ - - name: iris - model_path: s3://cortex-examples/tensorflow/iris-classifier/nn/ - name: resnet50 model_path: s3://cortex-examples/tensorflow/resnet50/ config: models: - iris: - labels: ["setosa", "versicolor", "virginica"] resnet50: input_shape: [224, 224] input_key: input diff --git a/examples/tensorflow/multi-model-classifier/predictor.py b/examples/multi-model/tensorflow/predictor.py similarity index 100% rename from examples/tensorflow/multi-model-classifier/predictor.py rename to examples/multi-model/tensorflow/predictor.py diff --git a/examples/tensorflow/image-classifier-inception/requirements.txt b/examples/multi-model/tensorflow/requirements.txt similarity index 100% rename from examples/tensorflow/image-classifier-inception/requirements.txt rename to examples/multi-model/tensorflow/requirements.txt diff --git a/examples/tensorflow/multi-model-classifier/sample-image.json b/examples/multi-model/tensorflow/sample-image.json similarity index 100% rename from examples/tensorflow/multi-model-classifier/sample-image.json rename to examples/multi-model/tensorflow/sample-image.json diff --git a/examples/onnx/iris-classifier/README.md b/examples/onnx/iris-classifier/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/onnx/iris-classifier/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/onnx/yolov5-youtube/README.md b/examples/onnx/yolov5-youtube/README.md deleted file mode 100644 index f7822449bb..0000000000 --- a/examples/onnx/yolov5-youtube/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# YOLOv5 Detection model - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example deploys a detection model trained using [ultralytics' yolo repo](https://github.com/ultralytics/yolov5) using ONNX. -We'll use the `yolov5s` model as an example here. -In can be used to run inference on youtube videos and returns the annotated video with bounding boxes. - -The example can be run on both CPU and on GPU hardware. - -## Sample Prediction - -Deploy the model by running: - -```bash -cortex deploy -``` - -And wait for it to become live by tracking its status with `cortex get --watch`. - -Once the API has been successfully deployed, export the API's endpoint for convenience. You can get the API's endpoint by running `cortex get yolov5-youtube`. - -```bash -export ENDPOINT=your-api-endpoint -``` - -When making a prediction with [sample.json](sample.json), [this](https://www.youtube.com/watch?v=aUdKzb4LGJI) youtube video will be used. - -To make a request to the model: - -```bash -curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d @sample.json --output video.mp4 -``` - -After a few seconds, `curl` will save the resulting video `video.mp4` in the current working directory. 
The following is a sample of what should be exported: - -![yolov5](https://user-images.githubusercontent.com/26958764/86545098-e0dce900-bf34-11ea-83a7-8fd544afa11c.gif) - - -## Exporting ONNX - -To export a custom model from the repo, use the [`model/export.py`](https://github.com/ultralytics/yolov5/blob/master/models/export.py) script. -The only change we need to make is to change the line - -```bash -model.model[-1].export = True # set Detect() layer export=True -``` - -to - -```bash -model.model[-1].export = False -``` - -Originally, the ultralytics repo does not export postprocessing steps of the model, e.g. the conversion from the raw CNN outputs to bounding boxes. -With newer ONNX versions, these can be exported as part of the model making the deployment much easier. - -With this modified script, the ONNX graph used for this example has been exported using -```bash -python models/export.py --weights weights/yolov5s.pt --img 416 --batch 1 -``` diff --git a/examples/onnx/yolov5-youtube/conda-packages.txt b/examples/onnx/yolov5-youtube/conda-packages.txt deleted file mode 100644 index 131fce12b5..0000000000 --- a/examples/onnx/yolov5-youtube/conda-packages.txt +++ /dev/null @@ -1,3 +0,0 @@ -conda-forge::ffmpeg=4.2.3 -conda-forge::youtube-dl -conda-forge::matplotlib diff --git a/examples/onnx/yolov5-youtube/cortex.yaml b/examples/onnx/yolov5-youtube/cortex.yaml deleted file mode 100644 index 80d0393308..0000000000 --- a/examples/onnx/yolov5-youtube/cortex.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: yolov5-youtube - kind: RealtimeAPI - predictor: - type: onnx - path: predictor.py - model_path: s3://cortex-examples/onnx/yolov5-youtube/ - config: - iou_threshold: 0.5 - confidence_threshold: 0.6 - compute: - gpu: 1 # this is optional, since the api can also run on cpu diff --git a/examples/onnx/yolov5-youtube/labels.json b/examples/onnx/yolov5-youtube/labels.json deleted file mode 100644 index c86f2f812a..0000000000 --- a/examples/onnx/yolov5-youtube/labels.json +++ /dev/null @@ -1,82 +0,0 @@ -[ - "person", - "bicycle", - "car", - "motorcycle", - "airplane", - "bus", - "train", - "truck", - "boat", - "traffic light", - "fire hydrant", - "stop sign", - "parking meter", - "bench", - "bird", - "cat", - "dog", - "horse", - "sheep", - "cow", - "elephant", - "bear", - "zebra", - "giraffe", - "backpack", - "umbrella", - "handbag", - "tie", - "suitcase", - "frisbee", - "skis", - "snowboard", - "sports ball", - "kite", - "baseball bat", - "baseball glove", - "skateboard", - "surfboard", - "tennis racket", - "bottle", - "wine glass", - "cup", - "fork", - "knife", - "spoon", - "bowl", - "banana", - "apple", - "sandwich", - "orange", - "broccoli", - "carrot", - "hot dog", - "pizza", - "donut", - "cake", - "chair", - "couch", - "potted plant", - "bed", - "dining table", - "toilet", - "tv", - "laptop", - "mouse", - "remote", - "keyboard", - "cell phone", - "microwave", - "oven", - "toaster", - "sink", - "refrigerator", - "book", - "clock", - "vase", - "scissors", - "teddy bear", - "hair drier", - "toothbrush" -] diff --git a/examples/onnx/yolov5-youtube/predictor.py b/examples/onnx/yolov5-youtube/predictor.py deleted file mode 100644 index b99d29d911..0000000000 --- a/examples/onnx/yolov5-youtube/predictor.py +++ /dev/null @@ -1,65 +0,0 @@ -# WARNING: you are on the master branch; please 
refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import json -import os -import io -import uuid -import utils - -import numpy as np -from matplotlib import pyplot as plt - -from starlette.responses import StreamingResponse - - -class ONNXPredictor: - def __init__(self, onnx_client, config): - self.client = onnx_client - # Get the input shape from the ONNX runtime - (signature,) = onnx_client.get_model()["input_signatures"].values() - _, _, height, width = signature["shape"] - self.input_size = (width, height) - self.config = config - with open("labels.json") as buf: - self.labels = json.load(buf) - color_map = plt.cm.tab20(np.linspace(0, 20, len(self.labels))) - self.color_map = [tuple(map(int, colors)) for colors in 255 * color_map] - - def postprocess(self, output): - boxes, obj_score, class_scores = np.split(output[0], [4, 5], axis=1) - boxes = utils.boxes_yolo_to_xyxy(boxes) - - # get the class-prediction & class confidences - class_id = class_scores.argmax(axis=1) - cls_score = class_scores[np.arange(len(class_scores)), class_id] - - confidence = obj_score.squeeze(axis=1) * cls_score - sel = confidence > self.config["confidence_threshold"] - boxes, class_id, confidence = boxes[sel], class_id[sel], confidence[sel] - sel = utils.nms(boxes, confidence, self.config["iou_threshold"]) - boxes, class_id, confidence = boxes[sel], class_id[sel], confidence[sel] - return boxes, class_id, confidence - - def predict(self, payload): - # download YT video - in_path = utils.download_from_youtube(payload["url"], self.input_size[1]) - out_path = f"{uuid.uuid1()}.mp4" - - # run predictions - with utils.FrameWriter(out_path, size=self.input_size) as writer: - for frame in utils.frame_reader(in_path, size=self.input_size): - x = (frame.astype(np.float32) / 255).transpose(2, 0, 1) - # 4 output tensors, the last three are intermediate values and - # not necessary for detection - output, *_ = self.client.predict(x[None]) - boxes, class_ids, confidence = self.postprocess(output) - utils.overlay_boxes(frame, boxes, class_ids, self.labels, self.color_map) - writer.write(frame) - - with open(out_path, "rb") as f: - output_buf = io.BytesIO(f.read()) - - os.remove(in_path) - os.remove(out_path) - - return StreamingResponse(output_buf, media_type="video/mp4") diff --git a/examples/onnx/yolov5-youtube/requirements.txt b/examples/onnx/yolov5-youtube/requirements.txt deleted file mode 100644 index 2c779ca7f1..0000000000 --- a/examples/onnx/yolov5-youtube/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -ffmpeg-python -aiofiles -opencv-python-headless diff --git a/examples/onnx/yolov5-youtube/sample.json b/examples/onnx/yolov5-youtube/sample.json deleted file mode 100644 index 8421278f58..0000000000 --- a/examples/onnx/yolov5-youtube/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://www.youtube.com/watch?v=aUdKzb4LGJI" -} diff --git a/examples/onnx/yolov5-youtube/utils.py b/examples/onnx/yolov5-youtube/utils.py deleted file mode 100644 index c9bbeb73fe..0000000000 --- a/examples/onnx/yolov5-youtube/utils.py +++ /dev/null @@ -1,130 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import youtube_dl -import ffmpeg -import numpy as np -import cv2 -import uuid - -from pathlib import Path -from typing import Iterable, Tuple - - -def download_from_youtube(url: str, min_height: int) -> Path: - target = f"{uuid.uuid1()}.mp4" - ydl_opts = { - "outtmpl": target, - "format": f"worstvideo[vcodec=vp9][height>={min_height}]", - } - with youtube_dl.YoutubeDL(ydl_opts) as ydl: - ydl.download([url]) - # we need to glob in case youtube-dl adds suffix - (path,) = Path().absolute().glob(f"{target}*") - return path - - -def frame_reader(path: Path, size: Tuple[int, int]) -> Iterable[np.ndarray]: - width, height = size - # letterbox frames to fixed size - process = ( - ffmpeg.input(path) - .filter("scale", size=f"{width}:{height}", force_original_aspect_ratio="decrease") - # Negative values for x and y center the padded video - .filter("pad", height=height, width=width, x=-1, y=-1) - .output("pipe:", format="rawvideo", pix_fmt="rgb24") - .run_async(pipe_stdout=True) - ) - - while True: - in_bytes = process.stdout.read(height * width * 3) - if not in_bytes: - process.wait() - break - frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3]) - yield frame - - -class FrameWriter: - def __init__(self, path: Path, size: Tuple[int, int]): - width, height = size - self.process = ( - ffmpeg.input("pipe:", format="rawvideo", pix_fmt="rgb24", s=f"{width}x{height}") - .output(path, pix_fmt="yuv420p") - .overwrite_output() - .run_async(pipe_stdin=True) - ) - - def write(self, frame: np.ndarray): - self.process.stdin.write(frame.astype(np.uint8).tobytes()) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.__del__() - - def __del__(self): - self.process.stdin.close() - self.process.wait() - - -def nms(dets: np.ndarray, scores: np.ndarray, thresh: float) -> np.ndarray: - x1 = dets[:, 0] - y1 = dets[:, 1] - x2 = dets[:, 2] - y2 = dets[:, 3] - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = scores.argsort()[::-1] # get boxes with more ious first - - keep = [] - while order.size > 0: - i = order[0] # pick maxmum iou box - keep.append(i) - xx1 = np.maximum(x1[i], x1[order[1:]]) - yy1 = np.maximum(y1[i], y1[order[1:]]) - xx2 = np.minimum(x2[i], x2[order[1:]]) - yy2 = np.minimum(y2[i], y2[order[1:]]) - - w = np.maximum(0.0, xx2 - xx1 + 1) # maximum width - h = np.maximum(0.0, yy2 - yy1 + 1) # maxiumum height - inter = w * h - ovr = inter / (areas[i] + areas[order[1:]] - inter) - - inds = np.where(ovr <= thresh)[0] - order = order[inds + 1] - - return np.array(keep).astype(np.int) - - -def boxes_yolo_to_xyxy(boxes: np.ndarray): - boxes[:, 0] -= boxes[:, 2] / 2 - boxes[:, 1] -= boxes[:, 3] / 2 - boxes[:, 2] = boxes[:, 2] + boxes[:, 0] - boxes[:, 3] = boxes[:, 3] + boxes[:, 1] - return boxes - - -def overlay_boxes(frame, boxes, class_ids, label_map, color_map, line_thickness=None): - tl = ( - line_thickness or round(0.0005 * (frame.shape[0] + frame.shape[1]) / 2) + 1 - ) # line/font thickness - - for class_id, (x1, y1, x2, y2) in zip(class_ids, boxes.astype(np.int)): - color = color_map[class_id] - label = label_map[class_id] - cv2.rectangle(frame, (x1, y1), (x2, y2), color, tl, cv2.LINE_AA) - tf = max(tl - 1, 1) # font thickness - t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] - x3, y3 = x1 + t_size[0], y1 - t_size[1] - 3 - cv2.rectangle(frame, (x1, y1), (x3, y3), color, -1, cv2.LINE_AA) # filled - cv2.putText( - frame, - label, - 
(x1, y1 - 2), - 0, - tl / 3, - [225, 255, 255], - thickness=tf, - lineType=cv2.LINE_AA, - ) diff --git a/examples/pytorch/answer-generator/README.md b/examples/pytorch/answer-generator/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/pytorch/answer-generator/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/pytorch/answer-generator/cortex.yaml b/examples/pytorch/answer-generator/cortex.yaml deleted file mode 100644 index b336f257dd..0000000000 --- a/examples/pytorch/answer-generator/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: answer-generator - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - gpu: 1 - mem: 5G diff --git a/examples/pytorch/answer-generator/generator.py b/examples/pytorch/answer-generator/generator.py deleted file mode 100644 index 4a9aba613e..0000000000 --- a/examples/pytorch/answer-generator/generator.py +++ /dev/null @@ -1,44 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -# This file includes code which was modified from https://colab.research.google.com/drive/1KTLqiAOdKM_3RnBWfqgrvOQLqumUyOdA - -import torch -import torch.nn.functional as F - - -END_OF_TEXT = 50256 - - -def generate(model, conditioned_tokens, device): - generated_tokens = [] - while True: - result = recalc(model, conditioned_tokens, generated_tokens, device) - if result == END_OF_TEXT: - return generated_tokens[:-1] - - -def recalc(model, conditioned_tokens, generated_tokens, device): - indexed_tokens = conditioned_tokens + generated_tokens - tokens_tensor = torch.tensor([indexed_tokens]) - tokens_tensor = tokens_tensor.to(device) - with torch.no_grad(): - outputs = model(tokens_tensor) - predictions = outputs[0] - logits = predictions[0, -1, :] - filtered_logits = top_p_filtering(logits) - probabilities = F.softmax(filtered_logits, dim=-1) - next_token = torch.multinomial(probabilities, 1) - generated_tokens.append(next_token.item()) - return next_token.item() - - -def top_p_filtering(logits, top_p=0.9, filter_value=-float("Inf")): - assert logits.dim() == 1 - sorted_logits, sorted_indices = torch.sort(logits, descending=True) - cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) - sorted_indices_to_remove = cumulative_probs > top_p - sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() - sorted_indices_to_remove[..., 0] = 0 - indices_to_remove = sorted_indices[sorted_indices_to_remove] - logits[indices_to_remove] = filter_value - return logits diff --git a/examples/pytorch/answer-generator/predictor.py b/examples/pytorch/answer-generator/predictor.py deleted file mode 100644 index 38c6622bf3..0000000000 --- a/examples/pytorch/answer-generator/predictor.py +++ /dev/null @@ -1,36 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples 
on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import wget -import torch -from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config -import generator - - -class PythonPredictor: - def __init__(self, config): - medium_config = GPT2Config(n_embd=1024, n_layer=24, n_head=16) - model = GPT2LMHeadModel(medium_config) - wget.download( - "https://convaisharables.blob.core.windows.net/lsp/multiref/medium_ft.pkl", - "/tmp/medium_ft.pkl", - ) - - weights = torch.load("/tmp/medium_ft.pkl") - weights["lm_head.weight"] = weights["lm_head.decoder.weight"] - weights.pop("lm_head.decoder.weight", None) - - model.load_state_dict(weights) - - device = "cuda" if torch.cuda.is_available() else "cpu" - print(f"using device: {device}") - model.to(device) - model.eval() - - self.device = device - self.model = model - self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2") - - def predict(self, payload): - conditioned_tokens = self.tokenizer.encode(payload["text"]) + [generator.END_OF_TEXT] - prediction = generator.generate(self.model, conditioned_tokens, self.device) - return self.tokenizer.decode(prediction) diff --git a/examples/pytorch/answer-generator/requirements.txt b/examples/pytorch/answer-generator/requirements.txt deleted file mode 100644 index effba0ef1b..0000000000 --- a/examples/pytorch/answer-generator/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -torch -transformers==2.3.* -wget==3.* diff --git a/examples/pytorch/answer-generator/sample.json b/examples/pytorch/answer-generator/sample.json deleted file mode 100644 index aa91c9d2eb..0000000000 --- a/examples/pytorch/answer-generator/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "What is machine learning?" -} diff --git a/examples/pytorch/image-classifier-alexnet/cortex.yaml b/examples/pytorch/image-classifier-alexnet/cortex.yaml deleted file mode 100644 index 74c463c0b0..0000000000 --- a/examples/pytorch/image-classifier-alexnet/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-alexnet - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - gpu: 1 - mem: 4G diff --git a/examples/pytorch/image-classifier-alexnet/predictor.py b/examples/pytorch/image-classifier-alexnet/predictor.py deleted file mode 100644 index a739ddbb8a..0000000000 --- a/examples/pytorch/image-classifier-alexnet/predictor.py +++ /dev/null @@ -1,39 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import requests -import torch -import torchvision -from torchvision import transforms -from PIL import Image -from io import BytesIO - - -class PythonPredictor: - def __init__(self, config): - device = "cuda" if torch.cuda.is_available() else "cpu" - print(f"using device: {device}") - - model = torchvision.models.alexnet(pretrained=True).to(device) - model.eval() - # https://github.com/pytorch/examples/blob/447974f6337543d4de6b888e244a964d3c9b71f6/imagenet/main.py#L198-L199 - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - - self.preprocess = transforms.Compose( - [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] - ) - self.labels = requests.get( - "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" - ).text.split("\n")[1:] - self.model = model - self.device = device - - def predict(self, payload): - image = requests.get(payload["url"]).content - img_pil = Image.open(BytesIO(image)) - img_tensor = self.preprocess(img_pil) - img_tensor.unsqueeze_(0) - img_tensor = img_tensor.to(self.device) - with torch.no_grad(): - prediction = self.model(img_tensor) - _, index = prediction[0].max(0) - return self.labels[index] diff --git a/examples/pytorch/image-classifier-alexnet/requirements.txt b/examples/pytorch/image-classifier-alexnet/requirements.txt deleted file mode 100644 index ac988bdf84..0000000000 --- a/examples/pytorch/image-classifier-alexnet/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -torch -torchvision diff --git a/examples/pytorch/image-classifier-alexnet/sample.json b/examples/pytorch/image-classifier-alexnet/sample.json deleted file mode 100644 index eb72ddb869..0000000000 --- a/examples/pytorch/image-classifier-alexnet/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://i.imgur.com/PzXprwl.jpg" -} diff --git a/examples/pytorch/iris-classifier/cortex.yaml b/examples/pytorch/iris-classifier/cortex.yaml deleted file mode 100644 index a8b590882d..0000000000 --- a/examples/pytorch/iris-classifier/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: iris-classifier - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - config: - model: s3://cortex-examples/pytorch/iris-classifier/weights.pth - monitoring: - model_type: classification diff --git a/examples/pytorch/iris-classifier/predictor.py b/examples/pytorch/iris-classifier/predictor.py deleted file mode 100644 index 71994bb9ae..0000000000 --- a/examples/pytorch/iris-classifier/predictor.py +++ /dev/null @@ -1,50 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import re -import torch -import os -import boto3 -from botocore import UNSIGNED -from botocore.client import Config -from model import IrisNet - -labels = ["setosa", "versicolor", "virginica"] - - -class PythonPredictor: - def __init__(self, config): - # download the model - bucket, key = re.match("s3://(.+?)/(.+)", config["model"]).groups() - - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - s3.download_file(bucket, key, "/tmp/model.pth") - - # initialize the model - model = IrisNet() - model.load_state_dict(torch.load("/tmp/model.pth")) - model.eval() - - self.model = model - - def predict(self, payload): - # Convert the request to a tensor and pass it into the model - input_tensor = torch.FloatTensor( - [ - [ - payload["sepal_length"], - payload["sepal_width"], - payload["petal_length"], - payload["petal_width"], - ] - ] - ) - - # Run the prediction - output = self.model(input_tensor) - - # Translate the model output to the corresponding label string - return labels[torch.argmax(output[0])] diff --git a/examples/pytorch/iris-classifier/requirements.txt b/examples/pytorch/iris-classifier/requirements.txt deleted file mode 100644 index f2f30b7ef9..0000000000 --- a/examples/pytorch/iris-classifier/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -torch -scikit-learn diff --git a/examples/pytorch/iris-classifier/sample.json b/examples/pytorch/iris-classifier/sample.json deleted file mode 100644 index 0bc6836266..0000000000 --- a/examples/pytorch/iris-classifier/sample.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "sepal_length": 2.2, - "sepal_width": 3.6, - "petal_length": 1.4, - "petal_width": 3.3 -} diff --git a/examples/pytorch/language-identifier/README.md b/examples/pytorch/language-identifier/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/pytorch/language-identifier/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/pytorch/language-identifier/cortex.yaml b/examples/pytorch/language-identifier/cortex.yaml deleted file mode 100644 index e8243a58fa..0000000000 --- a/examples/pytorch/language-identifier/cortex.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: language-identifier - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - monitoring: - model_type: classification diff --git a/examples/pytorch/language-identifier/predictor.py b/examples/pytorch/language-identifier/predictor.py deleted file mode 100644 index e59ebe5012..0000000000 --- a/examples/pytorch/language-identifier/predictor.py +++ /dev/null @@ -1,18 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import wget -import fasttext - - -class PythonPredictor: - def __init__(self, config): - wget.download( - "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin", "/tmp/model" - ) - - self.model = fasttext.load_model("/tmp/model") - - def predict(self, payload): - prediction = self.model.predict(payload["text"]) - language = prediction[0][0][-2:] - return language diff --git a/examples/pytorch/language-identifier/requirements.txt b/examples/pytorch/language-identifier/requirements.txt deleted file mode 100644 index a342ff2914..0000000000 --- a/examples/pytorch/language-identifier/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -wget==3.* -fasttext==0.9.* diff --git a/examples/pytorch/language-identifier/sample.json b/examples/pytorch/language-identifier/sample.json deleted file mode 100644 index 225c357392..0000000000 --- a/examples/pytorch/language-identifier/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "build machine learning apis" -} diff --git a/examples/pytorch/object-detector/README.md b/examples/pytorch/object-detector/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/pytorch/object-detector/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/pytorch/object-detector/coco_labels.txt b/examples/pytorch/object-detector/coco_labels.txt deleted file mode 100644 index 8d950d95da..0000000000 --- a/examples/pytorch/object-detector/coco_labels.txt +++ /dev/null @@ -1,91 +0,0 @@ -__background__ -person -bicycle -car -motorcycle -airplane -bus -train -truck -boat -traffic light -fire hydrant -N/A -stop sign -parking meter -bench -bird -cat -dog -horse -sheep -cow -elephant -bear -zebra -giraffe -N/A -backpack -umbrella -N/A -N/A -handbag -tie -suitcase -frisbee -skis -snowboard -sports ball -kite -baseball bat -baseball glove -skateboard -surfboard -tennis racket -bottle -N/A -wine glass -cup -fork -knife -spoon -bowl -banana -apple -sandwich -orange -broccoli -carrot -hot dog -pizza -donut -cake -chair -couch -potted plant -bed -N/A -dining table -N/A -N/A -toilet -N/A -tv -laptop -mouse -remote -keyboard -cell phone -microwave -oven -toaster -sink -refrigerator -N/A -book -clock -vase -scissors -teddy bear -hair drier -toothbrush diff --git a/examples/pytorch/object-detector/cortex.yaml b/examples/pytorch/object-detector/cortex.yaml deleted file mode 100644 index 9b06d29e9e..0000000000 --- a/examples/pytorch/object-detector/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: object-detector - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - gpu: 1 - mem: 4G diff --git a/examples/pytorch/object-detector/predictor.py b/examples/pytorch/object-detector/predictor.py deleted file mode 100644 index 52aa593774..0000000000 --- a/examples/pytorch/object-detector/predictor.py +++ /dev/null @@ -1,49 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -from io import BytesIO - -import requests -import torch -from PIL import Image -from torchvision import models -from torchvision import transforms - - -class PythonPredictor: - def __init__(self, config): - self.device = "cuda" if torch.cuda.is_available() else "cpu" - print(f"using device: {self.device}") - - model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True).to(self.device) - model.eval() - - self.preprocess = transforms.Compose([transforms.ToTensor()]) - - with open("/mnt/project/coco_labels.txt") as f: - self.coco_labels = f.read().splitlines() - - self.model = model - - def predict(self, payload): - threshold = float(payload["threshold"]) - image = requests.get(payload["url"]).content - img_pil = Image.open(BytesIO(image)) - img_tensor = self.preprocess(img_pil).to(self.device) - img_tensor.unsqueeze_(0) - - with torch.no_grad(): - pred = self.model(img_tensor) - - predicted_class = [self.coco_labels[i] for i in pred[0]["labels"].cpu().tolist()] - predicted_boxes = [ - [(i[0], i[1]), (i[2], i[3])] for i in pred[0]["boxes"].detach().cpu().tolist() - ] - predicted_score = pred[0]["scores"].detach().cpu().tolist() - predicted_t = [predicted_score.index(x) for x in predicted_score if x > threshold] - if len(predicted_t) == 0: - return [], [] - - predicted_t = predicted_t[-1] - predicted_boxes = predicted_boxes[: predicted_t + 1] - predicted_class = predicted_class[: predicted_t + 1] - return predicted_boxes, predicted_class diff --git a/examples/pytorch/object-detector/requirements.txt b/examples/pytorch/object-detector/requirements.txt deleted file mode 100644 index ac988bdf84..0000000000 --- a/examples/pytorch/object-detector/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -torch -torchvision diff --git a/examples/pytorch/object-detector/sample.json b/examples/pytorch/object-detector/sample.json deleted file mode 100644 index 5005f13bad..0000000000 --- a/examples/pytorch/object-detector/sample.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "url": "https://i.imgur.com/PzXprwl.jpg", - "threshold": "0.8" -} diff --git a/examples/pytorch/question-generator/cortex.yaml b/examples/pytorch/question-generator/cortex.yaml deleted file mode 100644 index a944303edb..0000000000 --- a/examples/pytorch/question-generator/cortex.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: question-generator - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - mem: 6G diff --git a/examples/pytorch/question-generator/dependencies.sh b/examples/pytorch/question-generator/dependencies.sh deleted file mode 100644 index 5040da2342..0000000000 --- a/examples/pytorch/question-generator/dependencies.sh +++ /dev/null @@ -1,4 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -# torchvision isn’t required for this example, and pip was throwing warnings with it installed -pip uninstall torchvision -y diff --git a/examples/pytorch/question-generator/predictor.py b/examples/pytorch/question-generator/predictor.py deleted file mode 100644 index 0b7692890c..0000000000 --- a/examples/pytorch/question-generator/predictor.py +++ /dev/null @@ -1,36 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -from transformers import AutoModelWithLMHead, AutoTokenizer -import spacy -import subprocess -import json - - -class PythonPredictor: - def __init__(self, config): - subprocess.call("python -m spacy download en_core_web_sm".split(" ")) - import en_core_web_sm - - self.tokenizer = AutoTokenizer.from_pretrained( - "mrm8488/t5-base-finetuned-question-generation-ap" - ) - self.model = AutoModelWithLMHead.from_pretrained( - "mrm8488/t5-base-finetuned-question-generation-ap" - ) - self.nlp = en_core_web_sm.load() - - def predict(self, payload): - context = payload["context"] - answer = payload["answer"] - max_length = int(payload.get("max_length", 64)) - - input_text = "answer: {} context: {} ".format(answer, context) - features = self.tokenizer([input_text], return_tensors="pt") - - output = self.model.generate( - input_ids=features["input_ids"], - attention_mask=features["attention_mask"], - max_length=max_length, - ) - - return {"result": self.tokenizer.decode(output[0])} diff --git a/examples/pytorch/question-generator/requirements.txt b/examples/pytorch/question-generator/requirements.txt deleted file mode 100644 index d7b5db27a0..0000000000 --- a/examples/pytorch/question-generator/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -spacy==2.1.8 --e git+https://github.com/huggingface/transformers.git#egg=transformers ---find-links https://download.pytorch.org/whl/torch_stable.html -torch==1.6.0+cpu diff --git a/examples/pytorch/question-generator/sample.json b/examples/pytorch/question-generator/sample.json deleted file mode 100644 index 88c9fb0c92..0000000000 --- a/examples/pytorch/question-generator/sample.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "context": "Sarah works as a software engineer in London", - "answer": "London" -} diff --git a/examples/pytorch/reading-comprehender/README.md b/examples/pytorch/reading-comprehender/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/pytorch/reading-comprehender/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/pytorch/reading-comprehender/cortex.yaml b/examples/pytorch/reading-comprehender/cortex.yaml deleted file mode 100644 index ba89862c78..0000000000 --- a/examples/pytorch/reading-comprehender/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: reading-comprehender - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - gpu: 1 - mem: 4G diff --git a/examples/pytorch/reading-comprehender/predictor.py b/examples/pytorch/reading-comprehender/predictor.py deleted file mode 100644 index 7b86ac4770..0000000000 --- a/examples/pytorch/reading-comprehender/predictor.py +++ /dev/null @@ -1,25 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -from allennlp.predictors.predictor import Predictor as AllenNLPPredictor - - -class PythonPredictor: - def __init__(self, config): - self.device = "cuda" if torch.cuda.is_available() else "cpu" - print(f"using device: {self.device}") - - cuda_device = -1 - if self.device == "cuda": - cuda_device = 0 - - self.predictor = AllenNLPPredictor.from_path( - "https://storage.googleapis.com/allennlp-public-models/bidaf-elmo-model-2018.11.30-charpad.tar.gz", - cuda_device=cuda_device, - ) - - def predict(self, payload): - prediction = self.predictor.predict( - passage=payload["passage"], question=payload["question"] - ) - return prediction["best_span_str"] diff --git a/examples/pytorch/reading-comprehender/requirements.txt b/examples/pytorch/reading-comprehender/requirements.txt deleted file mode 100644 index 13dd5fbdba..0000000000 --- a/examples/pytorch/reading-comprehender/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -allennlp==0.9.* diff --git a/examples/pytorch/reading-comprehender/sample.json b/examples/pytorch/reading-comprehender/sample.json deleted file mode 100644 index 14f60455bc..0000000000 --- a/examples/pytorch/reading-comprehender/sample.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "passage": "Cortex Labs is building machine learning infrastructure for deploying models in production", - "question": "What does Cortex Labs do?" -} diff --git a/examples/pytorch/search-completer/README.md b/examples/pytorch/search-completer/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/pytorch/search-completer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. 
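For reference, every Realtime API in these examples is queried the same way once deployed: an HTTP POST with the JSON body shown in its `sample.json` (the same pattern as the `curl` calls in the license-plate-reader README later in this diff). A minimal Python client for the reading-comprehender API above might look like the sketch below; the endpoint URL is a placeholder (the real one is printed by `cortex get reading-comprehender`), so treat this as an illustration rather than one of the deleted files.

```python
# Minimal client sketch: POST the sample.json payload to a deployed Realtime API.
# The endpoint below is a placeholder; use the URL reported by `cortex get <api-name>`.
import requests

endpoint = "https://example.com/reading-comprehender"  # placeholder endpoint

payload = {
    "passage": "Cortex Labs is building machine learning infrastructure for deploying models in production",
    "question": "What does Cortex Labs do?",
}

response = requests.post(endpoint, json=payload)
response.raise_for_status()
print(response.json())  # the predictor returns the best answer span
```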
diff --git a/examples/pytorch/search-completer/cortex.yaml b/examples/pytorch/search-completer/cortex.yaml deleted file mode 100644 index cd73458149..0000000000 --- a/examples/pytorch/search-completer/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: search-completer - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - gpu: 1 - mem: 4G diff --git a/examples/pytorch/search-completer/predictor.py b/examples/pytorch/search-completer/predictor.py deleted file mode 100644 index 58d03ccc2c..0000000000 --- a/examples/pytorch/search-completer/predictor.py +++ /dev/null @@ -1,20 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -import regex -import tqdm - - -class PythonPredictor: - def __init__(self, config): - roberta = torch.hub.load("pytorch/fairseq", "roberta.large", force_reload=True) - roberta.eval() - device = "cuda" if torch.cuda.is_available() else "cpu" - print(f"using device: {device}") - roberta.to(device) - - self.model = roberta - - def predict(self, payload): - predictions = self.model.fill_mask(payload["text"] + " ", topk=5) - return [prediction[0] for prediction in predictions] diff --git a/examples/pytorch/search-completer/requirements.txt b/examples/pytorch/search-completer/requirements.txt deleted file mode 100644 index 16b9215d31..0000000000 --- a/examples/pytorch/search-completer/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -torch -regex -tqdm -dataclasses -hydra-core diff --git a/examples/pytorch/search-completer/sample.json b/examples/pytorch/search-completer/sample.json deleted file mode 100644 index dfd2a2f433..0000000000 --- a/examples/pytorch/search-completer/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "machine learning is" -} diff --git a/examples/pytorch/sentiment-analyzer/README.md b/examples/pytorch/sentiment-analyzer/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/pytorch/sentiment-analyzer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/pytorch/sentiment-analyzer/cortex.yaml b/examples/pytorch/sentiment-analyzer/cortex.yaml deleted file mode 100644 index 1ed6c45bbf..0000000000 --- a/examples/pytorch/sentiment-analyzer/cortex.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: sentiment-analyzer - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - # gpu: 1 # this is optional, since the api can also run on cpu diff --git a/examples/pytorch/sentiment-analyzer/predictor.py b/examples/pytorch/sentiment-analyzer/predictor.py deleted file mode 100644 index 03b796d199..0000000000 --- a/examples/pytorch/sentiment-analyzer/predictor.py +++ /dev/null @@ -1,15 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -from transformers import pipeline - - -class PythonPredictor: - def __init__(self, config): - device = 0 if torch.cuda.is_available() else -1 - print(f"using device: {'cuda' if device == 0 else 'cpu'}") - - self.analyzer = pipeline(task="sentiment-analysis", device=device) - - def predict(self, payload): - return self.analyzer(payload["text"])[0] diff --git a/examples/pytorch/sentiment-analyzer/requirements.txt b/examples/pytorch/sentiment-analyzer/requirements.txt deleted file mode 100644 index 3f565d80e4..0000000000 --- a/examples/pytorch/sentiment-analyzer/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -torch -transformers==2.9.* diff --git a/examples/pytorch/sentiment-analyzer/sample.json b/examples/pytorch/sentiment-analyzer/sample.json deleted file mode 100644 index 7622d16ae0..0000000000 --- a/examples/pytorch/sentiment-analyzer/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "best day ever" -} diff --git a/examples/pytorch/text-summarizer/README.md b/examples/pytorch/text-summarizer/README.md deleted file mode 100644 index 4323c6e133..0000000000 --- a/examples/pytorch/text-summarizer/README.md +++ /dev/null @@ -1,5 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. - -Please refer [here](https://sshleifer.github.io/blog_v2/jupyter/2020/03/12/bart.html) to learn more about BART. diff --git a/examples/pytorch/text-summarizer/cortex.yaml b/examples/pytorch/text-summarizer/cortex.yaml deleted file mode 100644 index 9f7b620ca9..0000000000 --- a/examples/pytorch/text-summarizer/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: text-summarizer - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - gpu: 1 # this is optional, since the api can also run on cpu - mem: 6G diff --git a/examples/pytorch/text-summarizer/predictor.py b/examples/pytorch/text-summarizer/predictor.py deleted file mode 100644 index 05652afd17..0000000000 --- a/examples/pytorch/text-summarizer/predictor.py +++ /dev/null @@ -1,18 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -from transformers import pipeline - - -class PythonPredictor: - def __init__(self, config): - device = 0 if torch.cuda.is_available() else -1 - print(f"using device: {'cuda' if device == 0 else 'cpu'}") - - self.summarizer = pipeline(task="summarization", device=device) - - def predict(self, payload): - summary = self.summarizer( - payload["text"], num_beams=4, length_penalty=2.0, max_length=142, no_repeat_ngram_size=3 - ) - return summary[0]["summary_text"] diff --git a/examples/pytorch/text-summarizer/requirements.txt b/examples/pytorch/text-summarizer/requirements.txt deleted file mode 100644 index 5afceb377e..0000000000 --- a/examples/pytorch/text-summarizer/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -transformers==2.9.* -torch diff --git a/examples/pytorch/text-summarizer/sample.json b/examples/pytorch/text-summarizer/sample.json deleted file mode 100644 index e54b77f18c..0000000000 --- a/examples/pytorch/text-summarizer/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "Machine learning (ML) is the scientific study of algorithms and statistical models that computer systems use to perform a specific task without using explicit instructions, relying on patterns and inference instead. It is seen as a subset of artificial intelligence. Machine learning algorithms build a mathematical model based on sample data, known as training data, in order to make predictions or decisions without being explicitly programmed to perform the task. Machine learning algorithms are used in a wide variety of applications, such as email filtering and computer vision, where it is difficult or infeasible to develop a conventional algorithm for effectively performing the task. Machine learning is closely related to computational statistics, which focuses on making predictions using computers. The study of mathematical optimization delivers methods, theory and application domains to the field of machine learning. Data mining is a field of study within machine learning, and focuses on exploratory data analysis through unsupervised learning. In its application across business problems, machine learning is also referred to as predictive analytics." -} diff --git a/examples/sklearn/iris-classifier/README.md b/examples/sklearn/iris-classifier/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/sklearn/iris-classifier/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/sklearn/iris-classifier/cortex.yaml b/examples/sklearn/iris-classifier/cortex.yaml deleted file mode 100644 index 1f05c85eca..0000000000 --- a/examples/sklearn/iris-classifier/cortex.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: iris-classifier - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - config: - bucket: cortex-examples - key: sklearn/iris-classifier/model.pkl - monitoring: - model_type: classification - compute: - cpu: 0.2 - mem: 200M diff --git a/examples/sklearn/iris-classifier/predictor.py b/examples/sklearn/iris-classifier/predictor.py deleted file mode 100644 index 46edab0ad2..0000000000 --- a/examples/sklearn/iris-classifier/predictor.py +++ /dev/null @@ -1,31 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import os -import boto3 -from botocore import UNSIGNED -from botocore.client import Config -import pickle - -labels = ["setosa", "versicolor", "virginica"] - - -class PythonPredictor: - def __init__(self, config): - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - s3.download_file(config["bucket"], config["key"], "/tmp/model.pkl") - self.model = pickle.load(open("/tmp/model.pkl", "rb")) - - def predict(self, payload): - measurements = [ - payload["sepal_length"], - payload["sepal_width"], - payload["petal_length"], - payload["petal_width"], - ] - - label_id = self.model.predict([measurements])[0] - return labels[label_id] diff --git a/examples/sklearn/iris-classifier/requirements.txt b/examples/sklearn/iris-classifier/requirements.txt deleted file mode 100644 index bbc213cf3e..0000000000 --- a/examples/sklearn/iris-classifier/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -boto3 -scikit-learn==0.21.3 diff --git a/examples/sklearn/iris-classifier/sample.json b/examples/sklearn/iris-classifier/sample.json deleted file mode 100644 index 9e792863cd..0000000000 --- a/examples/sklearn/iris-classifier/sample.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "sepal_length": 5.2, - "sepal_width": 3.6, - "petal_length": 1.5, - "petal_width": 0.3 -} diff --git a/examples/sklearn/iris-classifier/trainer.py b/examples/sklearn/iris-classifier/trainer.py deleted file mode 100644 index db1b047938..0000000000 --- a/examples/sklearn/iris-classifier/trainer.py +++ /dev/null @@ -1,25 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import boto3 -import pickle - -from sklearn.datasets import load_iris -from sklearn.model_selection import train_test_split -from sklearn.linear_model import LogisticRegression - -# Train the model - -iris = load_iris() -data, labels = iris.data, iris.target -training_data, test_data, training_labels, test_labels = train_test_split(data, labels) - -model = LogisticRegression(solver="lbfgs", multi_class="multinomial") -model.fit(training_data, training_labels) -accuracy = model.score(test_data, test_labels) -print("accuracy: {:.2f}".format(accuracy)) - -# Upload the model - -pickle.dump(model, open("model.pkl", "wb")) -s3 = boto3.client("s3") -s3.upload_file("model.pkl", "cortex-examples", "sklearn/iris-classifier/model.pkl") diff --git a/examples/sklearn/mpg-estimator/README.md b/examples/sklearn/mpg-estimator/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/sklearn/mpg-estimator/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/sklearn/mpg-estimator/cortex.yaml b/examples/sklearn/mpg-estimator/cortex.yaml deleted file mode 100644 index e6ffc969ee..0000000000 --- a/examples/sklearn/mpg-estimator/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: mpg-estimator - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - config: - model: s3://cortex-examples/sklearn/mpg-estimator/linreg/ - monitoring: - model_type: regression diff --git a/examples/sklearn/mpg-estimator/predictor.py b/examples/sklearn/mpg-estimator/predictor.py deleted file mode 100644 index bb1c2ed19a..0000000000 --- a/examples/sklearn/mpg-estimator/predictor.py +++ /dev/null @@ -1,41 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import boto3 -from botocore import UNSIGNED -from botocore.client import Config -import mlflow.sklearn -import numpy as np -import re -import os - - -class PythonPredictor: - def __init__(self, config): - model_path = "/tmp/model" - os.makedirs(model_path, exist_ok=True) - - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - # download mlflow model folder from S3 - bucket, prefix = re.match("s3://(.+?)/(.+)", config["model"]).groups() - response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix) - for s3_obj in response["Contents"]: - obj_key = s3_obj["Key"] - s3.download_file(bucket, obj_key, os.path.join(model_path, os.path.basename(obj_key))) - - self.model = mlflow.sklearn.load_model(model_path) - - def predict(self, payload): - model_input = [ - payload["cylinders"], - payload["displacement"], - payload["horsepower"], - payload["weight"], - payload["acceleration"], - ] - - result = self.model.predict([model_input]) - return np.asscalar(result) diff --git a/examples/sklearn/mpg-estimator/requirements.txt b/examples/sklearn/mpg-estimator/requirements.txt deleted file mode 100644 index cbcad6b321..0000000000 --- a/examples/sklearn/mpg-estimator/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -mlflow -pandas -numpy -scikit-learn==0.21.3 diff --git a/examples/sklearn/mpg-estimator/sample.json b/examples/sklearn/mpg-estimator/sample.json deleted file mode 100644 index 2dbbca46dd..0000000000 --- a/examples/sklearn/mpg-estimator/sample.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "cylinders": 4, - "displacement": 135, - "horsepower": 84, - "weight": 2490, - "acceleration": 15.7 -} diff --git a/examples/sklearn/mpg-estimator/trainer.py b/examples/sklearn/mpg-estimator/trainer.py deleted file mode 100644 index f17b7d9c05..0000000000 --- a/examples/sklearn/mpg-estimator/trainer.py +++ /dev/null @@ -1,25 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import mlflow.sklearn -import pandas as pd -import numpy as np -from sklearn.linear_model import LinearRegression -from sklearn.model_selection import train_test_split - - -df = pd.read_csv( - "https://www.uio.no/studier/emner/sv/oekonomi/ECON4150/v16/statacourse/datafiles/auto.csv" -) -df = df.replace("?", np.nan) -df = df.dropna() -df = df.drop(["name", "origin", "year"], axis=1) # drop categorical variables for simplicity -data = df.drop("mpg", axis=1) -labels = df[["mpg"]] - -training_data, test_data, training_labels, test_labels = train_test_split(data, labels) -model = LinearRegression() -model.fit(training_data, training_labels) -accuracy = model.score(test_data, test_labels) -print("accuracy: {:.2f}".format(accuracy)) - -mlflow.sklearn.save_model(model, "linreg") diff --git a/examples/spacy/entity-recognizer/README.md b/examples/spacy/entity-recognizer/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/spacy/entity-recognizer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/spacy/entity-recognizer/cortex.yaml b/examples/spacy/entity-recognizer/cortex.yaml deleted file mode 100644 index cc4dbbba38..0000000000 --- a/examples/spacy/entity-recognizer/cortex.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: entity-recognizer - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - mem: 1G diff --git a/examples/spacy/entity-recognizer/predictor.py b/examples/spacy/entity-recognizer/predictor.py deleted file mode 100644 index 9d42a9de4c..0000000000 --- a/examples/spacy/entity-recognizer/predictor.py +++ /dev/null @@ -1,22 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import spacy -import subprocess - - -class PythonPredictor: - """ - Class to perform NER (named entity recognition) - """ - - def __init__(self, config): - subprocess.call("python -m spacy download en_core_web_md".split(" ")) - import en_core_web_md - - self.nlp = en_core_web_md.load() - - def predict(self, payload): - doc = self.nlp(payload["text"]) - proc = lambda ent: {"label": ent.label_, "start": ent.start, "end": ent.end} - out = {ent.text: proc(ent) for ent in doc.ents} - return out diff --git a/examples/spacy/entity-recognizer/requirements.txt b/examples/spacy/entity-recognizer/requirements.txt deleted file mode 100644 index 568e4fc634..0000000000 --- a/examples/spacy/entity-recognizer/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -spacy diff --git a/examples/spacy/entity-recognizer/sample.json b/examples/spacy/entity-recognizer/sample.json deleted file mode 100644 index ae0f0f4120..0000000000 --- a/examples/spacy/entity-recognizer/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "Lilium, a Munich-based startup that is designing and building vertical take-off and landing (VTOL) aircraft with speeds of up to 100 km/h that it plans eventually to run in its own taxi fleet, has closed a funding round of over $240 million — money that it plans to use to keep developing its aircraft, and to start building manufacturing facilities to produce more of them, for an expected launch date of 2025." -} diff --git a/examples/tensorflow/image-classifier-inception/README.md b/examples/tensorflow/image-classifier-inception/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/tensorflow/image-classifier-inception/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. 
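The Python predictors in this diff can also be exercised locally before deploying, which is useful when iterating on preprocessing code. The sketch below is an illustration under stated assumptions, not one of the deleted files: it constructs the spaCy entity recognizer above directly and feeds it the payload from its `sample.json`, assuming that example's `predictor.py`, `sample.json`, and requirements are available in the working directory. In an actual deployment, Cortex instantiates the class and routes requests to `predict()`.

```python
# Local smoke-test sketch for examples/spacy/entity-recognizer (illustrative only).
import json

from predictor import PythonPredictor  # the class defined in predictor.py above

predictor = PythonPredictor(config={})  # this example's cortex.yaml passes no config keys

with open("sample.json") as f:
    payload = json.load(f)

print(predictor.predict(payload))  # dict of entities with their labels and token spans
```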
diff --git a/examples/tensorflow/image-classifier-inception/cortex.yaml b/examples/tensorflow/image-classifier-inception/cortex.yaml deleted file mode 100644 index e5177788ba..0000000000 --- a/examples/tensorflow/image-classifier-inception/cortex.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-inception - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ - monitoring: - model_type: classification - compute: - cpu: 1 - gpu: 1 diff --git a/examples/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml b/examples/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml deleted file mode 100644 index 919870651c..0000000000 --- a/examples/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml +++ /dev/null @@ -1,17 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-inception - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ - server_side_batching: - max_batch_size: 2 - batch_interval: 0.2s - threads_per_process: 2 - monitoring: - model_type: classification - compute: - cpu: 1 - gpu: 1 diff --git a/examples/tensorflow/image-classifier-inception/inception.ipynb b/examples/tensorflow/image-classifier-inception/inception.ipynb deleted file mode 100644 index 46956e0e48..0000000000 --- a/examples/tensorflow/image-classifier-inception/inception.ipynb +++ /dev/null @@ -1,211 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "inception.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "n8CwINQcEBKz", - "colab_type": "text" - }, - "source": [ - "# Exporting ImageNet Inception\n", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", - "\n", - "In this notebook, we'll show how to export the [pre-trained Imagenet Inception model](https://tfhub.dev/google/imagenet/inception_v3/classification/3) for serving." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3221z3P69fgf", - "colab_type": "text" - }, - "source": [ - "First, we'll install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "_SdQpq7g9LiI", - "colab_type": "code", - "colab": {} - }, - "source": [ - "!pip install tensorflow==1.14.* tensorflow-hub==0.6.* boto3==1.*" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "I-k0gUpxDGkU", - "colab_type": "text" - }, - "source": [ - "Next, we'll download the model from TensorFlow Hub and export it for serving:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "z6QLCzB4BKMe", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import time\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def\n", - "\n", - "export_dir = \"export/\" + str(time.time()).split('.')[0]\n", - "builder = tf.saved_model.builder.SavedModelBuilder(export_dir)\n", - "\n", - "with tf.Session(graph=tf.Graph()) as sess:\n", - " module = hub.Module(\"https://tfhub.dev/google/imagenet/inception_v3/classification/3\")\n", - "\n", - " input_params = module.get_input_info_dict()\n", - " image_input = tf.placeholder(\n", - " name=\"images\", dtype=input_params[\"images\"].dtype, shape=input_params[\"images\"].get_shape()\n", - " )\n", - " \n", - " sess.run([tf.global_variables_initializer(), tf.tables_initializer()])\n", - "\n", - " classes = module(image_input)\n", - " signature = predict_signature_def(inputs={\"images\": image_input}, outputs={\"classes\": classes})\n", - "\n", - " builder.add_meta_graph_and_variables(\n", - " sess, [\"serve\"], signature_def_map={\"predict\": signature}, strip_default_attrs=True\n", - " )\n", - "\n", - "builder.save()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "aGtJiyEnBgwl", - "colab_type": "text" - }, - "source": [ - "## Upload the model to AWS\n", - "\n", - "Cortex loads models from AWS, so we need to upload the exported model." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fTkjvSKBBmUB", - "colab_type": "text" - }, - "source": [ - "Set these variables to configure your AWS credentials and model upload path:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "4xcDWxqCBPre", - "colab_type": "code", - "cellView": "form", - "colab": {} - }, - "source": [ - "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", - "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", - "S3_UPLOAD_PATH = \"s3://my-bucket/image-classifier/inception\" #@param {type:\"string\"}\n", - "\n", - "import sys\n", - "import re\n", - "\n", - "if AWS_ACCESS_KEY_ID == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", - "\n", - "elif AWS_SECRET_ACCESS_KEY == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", - "\n", - "else:\n", - " try:\n", - " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", - " except:\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "czZkjb1IBr-f", - "colab_type": "text" - }, - "source": [ - "Upload the model to S3:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "M0b0IbyaBsim", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import os\n", - "import boto3\n", - "\n", - "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", - "\n", - "for dirpath, _, filenames in os.walk(\"export\"):\n", - " for filename in filenames:\n", - " filepath = os.path.join(dirpath, filename)\n", - " filekey = os.path.join(key, filepath[len(\"export/\"):])\n", - " print(\"Uploading s3://{}/{}...\".format(bucket, filekey), end = '')\n", - " s3.upload_file(filepath, bucket, filekey)\n", - " print(\" ✓\")\n", - "\n", - "print(\"\\nUploaded model export directory to \" + S3_UPLOAD_PATH)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pZQWoeZbE7Wc", - "colab_type": "text" - }, - "source": [ - "\n", - "That's it! See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/image-classifier-inception) for how to deploy the model as an API." - ] - } - ] -} diff --git a/examples/tensorflow/image-classifier-inception/predictor.py b/examples/tensorflow/image-classifier-inception/predictor.py deleted file mode 100644 index c2afb63c0c..0000000000 --- a/examples/tensorflow/image-classifier-inception/predictor.py +++ /dev/null @@ -1,21 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import requests -import numpy as np -from PIL import Image -from io import BytesIO - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - self.labels = requests.get( - "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" - ).text.split("\n") - - def predict(self, payload): - image = requests.get(payload["url"]).content - decoded_image = np.asarray(Image.open(BytesIO(image)), dtype=np.float32) / 255 - model_input = {"images": np.expand_dims(decoded_image, axis=0)} - prediction = self.client.predict(model_input) - return self.labels[np.argmax(prediction["classes"])] diff --git a/examples/tensorflow/image-classifier-inception/sample.json b/examples/tensorflow/image-classifier-inception/sample.json deleted file mode 100644 index 667652007a..0000000000 --- a/examples/tensorflow/image-classifier-inception/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://i.imgur.com/PzXprwl.jpg" -} diff --git a/examples/tensorflow/iris-classifier/README.md b/examples/tensorflow/iris-classifier/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/tensorflow/iris-classifier/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/tensorflow/license-plate-reader/README.md b/examples/tensorflow/license-plate-reader/README.md deleted file mode 100644 index 009286a4e1..0000000000 --- a/examples/tensorflow/license-plate-reader/README.md +++ /dev/null @@ -1,175 +0,0 @@ -# Real-Time License Plate Identification System - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This project implements a license plate identification system. On resource-constrained systems, running inferences may prove to be too computationally expensive. One solution is to run the ML in the cloud and have the local (embedded) system act as a client of these services. - -![Demo GIF](https://i.imgur.com/jgkJB59.gif) - -*Figure 1 - GIF taken from this real-time recording [video](https://www.youtube.com/watch?v=gsYEZtecXlA) of predictions* - -![Raspberry Pi client with 4G access and onboard GPS that connects to cortex's APIs for inference](https://i.imgur.com/MvDAXWU.jpg) - -*Figure 2 - Raspberry Pi-powered client with 4G access and onboard GPS that connects to cortex's APIs for inference. More on that [here](https://github.com/RobertLucian/cortex-license-plate-reader-client).* - -In our example, we assume we have a dashcam mounted on a car and we want to detect and recognize all license plates in the video stream in real-time. We can use an embedded computer system to record the video, then stream and infer frame-by-frame using a web service, reassemble the stream with the licence plate annotations, and finally display the annotated stream on a screen. The web service in our case is a set of 2 web APIs deployed using cortex. - -## Used Models - -The identification of license plates is done in three steps: - -1. 
Detecting the bounding boxes of each license plate using *YOLOv3* model. -1. Detecting the very specific region of each word inside each bounding box with high accuracy using a pretrained *CRAFT* text detector. -1. Recognizing the text inside the previously detected boxes using a pretrained *CRNN* model. - -Out of these three models (*YOLOv3*, *CRAFT* and *CRNN*) only *YOLOv3* has been fine-tuned with a rather small dataset to better work with license plates. This dataset can be found [here](https://github.com/RobertLucian/license-plate-dataset). This *YOLOv3* model has in turn been trained using [this](https://github.com/experiencor/keras-yolo3) GitHub project. To get more details about our fine-tuned model, check the project's description page. - -The other two models, *CRAFT* and *CRNN*, can be found in [keras-ocr](https://github.com/faustomorales/keras-ocr). - -## Deployment - Lite Version - -A lite version of the deployment is available with `cortex_lite.yaml`. The lite version accepts an image as input and returns an image with the recognized license plates overlayed on top. A single GPU is required for this deployment (i.e. `g4dn.xlarge`). - -Once the cortex cluster is created, run - -```bash -cortex deploy cortex_lite.yaml -``` - -And monitor the API with - -```bash -cortex get --watch -``` - -To run an inference on the lite version, the only 3 tools you need are `curl`, `sed` and `base64`. This API expects an URL pointing to an image onto which the inferencing is done. This includes the detection of license plates with *YOLOv3* and the recognition part with *CRAFT* + *CRNN* models. - -Export the endpoint & the image's URL by running - -```bash -export ENDPOINT=your-api-endpoint -export IMAGE_URL=https://i.imgur.com/r8xdI7P.png -``` - -Then run the following piped commands - -```bash -curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d '{"url":"'${IMAGE_URL}'"}' | -sed 's/"//g' | -base64 -d > prediction.jpg -``` - -The resulting image is the same as the one in [Verifying the Deployed APIs](#verifying-the-deployed-apis). - -For another prediction, let's use a generic image from the web. Export [this image's URL link](https://i.imgur.com/mYuvMOs.jpg) and re-run the prediction. This is what we get. - -![annotated sample image](https://i.imgur.com/tg1PE1E.jpg) - -*The above prediction has the bounding boxes colored differently to distinguish them from the cars' red bodies* - -## Deployment - Full Version - -The recommended number of instances to run this smoothly on a video stream is about 12 GPU instances (2 GPU instances for *YOLOv3* and 10 for *CRNN* + *CRAFT*). `cortex_full.yaml` is already set up to use these 12 instances. Note: this is the optimal number of instances when using the `g4dn.xlarge` instance type. For the client to work smoothly, the number of processes per replica can be adjusted, especially for `p3` or `g4` instances, where the GPU has a lot of compute capacity. - -If you don't have access to this many GPU-equipped instances, you could just lower the number and expect dropped frames. It will still prove the point, albeit at a much lower framerate and with higher latency. More on that [here](https://github.com/RobertLucian/cortex-license-plate-reader-client). - -Then after the cortex cluster is created, run - -```bash -cortex deploy cortex_full.yaml -``` - -And monitor the APIs with - -```bash -cortex get --watch -``` - -We can run the inference on a sample image to verify that both APIs are working as expected before we move on to running the client. 
Here is an example image: - -![sample image](https://i.imgur.com/r8xdI7P.png) - -On your local machine run: - -``` -pip install requests click opencv-contrib-python numpy -``` - -and run the following script with Python >= `3.6.x`. The application expects the argument to be a link to an image. The following link is for the above sample image. - - -```bash -export YOLOV3_ENDPOINT=api_endpoint_for_yolov3 -export CRNN_ENDPOINT=api_endpoint_for_crnn -python sample_inference.py "https://i.imgur.com/r8xdI7P.png" -``` - -If all goes well, then a prediction will be saved as a JPEG image to disk. By default, it's saved to `prediction.jpg`. Here is the output for the image above: - -![annotated sample image](https://i.imgur.com/JaD4A05.jpg) - -You can use `python sample_inference.py --help` to find out more. Keep in mind that any detected license plates with a confidence score lower than 80% are discarded. - -If this verification works, then we can move on and run the main client. - -### Running the Client - -Once the APIs are up and running, launch the streaming client by following the instructions at [robertlucian/cortex-license-plate-reader-client](https://github.com/RobertLucian/cortex-license-plate-reader-client). - -*Note: The client is kept in a separate repository to maintain the cortex project clean and focused. Keeping some of the projects that are more complex out of this repository can reduce the confusion.* - -## Customization/Optimization - -### Uploading the Model to S3 - -The only model to upload to an S3 bucket (for Cortex to deploy) is the *YOLOv3* model. The other two models are downloaded automatically upon deploying the service. - -If you would like to host the model from your own bucket, or if you want to fine tune the model for your needs, here's what you can do. - -#### Lite Version - -Download the *Keras* model: - -```bash -wget -O license_plate.h5 "https://www.dropbox.com/s/vsvgoyricooksyv/license_plate.h5?dl=0" -``` - -And then upload it to your bucket (also make sure [cortex_lite.yaml](cortex_lite.yaml) points to this bucket): - -```bash -BUCKET=my-bucket -YOLO3_PATH=examples/tensorflow/license-plate-reader/yolov3_keras -aws s3 cp license_plate.h5 "s3://$BUCKET/$YOLO3_PATH/model.h5" -``` - -#### Full Version - -Download the *SavedModel*: - -```bash -wget -O yolov3.zip "https://www.dropbox.com/sh/4ltffycnzfeul01/AAB7Xdmmi59w0EPOwhQ1nkvua/yolov3?dl=0" -``` - -Unzip it: - -```bash -unzip yolov3.zip -d yolov3 -``` - -And then upload it to your bucket (also make sure [cortex_full.yaml](cortex_full.yaml) points to this bucket): - -```bash -BUCKET=my-bucket -YOLO3_PATH=examples/tensorflow/license-plate-reader/yolov3_tf -aws s3 cp yolov3/ "s3://$BUCKET/$YOLO3_PATH" --recursive -``` - -### Configuring YOLOv3 Predictor - -The `yolov3` API predictor requires a [config.json](config.json) file to configure the input size of the image (dependent on the model's architecture), the anchor boxes, the object threshold, and the IoU threshold. All of these are already set appropriately so no other change is required. - -The configuration file's content is based on [this](https://github.com/experiencor/keras-yolo3/blob/bf37c87561caeccc4f1b879e313d4a3fec1b987e/zoo/config_license_plates.json#L2-L7). - -### Opportunities for performance improvements - -One way to reduce the inference time is to convert the models to use FP16/BFP16 (in mixed mode or not) and then choose the accelerator that gives the best performance in half precision mode - i.e. T4/V100. 
A speedup of an order of magnitude can be expected. diff --git a/examples/tensorflow/license-plate-reader/config.json b/examples/tensorflow/license-plate-reader/config.json deleted file mode 100644 index 0ff64d0a98..0000000000 --- a/examples/tensorflow/license-plate-reader/config.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "labels": ["license-plate"], - "net_h" : 416, - "net_w" : 416, - "anchors" : [15,6, 18,8, 22,9, 27,11, 32,13, 41,17, 54,21, 66,27, 82,33], - "obj_thresh" : 0.8, - "nms_thresh" : 0.01 -} diff --git a/examples/tensorflow/license-plate-reader/cortex_full.yaml b/examples/tensorflow/license-plate-reader/cortex_full.yaml deleted file mode 100644 index f16f6ab934..0000000000 --- a/examples/tensorflow/license-plate-reader/cortex_full.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: yolov3 - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor_yolo.py - model_path: s3://cortex-examples/tensorflow/license-plate-reader/yolov3_tf/ - processes_per_replica: 4 - threads_per_process: 3 - signature_key: serving_default - config: - model_config: config.json - compute: - cpu: 1 - gpu: 1 - mem: 8G - autoscaling: - min_replicas: 2 - max_replicas: 2 - -- name: crnn - kind: RealtimeAPI - predictor: - type: python - path: predictor_crnn.py - processes_per_replica: 1 - threads_per_process: 1 - compute: - cpu: 1 - gpu: 1 - mem: 8G - autoscaling: - min_replicas: 10 - max_replicas: 10 diff --git a/examples/tensorflow/license-plate-reader/cortex_lite.yaml b/examples/tensorflow/license-plate-reader/cortex_lite.yaml deleted file mode 100644 index 8e07cd8280..0000000000 --- a/examples/tensorflow/license-plate-reader/cortex_lite.yaml +++ /dev/null @@ -1,14 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: license-plate-reader - kind: RealtimeAPI - predictor: - type: python - path: predictor_lite.py - config: - yolov3: s3://cortex-examples/tensorflow/license-plate-reader/yolov3_keras/model.h5 - yolov3_model_config: config.json - compute: - cpu: 1 - gpu: 1 - mem: 4G diff --git a/examples/tensorflow/license-plate-reader/predictor_crnn.py b/examples/tensorflow/license-plate-reader/predictor_crnn.py deleted file mode 100644 index aa543f45cf..0000000000 --- a/examples/tensorflow/license-plate-reader/predictor_crnn.py +++ /dev/null @@ -1,44 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import cv2 -import numpy as np -import keras_ocr -import base64 -import pickle -import tensorflow as tf - - -class PythonPredictor: - def __init__(self, config): - # limit memory usage on each process - for gpu in tf.config.list_physical_devices("GPU"): - tf.config.experimental.set_memory_growth(gpu, True) - - # keras-ocr will automatically download pretrained - # weights for the detector and recognizer. 
- self.pipeline = keras_ocr.pipeline.Pipeline() - - def predict(self, payload): - # preprocess the images w/ license plates (LPs) - imgs = payload["imgs"] - imgs = base64.b64decode(imgs.encode("utf-8")) - jpgs_as_np = pickle.loads(imgs) - images = [cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR) for jpg_as_np in jpgs_as_np] - - # run batch inference - try: - prediction_groups = self.pipeline.recognize(images) - except ValueError: - # exception can occur when the images are too small - prediction_groups = [] - - image_list = [] - for img_predictions in prediction_groups: - boxes_per_image = [] - for predictions in img_predictions: - boxes_per_image.append([predictions[0], predictions[1].tolist()]) - image_list.append(boxes_per_image) - - lps = {"license-plates": image_list} - - return lps diff --git a/examples/tensorflow/license-plate-reader/predictor_lite.py b/examples/tensorflow/license-plate-reader/predictor_lite.py deleted file mode 100644 index 0a71b775fa..0000000000 --- a/examples/tensorflow/license-plate-reader/predictor_lite.py +++ /dev/null @@ -1,120 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import boto3, base64, cv2, re, os, requests, json -import keras_ocr - -from botocore import UNSIGNED -from botocore.client import Config -from tensorflow.keras.models import load_model -import utils.utils as utils -import utils.bbox as bbox_utils -import utils.preprocess as preprocess_utils - - -class PythonPredictor: - def __init__(self, config): - # download yolov3 model - bucket, key = re.match("s3://(.+?)/(.+)", config["yolov3"]).groups() - - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - model_path = "/tmp/model.h5" - s3.download_file(bucket, key, model_path) - - # load yolov3 model - self.yolov3_model = load_model(model_path) - - # get configuration for yolov3 model - with open(config["yolov3_model_config"]) as json_file: - data = json.load(json_file) - for key in data: - setattr(self, key, data[key]) - self.box_confidence_score = 0.8 - - # keras-ocr automatically downloads the pretrained - # weights for the detector and recognizer - self.recognition_model_pipeline = keras_ocr.pipeline.Pipeline() - - def predict(self, payload): - # download image - img_url = payload["url"] - image = preprocess_utils.get_url_image(img_url) - - # detect the bounding boxes - boxes = utils.get_yolo_boxes( - self.yolov3_model, - image, - self.net_h, - self.net_w, - self.anchors, - self.obj_thresh, - self.nms_thresh, - len(self.labels), - tensorflow_model=False, - ) - - # purge bounding boxes with a low confidence score - aux = [] - for b in boxes: - label = -1 - for i in range(len(b.classes)): - if b.classes[i] > self.box_confidence_score: - label = i - if label >= 0: - aux.append(b) - boxes = aux - del aux - - # if bounding boxes have been detected - dec_words = [] - if len(boxes) > 0: - # create set of images of the detected license plates - lps = [] - for b in boxes: - lp = image[b.ymin : b.ymax, b.xmin : b.xmax] - lps.append(lp) - - # run batch inference - try: - prediction_groups = self.recognition_model_pipeline.recognize(lps) - except ValueError: - # exception can occur when the images are too small - prediction_groups = [] - - # process pipeline output - 
image_list = [] - for img_predictions in prediction_groups: - boxes_per_image = [] - for predictions in img_predictions: - boxes_per_image.append([predictions[0], predictions[1].tolist()]) - image_list.append(boxes_per_image) - - # reorder text within detected LPs based on horizontal position - dec_lps = preprocess_utils.reorder_recognized_words(image_list) - for dec_lp in dec_lps: - dec_words.append([word[0] for word in dec_lp]) - - # if there are no recognized LPs, then don't draw them - if len(dec_words) == 0: - dec_words = [[] for i in range(len(boxes))] - - # draw predictions as overlays on the source image - draw_image = bbox_utils.draw_boxes( - image, - boxes, - overlay_text=dec_words, - labels=["LP"], - obj_thresh=self.box_confidence_score, - ) - - # image represented in bytes - byte_im = preprocess_utils.image_to_jpeg_bytes(draw_image) - - # encode image - image_enc = base64.b64encode(byte_im).decode("utf-8") - - # image with draw boxes overlayed - return image_enc diff --git a/examples/tensorflow/license-plate-reader/predictor_yolo.py b/examples/tensorflow/license-plate-reader/predictor_yolo.py deleted file mode 100644 index 7648b66960..0000000000 --- a/examples/tensorflow/license-plate-reader/predictor_yolo.py +++ /dev/null @@ -1,46 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import json -import base64 -import numpy as np -import cv2 -import pickle -import utils.utils as utils - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - - with open(config["model_config"]) as json_file: - data = json.load(json_file) - for key in data: - setattr(self, key, data[key]) - - def predict(self, payload): - # decode the payload - img = payload["img"] - img = base64.b64decode(img) - jpg_as_np = np.frombuffer(img, dtype=np.uint8) - image = cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR) - - # detect the bounding boxes - boxes = utils.get_yolo_boxes( - self.client, - image, - self.net_h, - self.net_w, - self.anchors, - self.obj_thresh, - self.nms_thresh, - len(self.labels), - ) - - # package the response - response = {"boxes": []} - for box in boxes: - response["boxes"].append( - [box.xmin, box.ymin, box.xmax, box.ymax, float(box.c), box.classes.tolist()] - ) - - return response diff --git a/examples/tensorflow/license-plate-reader/requirements.txt b/examples/tensorflow/license-plate-reader/requirements.txt deleted file mode 100644 index 0fb87fcf23..0000000000 --- a/examples/tensorflow/license-plate-reader/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -keras-ocr==0.8.5 -keras==2.3.1 -tensorflow==2.3.0 -scipy==1.4.1 -numpy==1.18.* diff --git a/examples/tensorflow/license-plate-reader/sample_inference.py b/examples/tensorflow/license-plate-reader/sample_inference.py deleted file mode 100644 index 11e217ec78..0000000000 --- a/examples/tensorflow/license-plate-reader/sample_inference.py +++ /dev/null @@ -1,100 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import click, cv2, requests, pickle, base64, json -import numpy as np -import utils.bbox as bbox_utils -import utils.preprocess as preprocess_utils - - -@click.command( - help=( - "Identify license plates in a given image" - " while outsourcing the predictions using the REST API endpoints." - " Both API endpoints have to be exported as environment variables." - ) -) -@click.argument("img_url_src", type=str) -@click.argument("yolov3_endpoint", envvar="YOLOV3_ENDPOINT") -@click.argument("crnn_endpoint", envvar="CRNN_ENDPOINT") -@click.option( - "--output", - "-o", - type=str, - default="prediction.jpg", - show_default=True, - help="File to save the prediction to.", -) -def main(img_url_src, yolov3_endpoint, crnn_endpoint, output): - - # get the image in bytes representation - image = preprocess_utils.get_url_image(img_url_src) - image_bytes = preprocess_utils.image_to_jpeg_bytes(image) - - # encode image - image_enc = base64.b64encode(image_bytes).decode("utf-8") - image_dump = json.dumps({"img": image_enc}) - - # make yolov3 api request - resp = requests.post( - yolov3_endpoint, data=image_dump, headers={"content-type": "application/json"} - ) - - # parse response - boxes_raw = resp.json()["boxes"] - boxes = [] - for b in boxes_raw: - box = bbox_utils.BoundBox(*b) - boxes.append(box) - - # purge bounding boxes with a low confidence score - confidence_score = 0.8 - aux = [] - for b in boxes: - label = -1 - for i in range(len(b.classes)): - if b.classes[i] > confidence_score: - label = i - if label >= 0: - aux.append(b) - boxes = aux - del aux - - dec_words = [] - if len(boxes) > 0: - # create set of images of the detected license plates - lps = [] - for b in boxes: - lp = image[b.ymin : b.ymax, b.xmin : b.xmax] - jpeg = preprocess_utils.image_to_jpeg_nparray(lp) - lps.append(jpeg) - - # encode the cropped license plates - lps = pickle.dumps(lps, protocol=0) - lps_enc = base64.b64encode(lps).decode("utf-8") - lps_dump = json.dumps({"imgs": lps_enc}) - - # make crnn api request - resp = requests.post( - crnn_endpoint, data=lps_dump, headers={"content-type": "application/json"} - ) - - # parse the response - dec_lps = resp.json()["license-plates"] - dec_lps = preprocess_utils.reorder_recognized_words(dec_lps) - for dec_lp in dec_lps: - dec_words.append([word[0] for word in dec_lp]) - - if len(dec_words) == 0: - dec_words = [[] for i in range(len(boxes))] - - # draw predictions as overlays on the source image - draw_image = bbox_utils.draw_boxes( - image, boxes, overlay_text=dec_words, labels=["LP"], obj_thresh=confidence_score - ) - - # and save it to disk - cv2.imwrite(output, draw_image) - - -if __name__ == "__main__": - main() diff --git a/examples/tensorflow/license-plate-reader/utils/__init__.py b/examples/tensorflow/license-plate-reader/utils/__init__.py deleted file mode 100644 index 5f47d63e43..0000000000 --- a/examples/tensorflow/license-plate-reader/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) diff --git a/examples/tensorflow/license-plate-reader/utils/bbox.py b/examples/tensorflow/license-plate-reader/utils/bbox.py deleted file mode 100644 index de9c7ef8c0..0000000000 --- a/examples/tensorflow/license-plate-reader/utils/bbox.py +++ /dev/null @@ -1,111 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import numpy as np -import cv2 -from .colors import get_color - - -class BoundBox: - def __init__(self, xmin, ymin, xmax, ymax, c=None, classes=None): - self.xmin = xmin - self.ymin = ymin - self.xmax = xmax - self.ymax = ymax - - self.c = c - self.classes = classes - - self.label = -1 - self.score = -1 - - def get_label(self): - if self.label == -1: - self.label = np.argmax(self.classes) - - return self.label - - def get_score(self): - if self.score == -1: - self.score = self.classes[self.get_label()] - - return self.score - - -def _interval_overlap(interval_a, interval_b): - x1, x2 = interval_a - x3, x4 = interval_b - - if x3 < x1: - if x4 < x1: - return 0 - else: - return min(x2, x4) - x1 - else: - if x2 < x3: - return 0 - else: - return min(x2, x4) - x3 - - -def bbox_iou(box1, box2): - intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax]) - intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax]) - - intersect = intersect_w * intersect_h - - w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin - w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin - - union = w1 * h1 + w2 * h2 - intersect - - return float(intersect) / union - - -def draw_boxes(image, boxes, overlay_text, labels, obj_thresh, quiet=True): - for box, overlay in zip(boxes, overlay_text): - label_str = "" - label = -1 - - for i in range(len(labels)): - if box.classes[i] > obj_thresh: - if label_str != "": - label_str += ", " - label_str += labels[i] + " " + str(round(box.get_score() * 100, 2)) + "%" - label = i - if not quiet: - print(label_str) - - if label >= 0: - if len(overlay) > 0: - text = label_str + ": [" + " ".join(overlay) + "]" - else: - text = label_str - text = text.upper() - text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5) - width, height = text_size[0][0], text_size[0][1] - region = np.array( - [ - [box.xmin - 3, box.ymin], - [box.xmin - 3, box.ymin - height - 26], - [box.xmin + width + 13, box.ymin - height - 26], - [box.xmin + width + 13, box.ymin], - ], - dtype="int32", - ) - - # cv2.rectangle(img=image, pt1=(box.xmin,box.ymin), pt2=(box.xmax,box.ymax), color=get_color(label), thickness=5) - rec = (box.xmin, box.ymin, box.xmax - box.xmin, box.ymax - box.ymin) - rec = tuple(int(i) for i in rec) - cv2.rectangle(img=image, rec=rec, color=get_color(label), thickness=3) - cv2.fillPoly(img=image, pts=[region], color=get_color(label)) - cv2.putText( - img=image, - text=text, - org=(box.xmin + 13, box.ymin - 13), - fontFace=cv2.FONT_HERSHEY_SIMPLEX, - fontScale=1e-3 * image.shape[0], - color=(0, 0, 0), - thickness=1, - ) - - return image diff --git a/examples/tensorflow/license-plate-reader/utils/colors.py b/examples/tensorflow/license-plate-reader/utils/colors.py deleted file mode 100644 index 2902c4e5aa..0000000000 --- a/examples/tensorflow/license-plate-reader/utils/colors.py +++ /dev/null @@ -1,100 +0,0 @@ -# WARNING: you are on the master branch; please 
refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - - -def get_color(label): - """Return a color from a set of predefined colors. Contains 80 colors in total. - code originally from https://github.com/fizyr/keras-retinanet/ - Args - label: The label to get the color for. - Returns - A list of three values representing a RGB color. - """ - if label < len(colors): - return colors[label] - else: - print("Label {} has no color, returning default.".format(label)) - return (0, 255, 0) - - -colors = [ - [31, 0, 255], - [0, 159, 255], - [255, 95, 0], - [255, 19, 0], - [255, 0, 0], - [255, 38, 0], - [0, 255, 25], - [255, 0, 133], - [255, 172, 0], - [108, 0, 255], - [0, 82, 255], - [0, 255, 6], - [255, 0, 152], - [223, 0, 255], - [12, 0, 255], - [0, 255, 178], - [108, 255, 0], - [184, 0, 255], - [255, 0, 76], - [146, 255, 0], - [51, 0, 255], - [0, 197, 255], - [255, 248, 0], - [255, 0, 19], - [255, 0, 38], - [89, 255, 0], - [127, 255, 0], - [255, 153, 0], - [0, 255, 255], - [0, 255, 216], - [0, 255, 121], - [255, 0, 248], - [70, 0, 255], - [0, 255, 159], - [0, 216, 255], - [0, 6, 255], - [0, 63, 255], - [31, 255, 0], - [255, 57, 0], - [255, 0, 210], - [0, 255, 102], - [242, 255, 0], - [255, 191, 0], - [0, 255, 63], - [255, 0, 95], - [146, 0, 255], - [184, 255, 0], - [255, 114, 0], - [0, 255, 235], - [255, 229, 0], - [0, 178, 255], - [255, 0, 114], - [255, 0, 57], - [0, 140, 255], - [0, 121, 255], - [12, 255, 0], - [255, 210, 0], - [0, 255, 44], - [165, 255, 0], - [0, 25, 255], - [0, 255, 140], - [0, 101, 255], - [0, 255, 82], - [223, 255, 0], - [242, 0, 255], - [89, 0, 255], - [165, 0, 255], - [70, 255, 0], - [255, 0, 172], - [255, 76, 0], - [203, 255, 0], - [204, 0, 255], - [255, 0, 229], - [255, 133, 0], - [127, 0, 255], - [0, 235, 255], - [0, 255, 197], - [255, 0, 191], - [0, 44, 255], - [50, 255, 0], -] diff --git a/examples/tensorflow/license-plate-reader/utils/preprocess.py b/examples/tensorflow/license-plate-reader/utils/preprocess.py deleted file mode 100644 index 5e40a35719..0000000000 --- a/examples/tensorflow/license-plate-reader/utils/preprocess.py +++ /dev/null @@ -1,59 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import numpy as np -import cv2, requests -from statistics import mean - - -def get_url_image(url_image): - """ - Get numpy image from URL image. - """ - resp = requests.get(url_image, stream=True).raw - image = np.asarray(bytearray(resp.read()), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - return image - - -def image_to_jpeg_nparray(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): - """ - Convert numpy image to jpeg numpy vector. - """ - is_success, im_buf_arr = cv2.imencode(".jpg", image, quality) - return im_buf_arr - - -def image_to_jpeg_bytes(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): - """ - Convert numpy image to bytes-encoded jpeg image. - """ - buf = image_to_jpeg_nparray(image, quality) - byte_im = buf.tobytes() - return byte_im - - -def reorder_recognized_words(detected_images): - """ - Reorder the detected words in each image based on the average horizontal position of each word. - Sorting them in ascending order. 
- """ - - reordered_images = [] - for detected_image in detected_images: - - # computing the mean average position for each word - mean_horizontal_positions = [] - for words in detected_image: - box = words[1] - y_positions = [point[0] for point in box] - mean_y_position = mean(y_positions) - mean_horizontal_positions.append(mean_y_position) - indexes = np.argsort(mean_horizontal_positions) - - # and reordering them - reordered = [] - for index, words in zip(indexes, detected_image): - reordered.append(detected_image[index]) - reordered_images.append(reordered) - - return reordered_images diff --git a/examples/tensorflow/license-plate-reader/utils/utils.py b/examples/tensorflow/license-plate-reader/utils/utils.py deleted file mode 100644 index 9d07b289e0..0000000000 --- a/examples/tensorflow/license-plate-reader/utils/utils.py +++ /dev/null @@ -1,160 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import cv2 -import numpy as np -import math -from .bbox import BoundBox, bbox_iou -from scipy.special import expit - - -def _sigmoid(x): - return expit(x) - - -def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w): - if (float(net_w) / image_w) < (float(net_h) / image_h): - new_w = net_w - new_h = (image_h * net_w) / image_w - else: - new_h = net_w - new_w = (image_w * net_h) / image_h - - for i in range(len(boxes)): - x_offset, x_scale = (net_w - new_w) / 2.0 / net_w, float(new_w) / net_w - y_offset, y_scale = (net_h - new_h) / 2.0 / net_h, float(new_h) / net_h - - boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w) - boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w) - boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h) - boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h) - - -def do_nms(boxes, nms_thresh): - if len(boxes) > 0: - nb_class = len(boxes[0].classes) - else: - return - - for c in range(nb_class): - sorted_indices = np.argsort([-box.classes[c] for box in boxes]) - - for i in range(len(sorted_indices)): - index_i = sorted_indices[i] - - if boxes[index_i].classes[c] == 0: - continue - - for j in range(i + 1, len(sorted_indices)): - index_j = sorted_indices[j] - - if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh: - boxes[index_j].classes[c] = 0 - - -def decode_netout(netout, anchors, obj_thresh, net_h, net_w): - grid_h, grid_w = netout.shape[:2] - nb_box = 3 - netout = netout.reshape((grid_h, grid_w, nb_box, -1)) - nb_class = netout.shape[-1] - 5 - - boxes = [] - - netout[..., :2] = _sigmoid(netout[..., :2]) - netout[..., 4] = _sigmoid(netout[..., 4]) - netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:]) - netout[..., 5:] *= netout[..., 5:] > obj_thresh - - for i in range(grid_h * grid_w): - row = i // grid_w - col = i % grid_w - - for b in range(nb_box): - # 4th element is objectness score - objectness = netout[row, col, b, 4] - - if objectness <= obj_thresh: - continue - - # first 4 elements are x, y, w, and h - x, y, w, h = netout[row, col, b, :4] - - x = (col + x) / grid_w # center position, unit: image width - y = (row + y) / grid_h # center position, unit: image height - w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width - h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height - - # last elements are class probabilities - classes = netout[row, col, b, 5:] - - box = BoundBox(x - 
w / 2, y - h / 2, x + w / 2, y + h / 2, objectness, classes) - - boxes.append(box) - - return boxes - - -def preprocess_input(image, net_h, net_w): - new_h, new_w, _ = image.shape - - # determine the new size of the image - if (float(net_w) / new_w) < (float(net_h) / new_h): - new_h = (new_h * net_w) // new_w - new_w = net_w - else: - new_w = (new_w * net_h) // new_h - new_h = net_h - - # resize the image to the new size - resized = cv2.resize(image[:, :, ::-1] / 255.0, (new_w, new_h)) - - # embed the image into the standard letter box - new_image = np.ones((net_h, net_w, 3)) * 0.5 - new_image[ - (net_h - new_h) // 2 : (net_h + new_h) // 2, (net_w - new_w) // 2 : (net_w + new_w) // 2, : - ] = resized - new_image = np.expand_dims(new_image, 0) - - return new_image - - -def get_yolo_boxes( - model, image, net_h, net_w, anchors, obj_thresh, nms_thresh, classes, tensorflow_model=True -): - # preprocess the input - image_h, image_w, _ = image.shape - batch_input = np.zeros((1, net_h, net_w, 3)) - batch_input[0] = preprocess_input(image, net_h, net_w) - - # run the prediction - if tensorflow_model: - output = model.predict({"input_1": batch_input}) - yolos = [output["conv_81"], output["conv_93"], output["conv_105"]] - filters = 3 * (5 + classes) - for i in range(len(yolos)): - length = len(yolos[i]) - box_size = int(math.sqrt(length / filters)) - yolos[i] = np.array(yolos[i]).reshape((box_size, box_size, filters)) - else: - output = model.predict_on_batch(batch_input) - yolos = [output[0][0], output[1][0], output[2][0]] - - boxes = [] - # decode the output of the network - for j in range(len(yolos)): - yolo_anchors = anchors[(2 - j) * 6 : (3 - j) * 6] # config['model']['anchors'] - boxes += decode_netout(yolos[j], yolo_anchors, obj_thresh, net_h, net_w) - - # correct the sizes of the bounding boxes - correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w) - - # suppress non-maximal boxes - do_nms(boxes, nms_thresh) - - return boxes - - -def _softmax(x, axis=-1): - x = x - np.amax(x, axis, keepdims=True) - e_x = np.exp(x) - - return e_x / e_x.sum(axis, keepdims=True) diff --git a/examples/tensorflow/multi-model-classifier/requirements.txt b/examples/tensorflow/multi-model-classifier/requirements.txt deleted file mode 100644 index 7e2fba5e6c..0000000000 --- a/examples/tensorflow/multi-model-classifier/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Pillow diff --git a/examples/tensorflow/multi-model-classifier/sample-iris.json b/examples/tensorflow/multi-model-classifier/sample-iris.json deleted file mode 100644 index 67c03827f2..0000000000 --- a/examples/tensorflow/multi-model-classifier/sample-iris.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": { - "sepal_length": 5.2, - "sepal_width": 3.6, - "petal_length": 1.4, - "petal_width": 0.3 - } -} diff --git a/examples/tensorflow/sentiment-analyzer/README.md b/examples/tensorflow/sentiment-analyzer/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/tensorflow/sentiment-analyzer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. 
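For reference, the sentiment-analyzer API defined by the files below (its `cortex.yaml`, `predictor.py`, and `sample.json` appear later in this diff) could be queried with the same request pattern as the other examples; a hedged sketch, with a hypothetical `SENTIMENT_ANALYZER_ENDPOINT` environment variable standing in for the deployed endpoint:

```python
# Hypothetical client call (not part of the example): query the sentiment-analyzer
# API with the payload from its sample.json; the endpoint variable is a placeholder.
import os

import requests

endpoint = os.environ["SENTIMENT_ANALYZER_ENDPOINT"]
response = requests.post(
    endpoint,
    json={"review": "the movie was amazing!"},
    headers={"content-type": "application/json"},
)
print(response.json())  # "positive" or "negative"
```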
diff --git a/examples/tensorflow/sentiment-analyzer/bert.ipynb b/examples/tensorflow/sentiment-analyzer/bert.ipynb deleted file mode 100644 index 27ca8c67b1..0000000000 --- a/examples/tensorflow/sentiment-analyzer/bert.ipynb +++ /dev/null @@ -1,1007 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "bert.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "accelerator": "GPU" - }, - "cells": [ - { - "cell_type": "code", - "metadata": { - "id": "j0a4mTk9o1Qg", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Modified source from https://colab.research.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb\n", - "\n", - "# Copyright 2019 Google Inc.\n", - "\n", - "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "\n", - "# http://www.apache.org/licenses/LICENSE-2.0\n", - "\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "dCpvgG0vwXAZ", - "colab_type": "text" - }, - "source": [ - "#Predicting Movie Review Sentiment with BERT on TF Hub", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xiYrZKaHwV81", - "colab_type": "text" - }, - "source": [ - "If you’ve been following Natural Language Processing over the past year, you’ve probably heard of BERT: Bidirectional Encoder Representations from Transformers. It’s a neural network architecture designed by Google researchers that’s totally transformed what’s state-of-the-art for NLP tasks, like text classification, translation, summarization, and question answering.\n", - "\n", - "Now that BERT's been added to [TF Hub](https://www.tensorflow.org/hub) as a loadable module, it's easy(ish) to add into existing TensorFlow text pipelines. In an existing pipeline, BERT can replace text embedding layers like ELMO and GloVE. Alternatively, [finetuning](http://wiki.fast.ai/index.php/Fine_tuning) BERT can provide both an accuracy boost and faster training time in many cases.\n", - "\n", - "Here, we'll train a model to predict whether an IMDB movie review is positive or negative using BERT in TensorFlow with tf hub. Some code was adapted from [this colab notebook](https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb). Let's get started!" 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "chM4UttbMIqq", - "colab_type": "text" - }, - "source": [ - "First, we'll install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jviywGyWyKsA", - "colab_type": "code", - "colab": {} - }, - "source": [ - "!pip install bert-tensorflow==1.0.* tensorflow-gpu==1.13.* scikit-learn==0.21.* pandas==0.24.* tensorflow-hub==0.6.* boto3==1.*" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "hsZvic2YxnTz", - "colab_type": "code", - "colab": {} - }, - "source": [ - "from datetime import datetime\n", - "\n", - "from sklearn.model_selection import train_test_split\n", - "import pandas as pd\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "\n", - "import bert\n", - "from bert import run_classifier\n", - "from bert import optimization\n", - "from bert import tokenization" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "KVB3eOcjxxm1", - "colab_type": "text" - }, - "source": [ - "Below, we'll set an output location to store our model output, checkpoints, and export in a local directory. Note: if you're running on Google Colab, local directories don't persist after the session ends." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "US_EAnICvP7f", - "colab_type": "code", - "colab": {} - }, - "source": [ - "OUTPUT_DIR = \"bert\"\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pmFYvkylMwXn", - "colab_type": "text" - }, - "source": [ - "#Data" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MC_w8SRqN0fr", - "colab_type": "text" - }, - "source": [ - "First, let's download the dataset, hosted by Stanford. The code below, which downloads, extracts, and imports the IMDB Large Movie Review Dataset, is borrowed from [this TensorFlow tutorial](https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "fom_ff20gyy6", - "colab_type": "code", - "colab": {} - }, - "source": [ - "from tensorflow import keras\n", - "import os\n", - "import re\n", - "\n", - "# Load all files from a directory in a DataFrame.\n", - "def load_directory_data(directory):\n", - " data = {}\n", - " data[\"sentence\"] = []\n", - " data[\"sentiment\"] = []\n", - " for file_path in os.listdir(directory):\n", - " with tf.gfile.GFile(os.path.join(directory, file_path), \"r\") as f:\n", - " data[\"sentence\"].append(f.read())\n", - " data[\"sentiment\"].append(re.match(\"\\d+_(\\d+)\\.txt\", file_path).group(1))\n", - " return pd.DataFrame.from_dict(data)\n", - "\n", - "# Merge positive and negative examples, add a polarity column and shuffle.\n", - "def load_dataset(directory):\n", - " pos_df = load_directory_data(os.path.join(directory, \"pos\"))\n", - " neg_df = load_directory_data(os.path.join(directory, \"neg\"))\n", - " pos_df[\"polarity\"] = 1\n", - " neg_df[\"polarity\"] = 0\n", - " return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)\n", - "\n", - "# Download and process the dataset files.\n", - "def download_and_load_datasets(force_download=False):\n", - " dataset = tf.keras.utils.get_file(\n", - " fname=\"aclImdb.tar.gz\", \n", - " origin=\"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\", \n", - " extract=True)\n", - " \n", - " train_df = load_dataset(os.path.join(os.path.dirname(dataset), \n", - " \"aclImdb\", \"train\"))\n", - " test_df = load_dataset(os.path.join(os.path.dirname(dataset), \n", - " \"aclImdb\", \"test\"))\n", - " \n", - " return train_df, test_df\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "2abfwdn-g135", - "colab_type": "code", - "colab": {} - }, - "source": [ - "train, test = download_and_load_datasets()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "XA8WHJgzhIZf", - "colab_type": "text" - }, - "source": [ - "To keep training fast, we'll take a sample of 5000 train and test examples, respectively." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "lw_F488eixTV", - "colab_type": "code", - "colab": {} - }, - "source": [ - "train = train.sample(5000)\n", - "test = test.sample(5000)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "prRQM8pDi8xI", - "colab_type": "code", - "colab": {} - }, - "source": [ - "train.columns" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "sfRnHSz3iSXz", - "colab_type": "text" - }, - "source": [ - "For us, our input data is the 'sentence' column and our label is the 'polarity' column (0, 1 for negative and positive, respecitvely)" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "IuMOGwFui4it", - "colab_type": "code", - "colab": {} - }, - "source": [ - "DATA_COLUMN = 'sentence'\n", - "LABEL_COLUMN = 'polarity'\n", - "# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'\n", - "label_list = [0, 1]" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "V399W0rqNJ-Z", - "colab_type": "text" - }, - "source": [ - "#Data Preprocessing\n", - "We'll need to transform our data into a format BERT understands. This involves two steps. 
First, we create `InputExample`'s using the constructor provided in the BERT library.\n", - "\n", - "- `text_a` is the text we want to classify, which in this case, is the `Request` field in our Dataframe. \n", - "- `text_b` is used if we're training a model to understand the relationship between sentences (i.e. is `text_b` a translation of `text_a`? Is `text_b` an answer to the question asked by `text_a`?). This doesn't apply to our task, so we can leave `text_b` blank.\n", - "- `label` is the label for our example, i.e. True, False" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "p9gEt5SmM6i6", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Use the InputExample class from BERT's run_classifier code to create examples from the data\n", - "train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example\n", - " text_a = x[DATA_COLUMN], \n", - " text_b = None, \n", - " label = x[LABEL_COLUMN]), axis = 1)\n", - "\n", - "test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None, \n", - " text_a = x[DATA_COLUMN], \n", - " text_b = None, \n", - " label = x[LABEL_COLUMN]), axis = 1)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "SCZWZtKxObjh", - "colab_type": "text" - }, - "source": [ - "Next, we need to preprocess our data so that it matches the data BERT was trained on. For this, we'll need to do a couple of things (but don't worry--this is also included in the Python library):\n", - "\n", - "\n", - "1. Lowercase our text (if we're using a BERT lowercase model)\n", - "2. Tokenize it (i.e. \"sally says hi\" -> [\"sally\", \"says\", \"hi\"])\n", - "3. Break words into WordPieces (i.e. \"calling\" -> [\"call\", \"##ing\"])\n", - "4. Map our words to indexes using a vocab file that BERT provides\n", - "5. Add special \"CLS\" and \"SEP\" tokens (see the [readme](https://github.com/google-research/bert))\n", - "6. 
Append \"index\" and \"segment\" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf))\n", - "\n", - "Happily, we don't have to worry about most of these details.\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qMWiDtpyQSoU", - "colab_type": "text" - }, - "source": [ - "To start, we'll need to load a vocabulary file and lowercasing information directly from the BERT tf hub module:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "IhJSe0QHNG7U", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# This is a path to an uncased (all lowercase) version of BERT\n", - "BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n", - "\n", - "def create_tokenizer_from_hub_module():\n", - " \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n", - " with tf.Graph().as_default():\n", - " bert_module = hub.Module(BERT_MODEL_HUB)\n", - " tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n", - " with tf.Session() as sess:\n", - " vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n", - " tokenization_info[\"do_lower_case\"]])\n", - " \n", - " return bert.tokenization.FullTokenizer(\n", - " vocab_file=vocab_file, do_lower_case=do_lower_case)\n", - "\n", - "tokenizer = create_tokenizer_from_hub_module()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "z4oFkhpZBDKm", - "colab_type": "text" - }, - "source": [ - "Great--we just learned that the BERT model we're using expects lowercase data (that's what stored in tokenization_info[\"do_lower_case\"]) and we also loaded BERT's vocab file. We also created a tokenizer, which breaks words into word pieces:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "dsBo6RCtQmwx", - "colab_type": "code", - "colab": {} - }, - "source": [ - "tokenizer.tokenize(\"This here's an example of using the BERT tokenizer\")" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0OEzfFIt6GIc", - "colab_type": "text" - }, - "source": [ - "Using our tokenizer, we'll call `run_classifier.convert_examples_to_features` on our InputExamples to convert them into features BERT understands." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "LL5W8gEGRTAf", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# We'll set sequences to be at most 128 tokens long.\n", - "MAX_SEQ_LENGTH = 128\n", - "# Convert our train and test features to InputFeatures that BERT understands.\n", - "train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)\n", - "test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ccp5trMwRtmr", - "colab_type": "text" - }, - "source": [ - "#Creating a model\n", - "\n", - "Now that we've prepared our data, let's focus on building a model. `create_model` does just this below. First, it loads the BERT tf hub module again (this time to extract the computation graph). Next, it creates a single new layer that will be trained to adapt BERT to our sentiment task (i.e. classifying whether a movie review is positive or negative). This strategy of using a mostly trained model is called [fine-tuning](http://wiki.fast.ai/index.php/Fine_tuning)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "6o2a5ZIvRcJq", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,\n", - " num_labels):\n", - " \"\"\"Creates a classification model.\"\"\"\n", - "\n", - " bert_module = hub.Module(\n", - " BERT_MODEL_HUB,\n", - " trainable=True)\n", - " bert_inputs = dict(\n", - " input_ids=input_ids,\n", - " input_mask=input_mask,\n", - " segment_ids=segment_ids)\n", - " bert_outputs = bert_module(\n", - " inputs=bert_inputs,\n", - " signature=\"tokens\",\n", - " as_dict=True)\n", - "\n", - " # Use \"pooled_output\" for classification tasks on an entire sentence.\n", - " # Use \"sequence_outputs\" for token-level output.\n", - " output_layer = bert_outputs[\"pooled_output\"]\n", - "\n", - " hidden_size = output_layer.shape[-1].value\n", - "\n", - " # Create our own layer to tune for politeness data.\n", - " output_weights = tf.get_variable(\n", - " \"output_weights\", [num_labels, hidden_size],\n", - " initializer=tf.truncated_normal_initializer(stddev=0.02))\n", - "\n", - " output_bias = tf.get_variable(\n", - " \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n", - "\n", - " with tf.variable_scope(\"loss\"):\n", - "\n", - " # Dropout helps prevent overfitting\n", - " output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n", - "\n", - " logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n", - " logits = tf.nn.bias_add(logits, output_bias)\n", - " log_probs = tf.nn.log_softmax(logits, axis=-1)\n", - "\n", - " # Convert labels into one-hot encoding\n", - " one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n", - "\n", - " predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n", - " # If we're predicting, we want predicted labels and the probabiltiies.\n", - " if is_predicting:\n", - " return (predicted_labels, log_probs)\n", - "\n", - " # If we're train/eval, compute loss between predicted and actual label\n", - " per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n", - " loss = tf.reduce_mean(per_example_loss)\n", - " return (loss, predicted_labels, log_probs)\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qpE0ZIDOCQzE", - "colab_type": "text" - }, - "source": [ - "Next we'll wrap our model function in a `model_fn_builder` function that adapts our model to work for training, evaluation, and prediction." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "FnH-AnOQ9KKW", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# model_fn_builder actually creates our model function\n", - "# using the passed parameters for num_labels, learning_rate, etc.\n", - "def model_fn_builder(num_labels, learning_rate, num_train_steps,\n", - " num_warmup_steps):\n", - " \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n", - " def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n", - " \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n", - "\n", - " input_ids = features[\"input_ids\"]\n", - " input_mask = features[\"input_mask\"]\n", - " segment_ids = features[\"segment_ids\"]\n", - " label_ids = features[\"label_ids\"]\n", - "\n", - " is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n", - " \n", - " # TRAIN and EVAL\n", - " if not is_predicting:\n", - "\n", - " (loss, predicted_labels, log_probs) = create_model(\n", - " is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n", - "\n", - " train_op = bert.optimization.create_optimizer(\n", - " loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n", - "\n", - " # Calculate evaluation metrics. \n", - " def metric_fn(label_ids, predicted_labels):\n", - " accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n", - " f1_score = tf.contrib.metrics.f1_score(\n", - " label_ids,\n", - " predicted_labels)\n", - " auc = tf.metrics.auc(\n", - " label_ids,\n", - " predicted_labels)\n", - " recall = tf.metrics.recall(\n", - " label_ids,\n", - " predicted_labels)\n", - " precision = tf.metrics.precision(\n", - " label_ids,\n", - " predicted_labels) \n", - " true_pos = tf.metrics.true_positives(\n", - " label_ids,\n", - " predicted_labels)\n", - " true_neg = tf.metrics.true_negatives(\n", - " label_ids,\n", - " predicted_labels) \n", - " false_pos = tf.metrics.false_positives(\n", - " label_ids,\n", - " predicted_labels) \n", - " false_neg = tf.metrics.false_negatives(\n", - " label_ids,\n", - " predicted_labels)\n", - " return {\n", - " \"eval_accuracy\": accuracy,\n", - " \"f1_score\": f1_score,\n", - " \"auc\": auc,\n", - " \"precision\": precision,\n", - " \"recall\": recall,\n", - " \"true_positives\": true_pos,\n", - " \"true_negatives\": true_neg,\n", - " \"false_positives\": false_pos,\n", - " \"false_negatives\": false_neg\n", - " }\n", - "\n", - " eval_metrics = metric_fn(label_ids, predicted_labels)\n", - "\n", - " if mode == tf.estimator.ModeKeys.TRAIN:\n", - " return tf.estimator.EstimatorSpec(mode=mode,\n", - " loss=loss,\n", - " train_op=train_op)\n", - " else:\n", - " return tf.estimator.EstimatorSpec(mode=mode,\n", - " loss=loss,\n", - " eval_metric_ops=eval_metrics)\n", - " else:\n", - " (predicted_labels, log_probs) = create_model(\n", - " is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n", - "\n", - " predictions = {\n", - " 'probabilities': log_probs,\n", - " 'labels': predicted_labels\n", - " }\n", - " return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n", - "\n", - " # Return the actual model function in the closure\n", - " return model_fn\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "OjwJ4bTeWXD8", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Compute train and warmup steps from batch size\n", - "# These hyperparameters are copied from this colab notebook 
(https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\n", - "BATCH_SIZE = 32\n", - "LEARNING_RATE = 2e-5\n", - "NUM_TRAIN_EPOCHS = 3.0\n", - "# Warmup is a period of time where hte learning rate \n", - "# is small and gradually increases--usually helps training.\n", - "WARMUP_PROPORTION = 0.1\n", - "# Model configs\n", - "SAVE_CHECKPOINTS_STEPS = 500\n", - "SAVE_SUMMARY_STEPS = 100" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "emHf9GhfWBZ_", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Compute # train and warmup steps from batch size\n", - "num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)\n", - "num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "oEJldMr3WYZa", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Specify outpit directory and number of checkpoint steps to save\n", - "run_config = tf.estimator.RunConfig(\n", - " model_dir=OUTPUT_DIR,\n", - " save_summary_steps=SAVE_SUMMARY_STEPS,\n", - " save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "q_WebpS1X97v", - "colab_type": "code", - "colab": {} - }, - "source": [ - "model_fn = model_fn_builder(\n", - " num_labels=len(label_list),\n", - " learning_rate=LEARNING_RATE,\n", - " num_train_steps=num_train_steps,\n", - " num_warmup_steps=num_warmup_steps)\n", - "\n", - "estimator = tf.estimator.Estimator(\n", - " model_fn=model_fn,\n", - " config=run_config,\n", - " params={\"batch_size\": BATCH_SIZE})\n" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NOO3RfG1DYLo", - "colab_type": "text" - }, - "source": [ - "Next we create an input builder function that takes our training feature set (`train_features`) and produces a generator. This is a pretty standard design pattern for working with TensorFlow [Estimators](https://www.tensorflow.org/guide/estimators)." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "1Pv2bAlOX_-K", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Create an input function for training. drop_remainder = True for using TPUs.\n", - "train_input_fn = bert.run_classifier.input_fn_builder(\n", - " features=train_features,\n", - " seq_length=MAX_SEQ_LENGTH,\n", - " is_training=True,\n", - " drop_remainder=False)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "t6Nukby2EB6-", - "colab_type": "text" - }, - "source": [ - "Now we train our model! For me, using a Colab notebook running on Google's GPUs, training time is typically 8-14 minutes." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "nucD4gluYJmK", - "colab_type": "code", - "colab": {} - }, - "source": [ - "print(f'Beginning Training!')\n", - "current_time = datetime.now()\n", - "estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n", - "print(\"Training took time \", datetime.now() - current_time)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "CmbLTVniARy3", - "colab_type": "text" - }, - "source": [ - "Now let's use our test data to see how well our model did:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "JIhejfpyJ8Bx", - "colab_type": "code", - "colab": {} - }, - "source": [ - "test_input_fn = run_classifier.input_fn_builder(\n", - " features=test_features,\n", - " seq_length=MAX_SEQ_LENGTH,\n", - " is_training=False,\n", - " drop_remainder=False)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "PPVEXhNjYXC-", - "colab_type": "code", - "colab": {} - }, - "source": [ - "estimator.evaluate(input_fn=test_input_fn, steps=None)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ueKsULteiz1B", - "colab_type": "text" - }, - "source": [ - "Now let's write code to make predictions on new sentences:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "OsrbTD2EJTVl", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def getPrediction(in_sentences):\n", - " labels = [\"Negative\", \"Positive\"]\n", - " input_examples = [run_classifier.InputExample(guid=\"\", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, \"\" is just a dummy label\n", - " input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n", - " predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)\n", - " predictions = estimator.predict(predict_input_fn)\n", - " return [(sentence, prediction['probabilities'], labels[prediction['labels']]) for sentence, prediction in zip(in_sentences, predictions)]" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "-thbodgih_VJ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "pred_sentences = [\n", - " \"That movie was absolutely awful\",\n", - " \"The acting was a bit lacking\",\n", - " \"The film was creative and surprising\",\n", - " \"Absolutely fantastic!\"\n", - "]" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "QrZmvZySKQTm", - "colab_type": "code", - "colab": {} - }, - "source": [ - "predictions = getPrediction(pred_sentences)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MXkRiEBUqN3n", - "colab_type": "text" - }, - "source": [ - "Voila! We have a sentiment classifier!" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "ERkTE8-7oQLZ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "predictions" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "P3Tg7c47vfLE", - "colab_type": "text" - }, - "source": [ - "# Export the model\n", - "\n", - "We are now ready to export the model. The following code defines the serving input function and exports the model to `OUTPUT_DIR`." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "NfXsdV4qtlpW", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def serving_input_fn():\n", - " reciever_tensors = {\n", - " \"input_ids\": tf.placeholder(dtype=tf.int32,\n", - " shape=[1, MAX_SEQ_LENGTH])\n", - " }\n", - " features = {\n", - " \"input_ids\": reciever_tensors['input_ids'],\n", - " \"input_mask\": 1 - tf.cast(tf.equal(reciever_tensors['input_ids'], 0), dtype=tf.int32),\n", - " \"segment_ids\": tf.zeros(dtype=tf.int32, shape=[1, MAX_SEQ_LENGTH]),\n", - " \"label_ids\": tf.placeholder(tf.int32, [None], name='label_ids')\n", - " }\n", - " return tf.estimator.export.ServingInputReceiver(features, reciever_tensors)\n", - " \n", - "estimator._export_to_tpu = False\n", - "estimator.export_saved_model(OUTPUT_DIR+\"/export\", serving_input_fn)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "tIFTmUbcwI0w", - "colab_type": "text" - }, - "source": [ - "# Upload the model to AWS\n", - "\n", - "Cortex loads models from AWS, so we need to upload the exported model." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gByRzrnR_OBX", - "colab_type": "text" - }, - "source": [ - "Set these variables to configure your AWS credentials and model upload path:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "1bdCOb3z0_Gh", - "colab_type": "code", - "cellView": "form", - "colab": {} - }, - "source": [ - "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", - "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", - "S3_UPLOAD_PATH = \"s3://my-bucket/sentiment-analyzer/bert\" #@param {type:\"string\"}\n", - "\n", - "import sys\n", - "import re\n", - "\n", - "if AWS_ACCESS_KEY_ID == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", - "\n", - "elif AWS_SECRET_ACCESS_KEY == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", - "\n", - "else:\n", - " try:\n", - " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", - " except:\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "WLT09hZr_bhm", - "colab_type": "text" - }, - "source": [ - "Upload to S3:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jCN3BINl2sKN", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import os\n", - "import boto3\n", - "\n", - "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", - "\n", - "for dirpath, _, filenames in os.walk(OUTPUT_DIR+\"/export\"):\n", - " for filename in filenames:\n", - " filepath = os.path.join(dirpath, filename)\n", - " filekey = os.path.join(key, filepath[len(OUTPUT_DIR+\"/export/\"):])\n", - " print(\"Uploading s3://{}/{} ...\".format(bucket, filekey), end = '')\n", - " s3.upload_file(filepath, bucket, filekey)\n", - " print(\" ✓\")\n", - "\n", - "print(\"\\nUploaded model export directory to \" + S3_UPLOAD_PATH)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7XPKSHzf_d7M", - "colab_type": "text" - }, - "source": [ - "\n", - "That's it! 
See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/sentiment-analyzer) for how to deploy the model as an API." - ] - } - ] -} diff --git a/examples/tensorflow/sentiment-analyzer/cortex.yaml b/examples/tensorflow/sentiment-analyzer/cortex.yaml deleted file mode 100644 index 3e6447053e..0000000000 --- a/examples/tensorflow/sentiment-analyzer/cortex.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: sentiment-analyzer - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/sentiment-analyzer/bert/ - monitoring: - model_type: classification - compute: - cpu: 1 - gpu: 1 diff --git a/examples/tensorflow/sentiment-analyzer/predictor.py b/examples/tensorflow/sentiment-analyzer/predictor.py deleted file mode 100644 index 901f2bf349..0000000000 --- a/examples/tensorflow/sentiment-analyzer/predictor.py +++ /dev/null @@ -1,29 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import tensorflow as tf -import tensorflow_hub as hub -from bert import tokenization, run_classifier - -labels = ["negative", "positive"] - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - with tf.Graph().as_default(): - bert_module = hub.Module("https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1") - info = bert_module(signature="tokenization_info", as_dict=True) - with tf.Session() as sess: - vocab_file, do_lower_case = sess.run([info["vocab_file"], info["do_lower_case"]]) - self._tokenizer = tokenization.FullTokenizer( - vocab_file=vocab_file, do_lower_case=do_lower_case - ) - self.client = tensorflow_client - - def predict(self, payload): - input_example = run_classifier.InputExample(guid="", text_a=payload["review"], label=0) - input_feature = run_classifier.convert_single_example( - 0, input_example, [0, 1], 128, self._tokenizer - ) - model_input = {"input_ids": [input_feature.input_ids]} - prediction = self.client.predict(model_input) - return labels[prediction["labels"][0]] diff --git a/examples/tensorflow/sentiment-analyzer/requirements.txt b/examples/tensorflow/sentiment-analyzer/requirements.txt deleted file mode 100644 index 273614922e..0000000000 --- a/examples/tensorflow/sentiment-analyzer/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -bert-tensorflow==1.0.1 -tensorflow-hub==0.7.0 -tensorflow==1.15.* -tensorflow-serving-api==1.15.* -numpy==1.16.* diff --git a/examples/tensorflow/sentiment-analyzer/sample.json b/examples/tensorflow/sentiment-analyzer/sample.json deleted file mode 100644 index c433e33216..0000000000 --- a/examples/tensorflow/sentiment-analyzer/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "review": "the movie was amazing!" -} diff --git a/examples/tensorflow/text-generator/README.md b/examples/tensorflow/text-generator/README.md deleted file mode 100644 index 41a04891b3..0000000000 --- a/examples/tensorflow/text-generator/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/examples/tensorflow/text-generator/cortex.yaml b/examples/tensorflow/text-generator/cortex.yaml deleted file mode 100644 index d0e54b527d..0000000000 --- a/examples/tensorflow/text-generator/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: text-generator - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/text-generator/gpt-2/124M/ - compute: - cpu: 1 - gpu: 1 diff --git a/examples/tensorflow/text-generator/encoder.py b/examples/tensorflow/text-generator/encoder.py deleted file mode 100644 index 2f73dd509b..0000000000 --- a/examples/tensorflow/text-generator/encoder.py +++ /dev/null @@ -1,118 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -# This file includes code which was modified from https://github.com/openai/gpt-2 - -import json -import regex -from functools import lru_cache - - -@lru_cache() -def bytes_to_unicode(): - bs = ( - list(range(ord("!"), ord("~") + 1)) - + list(range(ord("¡"), ord("¬") + 1)) - + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2 ** 8): - if b not in bs: - bs.append(b) - cs.append(2 ** 8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -class Encoder: - def __init__(self, encoder, bpe_merges, errors="replace"): - self.encoder = encoder - self.decoder = {v: k for k, v in self.encoder.items()} - self.errors = errors - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) - self.cache = {} - self.pat = regex.compile( - r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" - ) - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token) - pairs = get_pairs(word) - - if not pairs: - return token - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - new_word.extend(word[i:j]) - i = j - except: - new_word.extend(word[i:]) - break - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def encode(self, text): - bpe_tokens = [] - for token in regex.findall(self.pat, text): - token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) - bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")) - return 
bpe_tokens - - def decode(self, tokens): - text = "".join([self.decoder[token] for token in tokens]) - text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) - return text - - -def get_encoder(s3_client): - encoder = json.load( - s3_client.get_object( - Bucket="cortex-examples", Key="tensorflow/text-generator/gpt-2/encoder.json" - )["Body"] - ) - bpe_data = ( - s3_client.get_object( - Bucket="cortex-examples", Key="tensorflow/text-generator/gpt-2/vocab.bpe" - )["Body"] - .read() - .decode("utf-8") - ) - bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]] - return Encoder(encoder=encoder, bpe_merges=bpe_merges) diff --git a/examples/tensorflow/text-generator/gpt-2.ipynb b/examples/tensorflow/text-generator/gpt-2.ipynb deleted file mode 100644 index 1597816fcd..0000000000 --- a/examples/tensorflow/text-generator/gpt-2.ipynb +++ /dev/null @@ -1,383 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "gpt-2.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "kc5cIgeEmv8o", - "colab_type": "text" - }, - "source": [ - "# Exporting GPT-2\n", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", - "\n", - "In this notebook, we'll show how to export [OpenAI's GPT-2 text generation model](https://github.com/openai/gpt-2) for serving." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RAWs29lAktOK", - "colab_type": "text" - }, - "source": [ - "First, we'll download the GPT-2 code repository:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "gHs3aaFaLUXq", - "colab_type": "code", - "colab": {} - }, - "source": [ - "!git clone --no-checkout https://github.com/openai/gpt-2.git\n", - "!cd gpt-2 && git reset --hard ac5d52295f8a1c3856ea24fb239087cc1a3d1131" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "A4al4P14nmni", - "colab_type": "text" - }, - "source": [ - "Next we'll specify the model size (choose one of 124M, 355M, or 774M):" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "3Y4bt6hkfuxY", - "colab_type": "code", - "colab": {}, - "cellView": "form" - }, - "source": [ - "import sys\n", - "\n", - "MODEL_SIZE = \"124M\" #@param {type:\"string\"}\n", - "\n", - "if MODEL_SIZE not in {\"124M\", \"355M\", \"774M\"}:\n", - " print(\"\\033[91m{}\\033[00m\".format('ERROR: MODEL_SIZE must be \"124M\", \"355M\", or \"774M\"'), file=sys.stderr)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "C6xRx0Monh_j", - "colab_type": "text" - }, - "source": [ - "We can use `download_model.py` to download the model:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Kb50Z6NjbJBN", - "colab_type": "code", - "colab": {} - }, - "source": [ - "!python3 ./gpt-2/download_model.py $MODEL_SIZE" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zz2ioOcpoPjV", - "colab_type": "text" - }, - "source": [ - "Next, we'll install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Vk4Q2RR-UZQm", - "colab_type": "code", - "colab": {} - }, - "source": [ - "!pip install tensorflow==1.14.* numpy==1.* boto3==1.*" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "KkVf5FmuUMrl", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import sys\n", - "import os\n", - "import time\n", - "import json\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "6Ay7qiQFoWRn", - "colab_type": "text" - }, - "source": [ - "Now we can export the model for serving:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "GdnYXr1IKaF0", - "colab_type": "code", - "colab": {} - }, - "source": [ - "sys.path.append(os.path.join(os.getcwd(), 'gpt-2/src'))\n", - "import model, sample\n", - "\n", - "def export_for_serving(\n", - " model_name='124M',\n", - " seed=None,\n", - " batch_size=1,\n", - " length=None,\n", - " temperature=1,\n", - " top_k=0,\n", - " models_dir='models'\n", - "):\n", - " \"\"\"\n", - " Export the model for TF Serving\n", - " :model_name=124M : String, which model to use\n", - " :seed=None : Integer seed for random number generators, fix seed to reproduce\n", - " results\n", - " :length=None : Number of tokens in generated text, if None (default), is\n", - " determined by model hyperparameters\n", - " :temperature=1 : Float value controlling randomness in boltzmann\n", - " distribution. Lower temperature results in less random completions. As the\n", - " temperature approaches zero, the model will become deterministic and\n", - " repetitive. 
Higher temperature results in more random completions.\n", - " :top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n", - " considered for each step (token), resulting in deterministic completions,\n", - " while 40 means 40 words are considered at each step. 0 (default) is a\n", - " special setting meaning no restrictions. 40 generally is a good value.\n", - " :models_dir : path to parent folder containing model subfolders\n", - " (i.e. contains the folder)\n", - " \"\"\"\n", - " models_dir = os.path.expanduser(os.path.expandvars(models_dir))\n", - "\n", - " hparams = model.default_hparams()\n", - " with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\n", - " hparams.override_from_dict(json.load(f))\n", - "\n", - " if length is None:\n", - " length = hparams.n_ctx\n", - " elif length > hparams.n_ctx:\n", - " raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n", - "\n", - " with tf.Session(graph=tf.Graph()) as sess:\n", - " context = tf.placeholder(tf.int32, [batch_size, None])\n", - " np.random.seed(seed)\n", - " tf.set_random_seed(seed)\n", - "\n", - " output = sample.sample_sequence(\n", - " hparams=hparams, length=length,\n", - " context=context,\n", - " batch_size=batch_size,\n", - " temperature=temperature, top_k=top_k\n", - " )\n", - "\n", - " saver = tf.train.Saver()\n", - " ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))\n", - " saver.restore(sess, ckpt)\n", - "\n", - " export_dir=os.path.join(models_dir, model_name, \"export\", str(time.time()).split('.')[0])\n", - " if not os.path.isdir(export_dir):\n", - " os.makedirs(export_dir)\n", - "\n", - " builder = tf.saved_model.builder.SavedModelBuilder(export_dir)\n", - " signature = predict_signature_def(inputs={'context': context},\n", - " outputs={'sample': output})\n", - "\n", - " builder.add_meta_graph_and_variables(sess,\n", - " [tf.saved_model.SERVING],\n", - " signature_def_map={\"predict\": signature},\n", - " strip_default_attrs=True)\n", - " builder.save()\n", - "\n", - "\n", - "export_for_serving(top_k=40, length=256, model_name=MODEL_SIZE)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hGfSohMrowmg", - "colab_type": "text" - }, - "source": [ - "## Upload the model to AWS\n", - "\n", - "Cortex loads models from AWS, so we need to upload the exported model." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BfB5QZ82ozj9", - "colab_type": "text" - }, - "source": [ - "Set these variables to configure your AWS credentials and model upload path:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "B2RNuNk7o1c5", - "colab_type": "code", - "colab": {}, - "cellView": "form" - }, - "source": [ - "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", - "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", - "S3_UPLOAD_PATH = \"s3://my-bucket/text-generator/gpt-2\" #@param {type:\"string\"}\n", - "\n", - "import sys\n", - "import re\n", - "\n", - "if AWS_ACCESS_KEY_ID == \"\":\n", - " print(\"\\033[91m {}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", - "\n", - "elif AWS_SECRET_ACCESS_KEY == \"\":\n", - " print(\"\\033[91m {}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", - "\n", - "else:\n", - " try:\n", - " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", - " except:\n", - " print(\"\\033[91m {}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ics0omsrpS8V", - "colab_type": "text" - }, - "source": [ - "Upload the model to S3:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "BnKncToppUhN", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import os\n", - "import boto3\n", - "\n", - "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", - "\n", - "for dirpath, _, filenames in os.walk(\"models/{}/export\".format(MODEL_SIZE)):\n", - " for filename in filenames:\n", - " filepath = os.path.join(dirpath, filename)\n", - " filekey = os.path.join(key, MODEL_SIZE, filepath[len(\"models/{}/export/\".format(MODEL_SIZE)):])\n", - " print(\"Uploading s3://{}/{} ...\".format(bucket, filekey), end = '')\n", - " s3.upload_file(filepath, bucket, filekey)\n", - " print(\" ✓\")\n", - "\n", - "print(\"\\nUploaded model export directory to {}/{}\".format(S3_UPLOAD_PATH, MODEL_SIZE))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "IIMVPhe2qkU4", - "colab_type": "text" - }, - "source": [ - "\n", - "We also need to upload `vocab.bpe` and `encoder.json`, so that the [encoder](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/text-generator/encoder.py) in the [Predictor](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/text-generator/predictor.py) can encode the input text before making a request to the model." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "YdN8MtZxsO9V", - "colab_type": "code", - "colab": {} - }, - "source": [ - "print(\"Uploading s3://{}/{}/vocab.bpe ...\".format(bucket, key), end = '')\n", - "s3.upload_file(os.path.join(\"models\", MODEL_SIZE, \"vocab.bpe\"), bucket, os.path.join(key, \"vocab.bpe\"))\n", - "print(\" ✓\")\n", - "\n", - "print(\"Uploading s3://{}/{}/encoder.json ...\".format(bucket, key), end = '')\n", - "s3.upload_file(os.path.join(\"models\", MODEL_SIZE, \"encoder.json\"), bucket, os.path.join(key, \"encoder.json\"))\n", - "print(\" ✓\")" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MsoxwahIpnTO", - "colab_type": "text" - }, - "source": [ - "\n", - "That's it! 
See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/text-generator) for how to deploy the model as an API." - ] - } - ] -} diff --git a/examples/tensorflow/text-generator/predictor.py b/examples/tensorflow/text-generator/predictor.py deleted file mode 100644 index 3cbc45e1d7..0000000000 --- a/examples/tensorflow/text-generator/predictor.py +++ /dev/null @@ -1,24 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import os -import boto3 -from botocore import UNSIGNED -from botocore.client import Config -from encoder import get_encoder - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - self.encoder = get_encoder(s3) - - def predict(self, payload): - model_input = {"context": [self.encoder.encode(payload["text"])]} - prediction = self.client.predict(model_input) - return self.encoder.decode(prediction["sample"]) diff --git a/examples/tensorflow/text-generator/requirements.txt b/examples/tensorflow/text-generator/requirements.txt deleted file mode 100644 index f064e1eb7e..0000000000 --- a/examples/tensorflow/text-generator/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -requests -regex diff --git a/examples/tensorflow/text-generator/sample.json b/examples/tensorflow/text-generator/sample.json deleted file mode 100644 index dfd2a2f433..0000000000 --- a/examples/tensorflow/text-generator/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "machine learning is" -} diff --git a/examples/traffic-splitter/model.py b/examples/traffic-splitter/model.py deleted file mode 100644 index fe29ff7b6d..0000000000 --- a/examples/traffic-splitter/model.py +++ /dev/null @@ -1,59 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable -from sklearn.datasets import load_iris -from sklearn.model_selection import train_test_split -from sklearn.metrics import accuracy_score - - -class IrisNet(nn.Module): - def __init__(self): - super(IrisNet, self).__init__() - self.fc1 = nn.Linear(4, 100) - self.fc2 = nn.Linear(100, 100) - self.fc3 = nn.Linear(100, 3) - self.softmax = nn.Softmax(dim=1) - - def forward(self, X): - X = F.relu(self.fc1(X)) - X = self.fc2(X) - X = self.fc3(X) - X = self.softmax(X) - return X - - -if __name__ == "__main__": - iris = load_iris() - X, y = iris.data, iris.target - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42) - - train_X = Variable(torch.Tensor(X_train).float()) - test_X = Variable(torch.Tensor(X_test).float()) - train_y = Variable(torch.Tensor(y_train).long()) - test_y = Variable(torch.Tensor(y_test).long()) - - model = IrisNet() - - criterion = nn.CrossEntropyLoss() - - optimizer = torch.optim.SGD(model.parameters(), lr=0.01) - - for epoch in range(1000): - optimizer.zero_grad() - out = model(train_X) - loss = criterion(out, train_y) - loss.backward() - optimizer.step() - - if epoch % 100 == 0: - print("number of epoch {} loss {}".format(epoch, loss)) - - predict_out = model(test_X) - _, predict_y = torch.max(predict_out, 1) - - print("prediction accuracy {}".format(accuracy_score(test_y.data, predict_y.data))) - - torch.save(model.state_dict(), "weights.pth") diff --git a/examples/traffic-splitter/README.md b/examples/traffic-splitting/README.md similarity index 100% rename from examples/traffic-splitter/README.md rename to examples/traffic-splitting/README.md diff --git a/examples/traffic-splitter/cortex.yaml b/examples/traffic-splitting/cortex.yaml similarity index 100% rename from examples/traffic-splitter/cortex.yaml rename to examples/traffic-splitting/cortex.yaml diff --git a/examples/pytorch/iris-classifier/model.py b/examples/traffic-splitting/model.py similarity index 100% rename from examples/pytorch/iris-classifier/model.py rename to examples/traffic-splitting/model.py diff --git a/examples/traffic-splitter/onnx_predictor.py b/examples/traffic-splitting/onnx_predictor.py similarity index 100% rename from examples/traffic-splitter/onnx_predictor.py rename to examples/traffic-splitting/onnx_predictor.py diff --git a/examples/traffic-splitter/pytorch_predictor.py b/examples/traffic-splitting/pytorch_predictor.py similarity index 100% rename from examples/traffic-splitter/pytorch_predictor.py rename to examples/traffic-splitting/pytorch_predictor.py diff --git a/examples/traffic-splitter/sample.json b/examples/traffic-splitting/sample.json similarity index 100% rename from examples/traffic-splitter/sample.json rename to examples/traffic-splitting/sample.json From f97ba2d169c48d55435bf99db5c270dda2c24e3b Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Tue, 1 Dec 2020 16:37:37 -0800 Subject: [PATCH 02/36] Delete README.md --- examples/README.md | 63 ---------------------------------------------- 1 file changed, 63 deletions(-) delete mode 100644 examples/README.md diff --git a/examples/README.md b/examples/README.md deleted file mode 100644 index a9b4f3ed15..0000000000 --- a/examples/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# Examples - -## TensorFlow - -- [Text generation](tensorflow/text-generator): deploy OpenAI's GPT-2 to generate text. 
- -- [Sentiment analysis](tensorflow/sentiment-analyzer): deploy a BERT model for sentiment analysis. - -- [Image classification](tensorflow/image-classifier-inception): deploy an Inception model to classify images. - -- [Image classification](tensorflow/image-classifier-resnet50): deploy a ResNet50 model to classify images. - -- [License plate reader](tensorflow/license-plate-reader): deploy a YOLOv3 model (and others) to identify license plates in real time. - -- [Multi-model classification](tensorflow/multi-model-classifier): deploy 3 models (ResNet50, Iris, Inception) in a single API. - -## Keras - -- [Denoisify text documents](keras/document-denoiser): deploy an Autoencoder model to clean text document images of noise. - -## PyTorch - -- [Iris classification](pytorch/iris-classifier): deploy a model to classify iris flowers. - -- [Text generation](pytorch/text-generator): deploy Hugging Face's GPT-2 model to generate text. - -- [Sentiment analysis](pytorch/sentiment-analyzer): deploy a Hugging Face transformers model for sentiment analysis. - -- [Search completion](pytorch/search-completer): deploy a Facebook's RoBERTa model to complete search terms. - -- [Answer generation](pytorch/answer-generator): deploy Microsoft's DialoGPT model to answer questions. - -- [Text summarization](pytorch/text-summarizer): deploy a BART model (from Hugging Face's transformers library) to summarize text. - -- [Reading comprehension](pytorch/reading-comprehender): deploy an AllenNLP model for reading comprehension. - -- [Language identification](pytorch/language-identifier): deploy a fastText model to identify languages. - -- [Multi-model text analysis](pytorch/multi-model-text-analyzer): deploy 2 models (Sentiment and Summarization analyzers) in a single API. - -- [Image classification](pytorch/image-classifier-alexnet): deploy an AlexNet model from TorchVision to classify images. - -- [Image classification](pytorch/image-classifier-resnet50): deploy a ResNet50 model from TorchVision to classify images. - -- [Object detection](pytorch/object-detector): deploy a Faster R-CNN model from TorchVision to detect objects in images. - -- [Question generator](pytorch/question-generator): deploy a transformers model to generate questions given text and the correct answer. - -## ONNX - -- [YOLOv5 YouTube detection](onnx/yolov5-youtube): deploy a YOLOv5 model trained on COCO val2017 dataset. - -- [Multi-model classification](onnx/multi-model-classifier): deploy 3 models (ResNet50, MobileNet, ShuffleNet) in a single API. - -## scikit-learn - -- [Iris classification](sklearn/iris-classifier): deploy a model to classify iris flowers. - -- [MPG estimation](sklearn/mpg-estimator): deploy a linear regression model to estimate MPG. - -## spacy - -- [Entity recognizer](spacy/entity-recognizer): deploy a spacy model for named entity recognition. 
From 780e8341f61f972531355bb581e44f1602e5acd8 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Thu, 3 Dec 2020 15:11:16 -0800 Subject: [PATCH 03/36] Rename examples to tutorials --- .dockerignore | 1 - .gitbook.yaml | 6 +- build/lint.sh | 1 - build/test-examples.sh | 2 +- docs/aws/install.md | 3 - docs/deployments/batch-api.md | 7 - docs/deployments/batch-api/deployment.md | 7 - docs/deployments/batch-api/predictors.md | 15 - docs/deployments/inferentia.md | 6 +- docs/deployments/realtime-api.md | 7 - docs/deployments/realtime-api/deployment.md | 7 - docs/deployments/realtime-api/models.md | 6 - docs/deployments/realtime-api/parallelism.md | 3 - docs/deployments/realtime-api/predictors.md | 46 --- .../realtime-api/traffic-splitter.md | 6 - docs/guides/exporting.md | 31 +- docs/guides/multi-model.md | 9 - docs/guides/single-node-deployment.md | 2 +- docs/summary.md | 4 +- .../python => docs/tutorials/batch}/README.md | 8 - .../tutorials/batch}/cortex.yaml | 0 .../tutorials/batch}/predictor.py | 0 .../tutorials/batch}/requirements.txt | 0 .../tutorials/batch}/sample.json | 0 .../tutorials/compute}/README.md | 0 .../tutorials/compute}/cortex.yaml | 0 .../tutorials/compute}/cortex_gpu.yaml | 0 .../cortex_gpu_server_side_batching.yaml | 0 .../tutorials/compute}/cortex_inf.yaml | 0 .../cortex_inf_server_side_batching.yaml | 0 .../generate_gpu_resnet50_model.ipynb | 0 .../compute}/generate_resnet50_models.ipynb | 0 .../tutorials/compute}/predictor.py | 0 .../tutorials/compute}/requirements.txt | 0 .../tutorials/compute}/sample.bin | Bin .../tutorials/compute}/sample.json | 0 .../tutorials/multi-model}/README.md | 0 .../tutorials/multi-model}/cortex.yaml | 0 .../tutorials/multi-model}/predictor.py | 0 .../tutorials/multi-model}/requirements.txt | 0 .../tutorials/multi-model}/sample.json | 0 .../tutorials/realtime}/README.md | 0 .../tutorials/realtime}/deploy.ipynb | 0 .../tutorials/realtime}/predictor.py | 0 .../tutorials/realtime}/requirements.txt | 0 .../tutorials}/traffic-splitting/README.md | 0 .../tutorials}/traffic-splitting/cortex.yaml | 0 .../tutorials}/traffic-splitting/model.py | 0 .../traffic-splitting/onnx_predictor.py | 0 .../traffic-splitting/pytorch_predictor.py | 0 .../tutorials}/traffic-splitting/sample.json | 0 {examples => docs/tutorials}/utils/README.md | 0 .../tutorials}/utils/throughput_test.py | 0 examples/batch/onnx/README.md | 6 - examples/batch/onnx/cortex.yaml | 10 - examples/batch/onnx/predictor.py | 64 ---- examples/batch/onnx/requirements.txt | 3 - examples/batch/tensorflow/README.md | 6 - examples/batch/tensorflow/cortex.yaml | 10 - examples/batch/tensorflow/predictor.py | 60 ---- examples/batch/tensorflow/requirements.txt | 1 - examples/compute/python/README.md | 59 ---- examples/compute/python/cortex.yaml | 15 - examples/compute/python/cortex_gpu.yaml | 16 - examples/compute/python/cortex_inf.yaml | 16 - .../python/generate_resnet50_models.ipynb | 121 ------- examples/compute/python/predictor.py | 93 ------ examples/hello-world/onnx/README.md | 3 - examples/hello-world/onnx/cortex.yaml | 10 - examples/hello-world/onnx/predictor.py | 20 -- examples/hello-world/onnx/sample.json | 6 - examples/hello-world/onnx/xgboost.ipynb | 244 --------------- examples/hello-world/tensorflow/README.md | 3 - examples/hello-world/tensorflow/cortex.yaml | 10 - examples/hello-world/tensorflow/predictor.py | 13 - examples/hello-world/tensorflow/sample.json | 6 - .../hello-world/tensorflow/tensorflow.ipynb | 296 ------------------ examples/live-reloading/onnx/README.md | 5 - 
.../python/mpg-estimator/cortex.yaml | 8 - .../python/mpg-estimator/predictor.py | 27 -- .../python/mpg-estimator/requirements.txt | 4 - .../python/mpg-estimator/sample.json | 7 - examples/live-reloading/tensorflow/README.md | 5 - .../onnx/multi-model-classifier/README.md | 77 ----- .../onnx/multi-model-classifier/cortex.yaml | 22 -- .../onnx/multi-model-classifier/predictor.py | 99 ------ .../onnx/multi-model-classifier/sample.json | 3 - .../python/mpg-estimator/README.md | 75 ----- .../python/mpg-estimator/cortex.yaml | 13 - .../python/mpg-estimator/predictor.py | 28 -- .../python/mpg-estimator/requirements.txt | 4 - .../python/mpg-estimator/sample.json | 7 - .../multi-model-classifier/README.md | 77 ----- .../multi-model-classifier/cortex.yaml | 32 -- .../multi-model-classifier/predictor.py | 63 ---- .../multi-model-classifier/requirements.txt | 1 - .../multi-model-classifier/sample-image.json | 3 - .../multi-model-classifier/sample-iris.json | 8 - examples/multi-model/onnx/requirements.txt | 2 - examples/multi-model/onnx/sample.json | 3 - examples/multi-model/python/README.md | 51 --- examples/multi-model/python/cortex.yaml | 11 - examples/multi-model/python/predictor.py | 25 -- examples/multi-model/python/requirements.txt | 2 - .../multi-model/python/sample-sentiment.json | 3 - .../multi-model/python/sample-summarizer.json | 3 - examples/multi-model/tensorflow/README.md | 69 ---- examples/multi-model/tensorflow/cortex.yaml | 26 -- examples/multi-model/tensorflow/predictor.py | 62 ---- .../multi-model/tensorflow/requirements.txt | 1 - .../multi-model/tensorflow/sample-image.json | 3 - 111 files changed, 14 insertions(+), 2083 deletions(-) rename {examples/batch/python => docs/tutorials/batch}/README.md (97%) rename {examples/batch/python => docs/tutorials/batch}/cortex.yaml (100%) rename {examples/batch/python => docs/tutorials/batch}/predictor.py (100%) rename {examples/batch/python => docs/tutorials/batch}/requirements.txt (100%) rename {examples/batch/python => docs/tutorials/batch}/sample.json (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/README.md (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/cortex.yaml (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/cortex_gpu.yaml (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/cortex_gpu_server_side_batching.yaml (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/cortex_inf.yaml (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/cortex_inf_server_side_batching.yaml (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/generate_gpu_resnet50_model.ipynb (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/generate_resnet50_models.ipynb (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/predictor.py (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/requirements.txt (100%) rename {examples/compute/tensorflow => docs/tutorials/compute}/sample.bin (100%) rename {examples/compute/python => docs/tutorials/compute}/sample.json (100%) rename {examples/multi-model/onnx => docs/tutorials/multi-model}/README.md (100%) rename {examples/multi-model/onnx => docs/tutorials/multi-model}/cortex.yaml (100%) rename {examples/multi-model/onnx => docs/tutorials/multi-model}/predictor.py (100%) rename {examples/model-caching/onnx/multi-model-classifier => docs/tutorials/multi-model}/requirements.txt (100%) rename {examples/compute/tensorflow => 
docs/tutorials/multi-model}/sample.json (100%) rename {examples/hello-world/python => docs/tutorials/realtime}/README.md (100%) rename {examples/hello-world/python => docs/tutorials/realtime}/deploy.ipynb (100%) rename {examples/hello-world/python => docs/tutorials/realtime}/predictor.py (100%) rename {examples/hello-world/python => docs/tutorials/realtime}/requirements.txt (100%) rename {examples => docs/tutorials}/traffic-splitting/README.md (100%) rename {examples => docs/tutorials}/traffic-splitting/cortex.yaml (100%) rename {examples => docs/tutorials}/traffic-splitting/model.py (100%) rename {examples => docs/tutorials}/traffic-splitting/onnx_predictor.py (100%) rename {examples => docs/tutorials}/traffic-splitting/pytorch_predictor.py (100%) rename {examples => docs/tutorials}/traffic-splitting/sample.json (100%) rename {examples => docs/tutorials}/utils/README.md (100%) rename {examples => docs/tutorials}/utils/throughput_test.py (100%) delete mode 100644 examples/batch/onnx/README.md delete mode 100644 examples/batch/onnx/cortex.yaml delete mode 100644 examples/batch/onnx/predictor.py delete mode 100644 examples/batch/onnx/requirements.txt delete mode 100644 examples/batch/tensorflow/README.md delete mode 100644 examples/batch/tensorflow/cortex.yaml delete mode 100644 examples/batch/tensorflow/predictor.py delete mode 100644 examples/batch/tensorflow/requirements.txt delete mode 100644 examples/compute/python/README.md delete mode 100644 examples/compute/python/cortex.yaml delete mode 100644 examples/compute/python/cortex_gpu.yaml delete mode 100644 examples/compute/python/cortex_inf.yaml delete mode 100644 examples/compute/python/generate_resnet50_models.ipynb delete mode 100644 examples/compute/python/predictor.py delete mode 100644 examples/hello-world/onnx/README.md delete mode 100644 examples/hello-world/onnx/cortex.yaml delete mode 100644 examples/hello-world/onnx/predictor.py delete mode 100644 examples/hello-world/onnx/sample.json delete mode 100644 examples/hello-world/onnx/xgboost.ipynb delete mode 100644 examples/hello-world/tensorflow/README.md delete mode 100644 examples/hello-world/tensorflow/cortex.yaml delete mode 100644 examples/hello-world/tensorflow/predictor.py delete mode 100644 examples/hello-world/tensorflow/sample.json delete mode 100644 examples/hello-world/tensorflow/tensorflow.ipynb delete mode 100644 examples/live-reloading/onnx/README.md delete mode 100644 examples/live-reloading/python/mpg-estimator/cortex.yaml delete mode 100644 examples/live-reloading/python/mpg-estimator/predictor.py delete mode 100644 examples/live-reloading/python/mpg-estimator/requirements.txt delete mode 100644 examples/live-reloading/python/mpg-estimator/sample.json delete mode 100644 examples/live-reloading/tensorflow/README.md delete mode 100644 examples/model-caching/onnx/multi-model-classifier/README.md delete mode 100644 examples/model-caching/onnx/multi-model-classifier/cortex.yaml delete mode 100644 examples/model-caching/onnx/multi-model-classifier/predictor.py delete mode 100644 examples/model-caching/onnx/multi-model-classifier/sample.json delete mode 100644 examples/model-caching/python/mpg-estimator/README.md delete mode 100644 examples/model-caching/python/mpg-estimator/cortex.yaml delete mode 100644 examples/model-caching/python/mpg-estimator/predictor.py delete mode 100644 examples/model-caching/python/mpg-estimator/requirements.txt delete mode 100644 examples/model-caching/python/mpg-estimator/sample.json delete mode 100644 
examples/model-caching/tensorflow/multi-model-classifier/README.md delete mode 100644 examples/model-caching/tensorflow/multi-model-classifier/cortex.yaml delete mode 100644 examples/model-caching/tensorflow/multi-model-classifier/predictor.py delete mode 100644 examples/model-caching/tensorflow/multi-model-classifier/requirements.txt delete mode 100644 examples/model-caching/tensorflow/multi-model-classifier/sample-image.json delete mode 100644 examples/model-caching/tensorflow/multi-model-classifier/sample-iris.json delete mode 100644 examples/multi-model/onnx/requirements.txt delete mode 100644 examples/multi-model/onnx/sample.json delete mode 100644 examples/multi-model/python/README.md delete mode 100644 examples/multi-model/python/cortex.yaml delete mode 100644 examples/multi-model/python/predictor.py delete mode 100644 examples/multi-model/python/requirements.txt delete mode 100644 examples/multi-model/python/sample-sentiment.json delete mode 100644 examples/multi-model/python/sample-summarizer.json delete mode 100644 examples/multi-model/tensorflow/README.md delete mode 100644 examples/multi-model/tensorflow/cortex.yaml delete mode 100644 examples/multi-model/tensorflow/predictor.py delete mode 100644 examples/multi-model/tensorflow/requirements.txt delete mode 100644 examples/multi-model/tensorflow/sample-image.json diff --git a/.dockerignore b/.dockerignore index 1b0561f446..ee2e048e67 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,7 +2,6 @@ /bin/ /dev/ /docs/ -/examples/ **/.* **/*.md diff --git a/.gitbook.yaml b/.gitbook.yaml index 09f320911a..8b207447a3 100644 --- a/.gitbook.yaml +++ b/.gitbook.yaml @@ -5,9 +5,9 @@ structure: summary: summary.md redirects: - tutorial: ../examples/hello-world/python/README.md - tutorial/realtime: ../examples/hello-world/python/README.md - tutorial/batch: ../examples/batch/python/README.md + tutorial: ./tutorials/hello-world/python/README.md + tutorial/realtime: ./tutorials/hello-world/python/README.md + tutorial/batch: ./tutorials/batch/python/README.md install: ./aws/install.md uninstall: ./aws/uninstall.md update: ./aws/update.md diff --git a/build/lint.sh b/build/lint.sh index 656f4c4455..9243106188 100755 --- a/build/lint.sh +++ b/build/lint.sh @@ -72,7 +72,6 @@ output=$(cd "$ROOT" && find . -type f \ ! -path "./vendor/*" \ ! -path "**/.vscode/*" \ ! -path "**/__pycache__/*" \ -! -path "./examples/*" \ ! -path "./dev/config/*" \ ! -path "./bin/*" \ ! -path "./.circleci/*" \ diff --git a/build/test-examples.sh b/build/test-examples.sh index 33b562f095..a886a63587 100755 --- a/build/test-examples.sh +++ b/build/test-examples.sh @@ -19,7 +19,7 @@ set -eou pipefail ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. >/dev/null && pwd)" CORTEX="$ROOT/bin/cortex" -for example in $ROOT/examples/*/cortex.yaml; do +for example in $ROOT/docs/tutorials/*/cortex.yaml; do timer=1200 example_base_dir=$(dirname "${example}") retry="false" diff --git a/docs/aws/install.md b/docs/aws/install.md index 44a40b4aba..55dc61d1a8 100644 --- a/docs/aws/install.md +++ b/docs/aws/install.md @@ -19,9 +19,6 @@ cortex cluster up # or: cortex cluster up --config cluster.yaml (see configurat cortex env default aws ``` - -Try the [tutorial](../../examples/hello-world/python/README.md) or deploy one of our [examples](https://github.com/cortexlabs/cortex/tree/master/examples). 
- ## Configure Cortex diff --git a/docs/deployments/batch-api.md b/docs/deployments/batch-api.md index 9710290a6c..57f994d70c 100644 --- a/docs/deployments/batch-api.md +++ b/docs/deployments/batch-api.md @@ -34,10 +34,3 @@ Once you've implemented your predictor and defined your API configuration, you c A job submission typically consists of an input dataset or the location of your input dataset, the number of workers for your job, and the batch size. When a job is submitted to your Batch API endpoint, you will immediately receive a Job ID that you can use to get the job's status and logs, and stop the job if necessary. Behind the scenes, your Batch API will break down the dataset into batches and push them onto a queue. Once all of the batches have been enqueued, the Cortex Cluster will spin up the requested number of workers and initialize them with your predictor implementation. Each worker will take one batch at a time from the queue and run your Predictor implementation. After all batches have been processed, the `on_job_complete` hook in your predictor implementation (if provided) will be executed by one of the workers. At any point, you can use the Job ID that was provided upon job submission to make requests to the Batch API endpoint to get job status, progress metrics, and worker statuses. Logs for each job are aggregated and are accessible via the Cortex CLI or in your AWS console. - -## Next steps - -* Try the [tutorial](../../examples/batch/python/README.md) to deploy a Batch API on your Cortex cluster. -* See our [exporting guide](../guides/exporting.md) for how to export your model to use in a Batch API. -* See the [Predictor docs](batch-api/predictors.md) for how to implement a Predictor class. -* See the [API configuration docs](batch-api/api-configuration.md) for a full list of features that can be used to deploy your Batch API. diff --git a/docs/deployments/batch-api/deployment.md b/docs/deployments/batch-api/deployment.md index 9608e927cb..27b94f82bf 100644 --- a/docs/deployments/batch-api/deployment.md +++ b/docs/deployments/batch-api/deployment.md @@ -118,10 +118,3 @@ $ cortex delete my-api deleting my-api ``` - -## Additional resources - - -* [Tutorial](../../../examples/batch/python/README.md) provides a step-by-step walkthrough of deploying an image classification batch API -* [CLI documentation](../../miscellaneous/cli.md) lists all CLI commands -* [Examples](https://github.com/cortexlabs/cortex/tree/master/examples/batch) demonstrate how to deploy models from common ML libraries diff --git a/docs/deployments/batch-api/predictors.md b/docs/deployments/batch-api/predictors.md index e66681b6a8..beca48fc17 100644 --- a/docs/deployments/batch-api/predictors.md +++ b/docs/deployments/batch-api/predictors.md @@ -94,11 +94,6 @@ class PythonPredictor: For proper separation of concerns, it is recommended to use the constructor's `config` parameter for information such as from where to download the model and initialization files, or any configurable model parameters. You define `config` in your [API configuration](api-configuration.md), and it is passed through to your Predictor's constructor. The `config` parameters in the `API configuration` can be overridden by providing `config` in the job submission requests. -### Examples - - -You can find an example of a BatchAPI using a PythonPredictor in [examples/batch/python](https://github.com/cortexlabs/cortex/tree/master/examples/batch/python). 
- ### Pre-installed packages The following Python packages are pre-installed in Python Predictors and can be used in your implementations: @@ -231,11 +226,6 @@ When multiple models are defined using the Predictor's `models` field, the `tens For proper separation of concerns, it is recommended to use the constructor's `config` parameter for information such as from where to download the model and initialization files, or any configurable model parameters. You define `config` in your [API configuration](api-configuration.md), and it is passed through to your Predictor's constructor. The `config` parameters in the `API configuration` can be overridden by providing `config` in the job submission requests. -### Examples - - -You can find an example of a BatchAPI using a TensorFlowPredictor in [examples/batch/tensorflow](https://github.com/cortexlabs/cortex/tree/master/examples/batch/tensorflow). - ### Pre-installed packages The following Python packages are pre-installed in TensorFlow Predictors and can be used in your implementations: @@ -318,11 +308,6 @@ When multiple models are defined using the Predictor's `models` field, the `onnx For proper separation of concerns, it is recommended to use the constructor's `config` parameter for information such as from where to download the model and initialization files, or any configurable model parameters. You define `config` in your [API configuration](api-configuration.md), and it is passed through to your Predictor's constructor. The `config` parameters in the `API configuration` can be overridden by providing `config` in the job submission requests. -### Examples - - -You can find an example of a BatchAPI using an ONNXPredictor in [examples/batch/onnx](https://github.com/cortexlabs/cortex/tree/master/examples/batch/onnx). - ### Pre-installed packages The following Python packages are pre-installed in ONNX Predictors and can be used in your implementations: diff --git a/docs/deployments/inferentia.md b/docs/deployments/inferentia.md index 34390f3a57..cfd56efaa8 100644 --- a/docs/deployments/inferentia.md +++ b/docs/deployments/inferentia.md @@ -66,11 +66,7 @@ model_neuron.save(compiled_model) The versions of `tensorflow-neuron` and `torch-neuron` that are used by Cortex are found in the [Realtime API pre-installed packages list](realtime-api/predictors.md#inferentia-equipped-apis) and [Batch API pre-installed packages list](batch-api/predictors.md#inferentia-equipped-apis). When installing these packages with `pip` to compile models of your own, use the extra index URL `--extra-index-url=https://pip.repos.neuron.amazonaws.com`. -A list of model compilation examples for Inferentia can be found on the [`aws/aws-neuron-sdk`](https://github.com/aws/aws-neuron-sdk) repo for [TensorFlow](https://github.com/aws/aws-neuron-sdk/blob/master/docs/tensorflow-neuron/) and for [PyTorch](https://github.com/aws/aws-neuron-sdk/blob/master/docs/pytorch-neuron/README.md). Here are 2 examples implemented with Cortex: - - -1. [ResNet50 in TensorFlow](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/image-classifier-resnet50) -1. 
[ResNet50 in PyTorch](https://github.com/cortexlabs/cortex/tree/master/examples/pytorch/image-classifier-resnet50) +A list of model compilation examples for Inferentia can be found on the [`aws/aws-neuron-sdk`](https://github.com/aws/aws-neuron-sdk) repo for [TensorFlow](https://github.com/aws/aws-neuron-sdk/blob/master/docs/tensorflow-neuron/) and for [PyTorch](https://github.com/aws/aws-neuron-sdk/blob/master/docs/pytorch-neuron/README.md). ### Improving performance diff --git a/docs/deployments/realtime-api.md b/docs/deployments/realtime-api.md index 3bdba221ea..687fb270b0 100644 --- a/docs/deployments/realtime-api.md +++ b/docs/deployments/realtime-api.md @@ -37,10 +37,3 @@ Once you've implemented your predictor and defined your API configuration, you c When a request is made to the HTTP endpoint, it gets routed to one your API's replicas (at random). The replica receives the request, parses the payload and executes the inference code you've defined in your predictor implementation and sends a response. The Cortex Cluster will automatically scale based on the incoming traffic and the autoscaling configuration you've defined. You can safely update your model or your code and use the Cortex CLI to deploy without experiencing downtime because updates to your API will be rolled out automatically. Request metrics and logs will automatically be aggregated and be accessible via the Cortex CLI or on your AWS console. - -## Next steps - -* Try the [tutorial](../../examples/hello-world/python/README.md) to deploy a Realtime API locally or on AWS. -* See our [exporting guide](../guides/exporting.md) for how to export your model to use in a Realtime API. -* See the [Predictor docs](realtime-api/predictors.md) for how to implement a Predictor class. -* See the [API configuration docs](realtime-api/api-configuration.md) for a full list of features that can be used to deploy your Realtime API. diff --git a/docs/deployments/realtime-api/deployment.md b/docs/deployments/realtime-api/deployment.md index b2bf5dccc1..f068ff463d 100644 --- a/docs/deployments/realtime-api/deployment.md +++ b/docs/deployments/realtime-api/deployment.md @@ -59,10 +59,3 @@ $ cortex delete my-api deleting my-api ``` - -## Additional resources - - -* [Tutorial](../../../examples/hello-world/python/README.md) provides a step-by-step walkthrough of deploying a text generation API -* [CLI documentation](../../miscellaneous/cli.md) lists all CLI commands -* [Examples](https://github.com/cortexlabs/cortex/tree/master/examples) demonstrate how to deploy models from common ML libraries diff --git a/docs/deployments/realtime-api/models.md b/docs/deployments/realtime-api/models.md index 114d30236e..ab88460a78 100644 --- a/docs/deployments/realtime-api/models.md +++ b/docs/deployments/realtime-api/models.md @@ -168,9 +168,6 @@ When using the `models.dir` field, the directory provided may contain multiple s In this case, there are two models in the directory, one of which is named "text-generator", and the other is named "sentiment-analyzer". - -Additional examples can be seen in the [multi model guide](../../guides/multi-model.md) and in [examples/model-caching](https://github.com/cortexlabs/cortex/tree/master/examples/model-caching) (remove the `cache_size` and `disk_cache_size` configurations in `cortex.yaml` to disable [multi model caching](#multi-model-caching)). 
- ## Live model reloading Live model reloading is a mechanism that periodically checks for updated models in the model path(s) provided in `predictor.model_path` or `predictor.models`. It is automatically enabled for all predictor types, including the Python predictor type (as long as model paths are specified via `model_path` or `models` in the `predictor` configuration). @@ -390,9 +387,6 @@ The model cache is a two-layer cache, configured by the following parameters in Both of these fields must be specified, in addition to either the `dir` or `paths` field (which specifies the model paths, see above for documentation). Multi model caching is only supported if `predictor.processes_per_replica` is set to 1 (the default value). - -See [examples/model-caching](https://github.com/cortexlabs/cortex/tree/master/examples/model-caching) for examples. - ### Caveats Cortex periodically runs a background script (every 10 seconds) that counts the number of models in memory and on disk, and evicts the least recently used models if the count exceeds `cache_size` / `disk_cache_size`. diff --git a/docs/deployments/realtime-api/parallelism.md b/docs/deployments/realtime-api/parallelism.md index ad44641ff8..3ec3ca7854 100644 --- a/docs/deployments/realtime-api/parallelism.md +++ b/docs/deployments/realtime-api/parallelism.md @@ -47,6 +47,3 @@ When optimizing for maximum throughput, a good rule of thumb is to follow these 1. Multiply the maximum throughput from step 1 by the `batch_interval` from step 2. The result is a number which you can assign to `max_batch_size`. 1. Run the load test again. If the inference fails with that batch size (e.g. due to running out of GPU or RAM memory), then reduce `max_batch_size` to a level that works (reduce `batch_interval` by the same factor). 1. Use the load test to determine the peak throughput of the API replica. Multiply the observed throughput by the `batch_interval` to calculate the average batch size. If the average batch size coincides with `max_batch_size`, then it might mean that the throughput could still be further increased by increasing `max_batch_size`. If it's lower, then it means that `batch_interval` is triggering the inference before `max_batch_size` requests have been aggregated. If modifying both `max_batch_size` and `batch_interval` doesn't improve the throughput, then the service may be bottlenecked by something else (e.g. CPU, network IO, `processes_per_replica`, `threads_per_process`, etc). - - -An example of server-side batching for the TensorFlow Predictor that has been benchmarked is found in [ResNet50 in TensorFlow](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/image-classifier-resnet50#throughput-test). diff --git a/docs/deployments/realtime-api/predictors.md b/docs/deployments/realtime-api/predictors.md index 0ff5b9951d..d4958a69b9 100644 --- a/docs/deployments/realtime-api/predictors.md +++ b/docs/deployments/realtime-api/predictors.md @@ -275,27 +275,6 @@ Your API can accept requests with different types of payloads such as `JSON`-par Your `predictor` method can return different types of objects such as `JSON`-parseable, `string`, and `bytes` objects. Navigate to the [API responses](#api-responses) section to learn about how to configure your `predictor` method to respond with different response codes and content-types. -### Examples - - -Most of the examples in [examples/tensorflow](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow) use the TensorFlow Predictor. 
- - -Here is the Predictor for [examples/tensorflow/iris-classifier](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/iris-classifier): - -```python -labels = ["setosa", "versicolor", "virginica"] - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - - def predict(self, payload): - prediction = self.client.predict(payload) - predicted_class_id = int(prediction["class_ids"][0]) - return labels[predicted_class_id] -``` - ### Pre-installed packages The following Python packages are pre-installed in TensorFlow Predictors and can be used in your implementations: @@ -387,31 +366,6 @@ Your API can accept requests with different types of payloads such as `JSON`-par Your `predictor` method can return different types of objects such as `JSON`-parseable, `string`, and `bytes` objects. Navigate to the [API responses](#api-responses) section to learn about how to configure your `predictor` method to respond with different response codes and content-types. -### Examples - - -[examples/onnx/iris-classifier](https://github.com/cortexlabs/cortex/tree/master/examples/onnx/iris-classifier) uses the ONNX Predictor: - -```python -labels = ["setosa", "versicolor", "virginica"] - -class ONNXPredictor: - def __init__(self, onnx_client, config): - self.client = onnx_client - - def predict(self, payload): - model_input = [ - payload["sepal_length"], - payload["sepal_width"], - payload["petal_length"], - payload["petal_width"], - ] - - prediction = self.client.predict(model_input) - predicted_class_id = prediction[0][0] - return labels[predicted_class_id] -``` - ### Pre-installed packages The following Python packages are pre-installed in ONNX Predictors and can be used in your implementations: diff --git a/docs/deployments/realtime-api/traffic-splitter.md b/docs/deployments/realtime-api/traffic-splitter.md index 90726aa173..adfee17215 100644 --- a/docs/deployments/realtime-api/traffic-splitter.md +++ b/docs/deployments/realtime-api/traffic-splitter.md @@ -73,9 +73,3 @@ deleted traffic-splitter ``` Note that this will not delete the Realtime APIs targeted by the Traffic Splitter. - -## Additional resources - -* [Traffic Splitter Tutorial](../../../examples/traffic-splitting/README.md) provides a step-by-step walkthrough for deploying an Traffic Splitter -* [Realtime API Tutorial](../../../examples/hello-world/python/README.md) provides a step-by-step walkthrough of deploying a realtime API for text generation -* [CLI documentation](../../miscellaneous/cli.md) lists all CLI commands diff --git a/docs/guides/exporting.md b/docs/guides/exporting.md index 05823382e9..b34e6c5b82 100644 --- a/docs/guides/exporting.md +++ b/docs/guides/exporting.md @@ -10,10 +10,7 @@ Here are examples for some common ML libraries: ### `torch.save()` -The recommended approach is export your PyTorch model with [torch.save()](https://pytorch.org/docs/stable/torch.html?highlight=save#torch.save). Here is PyTorch's documentation on [saving and loading models](https://pytorch.org/tutorials/beginner/saving_loading_models.html). - - -[examples/pytorch/iris-classifier](https://github.com/cortexlabs/cortex/blob/master/examples/pytorch/iris-classifier) exports its trained model like this: +The recommended approach is export your PyTorch model with [torch.save()](https://pytorch.org/docs/stable/torch.html?highlight=save#torch.save). Here is PyTorch's documentation on [saving and loading models](https://pytorch.org/tutorials/beginner/saving_loading_models.html). 
For example: ```python torch.save(model.state_dict(), "weights.pth") @@ -23,10 +20,7 @@ For Inferentia-equipped instances, check the [Inferentia instructions](inferenti ### ONNX -It may also be possible to export your PyTorch model into the ONNX format using [torch.onnx.export()](https://pytorch.org/docs/stable/onnx.html#torch.onnx.export). - - -For example, if [examples/pytorch/iris-classifier](https://github.com/cortexlabs/cortex/blob/master/examples/pytorch/iris-classifier) were to export the model to ONNX, it would look like this: +It may also be possible to export your PyTorch model into the ONNX format using [torch.onnx.export()](https://pytorch.org/docs/stable/onnx.html#torch.onnx.export). For example: ```python placeholder = torch.randn(1, 4) @@ -63,8 +57,7 @@ A TensorFlow `SavedModel` directory should have this structure: └── variables.data-00002-of-... ``` - -Most of the TensorFlow examples use this approach. Here is the relevant code from [examples/tensorflow/sentiment-analyzer](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/sentiment-analyzer): +For example: ```python import tensorflow as tf @@ -101,24 +94,15 @@ zip -r bert.zip 1568244606 aws s3 cp bert.zip s3://my-bucket/bert.zip ``` - -[examples/tensorflow/iris-classifier](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/iris-classifier) also use the `SavedModel` approach, and includes a Python notebook demonstrating how it was exported. - ### Other model formats There are other ways to export Keras or TensorFlow models, and as long as they can be loaded and used to make predictions in Python, they will be supported by Cortex. - -For example, the `crnn` API in [examples/tensorflow/license-plate-reader](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/license-plate-reader) uses this approach. - ## Scikit-learn ### `pickle` -Scikit-learn models are typically exported using `pickle`. Here is [Scikit-learn's documentation](https://scikit-learn.org/stable/modules/model_persistence.html). - - -[examples/sklearn/iris-classifier](https://github.com/cortexlabs/cortex/blob/master/examples/sklearn/iris-classifier) uses this approach. Here is the relevant code: +Scikit-learn models are typically exported using `pickle`. Here is [Scikit-learn's documentation](https://scikit-learn.org/stable/modules/model_persistence.html). For example: ```python pickle.dump(model, open("model.pkl", "wb")) @@ -126,7 +110,7 @@ pickle.dump(model, open("model.pkl", "wb")) ### ONNX -It is also possible to export a scikit-learn model to the ONNX format using [onnxmltools](https://github.com/onnx/onnxmltools). Here is an example: +It is also possible to export a scikit-learn model to the ONNX format using [onnxmltools](https://github.com/onnx/onnxmltools). For example: ```python from sklearn.linear_model import LogisticRegression @@ -168,10 +152,7 @@ model.save_model("model.bin") ### ONNX -It is also possible to export an XGBoost model to the ONNX format using [onnxmltools](https://github.com/onnx/onnxmltools). - - -[examples/onnx/iris-classifier](https://github.com/cortexlabs/cortex/blob/master/examples/onnx/iris-classifier) uses this approach. Here is the relevant code: +It is also possible to export an XGBoost model to the ONNX format using [onnxmltools](https://github.com/onnx/onnxmltools). 
For example: ```python from onnxmltools.convert import convert_xgboost diff --git a/docs/guides/multi-model.md b/docs/guides/multi-model.md index e8380d7631..7ea950b1f7 100644 --- a/docs/guides/multi-model.md +++ b/docs/guides/multi-model.md @@ -76,9 +76,6 @@ $ curl "${api_endpoint}?version=2" -X POST -H "Content-Type: application/json" - For the Python Predictor, the API configuration for a multi-model API is similar to single-model APIs. The Predictor's `config` field can be used to customize the behavior of the `predictor.py` implementation. - -The following template is based on the [pytorch/multi-model-text-analyzer](https://github.com/cortexlabs/cortex/tree/master/examples/pytorch/multi-model-text-analyzer) example. - #### `cortex.yaml` ```yaml @@ -154,9 +151,6 @@ Machine learning is the study of algorithms and statistical models that computer For the TensorFlow Predictor, a multi-model API is configured by placing the list of models in the Predictor's `models` field (each model will specify its own unique name). The `predict()` method of the `tensorflow_client` object expects a second argument that represents the name of the model that will be used for inference. - -The following template is based on the [multi-model/tensorflow](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/multi-model-classifier) example. - ### `cortex.yaml` ```yaml @@ -238,9 +232,6 @@ $ curl "${ENDPOINT}?model=inception" -X POST -H "Content-Type: application/json" For the ONNX Predictor, a multi-model API is configured by placing the list of models in the Predictor's `models` field (each model will specify its own unique name). The `predict()` method of the `onnx_client` object expects a second argument that represents the name of the model that will be used for inference. - -The following template is based on the [onnx/multi-model-classifier](https://github.com/cortexlabs/cortex/tree/master/examples/onnx/multi-model-classifier) example. 
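Before the configuration below, it may help to see how the selected model name reaches the client at inference time. The following is a minimal sketch of a multi-model ONNX predictor: the payload fields and label list are borrowed from the iris classifier example purely for illustration, and the `model` query parameter matches the `curl` examples above. The same pattern applies to the TensorFlow Predictor, whose `tensorflow_client.predict()` also accepts the model name as its second argument.

```python
labels = ["setosa", "versicolor", "virginica"]


class ONNXPredictor:
    def __init__(self, onnx_client, config):
        self.client = onnx_client

    def predict(self, payload, query_params):
        # select the model named in the request, e.g. ?model=iris-classifier
        model_name = query_params["model"]

        model_input = [
            payload["sepal_length"],
            payload["sepal_width"],
            payload["petal_length"],
            payload["petal_width"],
        ]

        # the second argument selects which of the configured models to run
        prediction = self.client.predict(model_input, model_name)
        predicted_class_id = int(prediction[0][0])
        return {"label": labels[predicted_class_id], "model": {"name": model_name}}
```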
- ### `cortex.yaml` ```yaml diff --git a/docs/guides/single-node-deployment.md b/docs/guides/single-node-deployment.md index 1ec54a0003..c9973ebe68 100644 --- a/docs/guides/single-node-deployment.md +++ b/docs/guides/single-node-deployment.md @@ -120,7 +120,7 @@ You can now use Cortex to deploy your model: ```bash $ git clone -b master https://github.com/cortexlabs/cortex.git -$ cd cortex/examples/hello-world/python +$ cd cortex/docs/tutorials/realtime $ cortex deploy diff --git a/docs/summary.md b/docs/summary.md index d1891fc600..b21c1caed4 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -32,14 +32,14 @@ * [Autoscaling](deployments/realtime-api/autoscaling.md) * [Prediction monitoring](deployments/realtime-api/prediction-monitoring.md) * [Traffic Splitter](deployments/realtime-api/traffic-splitter.md) - * [Realtime API tutorial](../examples/hello-world/python/README.md) + * [Realtime API tutorial](tutorials/realtime/README.md) * [Batch API](deployments/batch-api.md) * [Predictor implementation](deployments/batch-api/predictors.md) * [API configuration](deployments/batch-api/api-configuration.md) * [API deployment](deployments/batch-api/deployment.md) * [Endpoints](deployments/batch-api/endpoints.md) * [Job statuses](deployments/batch-api/statuses.md) - * [Batch API tutorial](../examples/batch/python/README.md) + * [Batch API tutorial](tutorials/batch/README.md) ## Advanced diff --git a/examples/batch/python/README.md b/docs/tutorials/batch/README.md similarity index 97% rename from examples/batch/python/README.md rename to docs/tutorials/batch/README.md index 03cc827d35..a37cb8f966 100644 --- a/examples/batch/python/README.md +++ b/docs/tutorials/batch/README.md @@ -570,11 +570,3 @@ deleting image-classifier ``` Running `cortex delete` will stop all in progress jobs for the API and will delete job history for that API. It will not spin down your cluster. - -## Next steps - - -* Deploy another one of our [batch examples](https://github.com/cortexlabs/cortex/tree/master/examples/batch). -* See our [exporting guide](../../../docs/guides/exporting.md) for how to export your model to use in an API. -* Try the [realtime API tutorial](../../pytorch/text-generator/README.md) to learn how to deploy realtime APIs in Cortex. -* See [uninstall](../../../docs/aws/uninstall.md) if you'd like to spin down your cluster. 
diff --git a/examples/batch/python/cortex.yaml b/docs/tutorials/batch/cortex.yaml similarity index 100% rename from examples/batch/python/cortex.yaml rename to docs/tutorials/batch/cortex.yaml diff --git a/examples/batch/python/predictor.py b/docs/tutorials/batch/predictor.py similarity index 100% rename from examples/batch/python/predictor.py rename to docs/tutorials/batch/predictor.py diff --git a/examples/batch/python/requirements.txt b/docs/tutorials/batch/requirements.txt similarity index 100% rename from examples/batch/python/requirements.txt rename to docs/tutorials/batch/requirements.txt diff --git a/examples/batch/python/sample.json b/docs/tutorials/batch/sample.json similarity index 100% rename from examples/batch/python/sample.json rename to docs/tutorials/batch/sample.json diff --git a/examples/compute/tensorflow/README.md b/docs/tutorials/compute/README.md similarity index 100% rename from examples/compute/tensorflow/README.md rename to docs/tutorials/compute/README.md diff --git a/examples/compute/tensorflow/cortex.yaml b/docs/tutorials/compute/cortex.yaml similarity index 100% rename from examples/compute/tensorflow/cortex.yaml rename to docs/tutorials/compute/cortex.yaml diff --git a/examples/compute/tensorflow/cortex_gpu.yaml b/docs/tutorials/compute/cortex_gpu.yaml similarity index 100% rename from examples/compute/tensorflow/cortex_gpu.yaml rename to docs/tutorials/compute/cortex_gpu.yaml diff --git a/examples/compute/tensorflow/cortex_gpu_server_side_batching.yaml b/docs/tutorials/compute/cortex_gpu_server_side_batching.yaml similarity index 100% rename from examples/compute/tensorflow/cortex_gpu_server_side_batching.yaml rename to docs/tutorials/compute/cortex_gpu_server_side_batching.yaml diff --git a/examples/compute/tensorflow/cortex_inf.yaml b/docs/tutorials/compute/cortex_inf.yaml similarity index 100% rename from examples/compute/tensorflow/cortex_inf.yaml rename to docs/tutorials/compute/cortex_inf.yaml diff --git a/examples/compute/tensorflow/cortex_inf_server_side_batching.yaml b/docs/tutorials/compute/cortex_inf_server_side_batching.yaml similarity index 100% rename from examples/compute/tensorflow/cortex_inf_server_side_batching.yaml rename to docs/tutorials/compute/cortex_inf_server_side_batching.yaml diff --git a/examples/compute/tensorflow/generate_gpu_resnet50_model.ipynb b/docs/tutorials/compute/generate_gpu_resnet50_model.ipynb similarity index 100% rename from examples/compute/tensorflow/generate_gpu_resnet50_model.ipynb rename to docs/tutorials/compute/generate_gpu_resnet50_model.ipynb diff --git a/examples/compute/tensorflow/generate_resnet50_models.ipynb b/docs/tutorials/compute/generate_resnet50_models.ipynb similarity index 100% rename from examples/compute/tensorflow/generate_resnet50_models.ipynb rename to docs/tutorials/compute/generate_resnet50_models.ipynb diff --git a/examples/compute/tensorflow/predictor.py b/docs/tutorials/compute/predictor.py similarity index 100% rename from examples/compute/tensorflow/predictor.py rename to docs/tutorials/compute/predictor.py diff --git a/examples/compute/tensorflow/requirements.txt b/docs/tutorials/compute/requirements.txt similarity index 100% rename from examples/compute/tensorflow/requirements.txt rename to docs/tutorials/compute/requirements.txt diff --git a/examples/compute/tensorflow/sample.bin b/docs/tutorials/compute/sample.bin similarity index 100% rename from examples/compute/tensorflow/sample.bin rename to docs/tutorials/compute/sample.bin diff --git a/examples/compute/python/sample.json 
b/docs/tutorials/compute/sample.json similarity index 100% rename from examples/compute/python/sample.json rename to docs/tutorials/compute/sample.json diff --git a/examples/multi-model/onnx/README.md b/docs/tutorials/multi-model/README.md similarity index 100% rename from examples/multi-model/onnx/README.md rename to docs/tutorials/multi-model/README.md diff --git a/examples/multi-model/onnx/cortex.yaml b/docs/tutorials/multi-model/cortex.yaml similarity index 100% rename from examples/multi-model/onnx/cortex.yaml rename to docs/tutorials/multi-model/cortex.yaml diff --git a/examples/multi-model/onnx/predictor.py b/docs/tutorials/multi-model/predictor.py similarity index 100% rename from examples/multi-model/onnx/predictor.py rename to docs/tutorials/multi-model/predictor.py diff --git a/examples/model-caching/onnx/multi-model-classifier/requirements.txt b/docs/tutorials/multi-model/requirements.txt similarity index 100% rename from examples/model-caching/onnx/multi-model-classifier/requirements.txt rename to docs/tutorials/multi-model/requirements.txt diff --git a/examples/compute/tensorflow/sample.json b/docs/tutorials/multi-model/sample.json similarity index 100% rename from examples/compute/tensorflow/sample.json rename to docs/tutorials/multi-model/sample.json diff --git a/examples/hello-world/python/README.md b/docs/tutorials/realtime/README.md similarity index 100% rename from examples/hello-world/python/README.md rename to docs/tutorials/realtime/README.md diff --git a/examples/hello-world/python/deploy.ipynb b/docs/tutorials/realtime/deploy.ipynb similarity index 100% rename from examples/hello-world/python/deploy.ipynb rename to docs/tutorials/realtime/deploy.ipynb diff --git a/examples/hello-world/python/predictor.py b/docs/tutorials/realtime/predictor.py similarity index 100% rename from examples/hello-world/python/predictor.py rename to docs/tutorials/realtime/predictor.py diff --git a/examples/hello-world/python/requirements.txt b/docs/tutorials/realtime/requirements.txt similarity index 100% rename from examples/hello-world/python/requirements.txt rename to docs/tutorials/realtime/requirements.txt diff --git a/examples/traffic-splitting/README.md b/docs/tutorials/traffic-splitting/README.md similarity index 100% rename from examples/traffic-splitting/README.md rename to docs/tutorials/traffic-splitting/README.md diff --git a/examples/traffic-splitting/cortex.yaml b/docs/tutorials/traffic-splitting/cortex.yaml similarity index 100% rename from examples/traffic-splitting/cortex.yaml rename to docs/tutorials/traffic-splitting/cortex.yaml diff --git a/examples/traffic-splitting/model.py b/docs/tutorials/traffic-splitting/model.py similarity index 100% rename from examples/traffic-splitting/model.py rename to docs/tutorials/traffic-splitting/model.py diff --git a/examples/traffic-splitting/onnx_predictor.py b/docs/tutorials/traffic-splitting/onnx_predictor.py similarity index 100% rename from examples/traffic-splitting/onnx_predictor.py rename to docs/tutorials/traffic-splitting/onnx_predictor.py diff --git a/examples/traffic-splitting/pytorch_predictor.py b/docs/tutorials/traffic-splitting/pytorch_predictor.py similarity index 100% rename from examples/traffic-splitting/pytorch_predictor.py rename to docs/tutorials/traffic-splitting/pytorch_predictor.py diff --git a/examples/traffic-splitting/sample.json b/docs/tutorials/traffic-splitting/sample.json similarity index 100% rename from examples/traffic-splitting/sample.json rename to docs/tutorials/traffic-splitting/sample.json 
diff --git a/examples/utils/README.md b/docs/tutorials/utils/README.md similarity index 100% rename from examples/utils/README.md rename to docs/tutorials/utils/README.md diff --git a/examples/utils/throughput_test.py b/docs/tutorials/utils/throughput_test.py similarity index 100% rename from examples/utils/throughput_test.py rename to docs/tutorials/utils/throughput_test.py diff --git a/examples/batch/onnx/README.md b/examples/batch/onnx/README.md deleted file mode 100644 index b3091bb133..0000000000 --- a/examples/batch/onnx/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Batch Image Classifier in ONNX - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - - -Please refer to the [tutorial](https://docs.cortex.dev/v/master/batch-api/image-classifier#deploy-your-batch-api) to see how to deploy a Batch API with Cortex. diff --git a/examples/batch/onnx/cortex.yaml b/examples/batch/onnx/cortex.yaml deleted file mode 100644 index 4bdf7080e1..0000000000 --- a/examples/batch/onnx/cortex.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier - kind: BatchAPI - predictor: - type: onnx - path: predictor.py - model_path: s3://cortex-examples/image-classifier/alexnet_batch/ - compute: - cpu: 1 diff --git a/examples/batch/onnx/predictor.py b/examples/batch/onnx/predictor.py deleted file mode 100644 index 7f005a0b72..0000000000 --- a/examples/batch/onnx/predictor.py +++ /dev/null @@ -1,64 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import requests -import numpy as np -import base64 -from PIL import Image -from io import BytesIO -from torchvision import transforms -import boto3 -import json -import re -import os - - -class ONNXPredictor: - def __init__(self, onnx_client, config, job_spec): - self.client = onnx_client - - self.labels = requests.get( - "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" - ).text.split("\n")[1:] - - # https://github.com/pytorch/examples/blob/447974f6337543d4de6b888e244a964d3c9b71f6/imagenet/main.py#L198-L199 - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - self.preprocess = transforms.Compose( - [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] - ) - - if len(config.get("dest_s3_dir", "")) == 0: - raise Exception("'dest_s3_dir' field was not provided in job submission") - - self.s3 = boto3.client("s3") - - self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() - self.key = os.path.join(self.key, job_spec["job_id"]) - - def predict(self, payload, batch_id): - arr_list = [] - - # download and preprocess each image - for image_url in payload: - if image_url.startswith("s3://"): - bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() - image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() - else: - image_bytes = requests.get(image_url).content - - img_pil = Image.open(BytesIO(image_bytes)) - arr_list.append(self.preprocess(img_pil).numpy()) - - # classify the batch of images - imgs_arr = np.stack(arr_list, axis=0) - result = self.client.predict(imgs_arr) - - # extract predicted classes - predicted_classes = np.argmax(result[0], axis=1) - results = [ - {"url": payload[i], "class": self.labels[class_idx]} - for i, class_idx in enumerate(predicted_classes) - ] - - # save results - json_output = json.dumps(results) - self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) diff --git a/examples/batch/onnx/requirements.txt b/examples/batch/onnx/requirements.txt deleted file mode 100644 index 5a2cde2a12..0000000000 --- a/examples/batch/onnx/requirements.txt +++ /dev/null @@ -1,3 +0,0 @@ -torchvision -boto3 -pillow diff --git a/examples/batch/tensorflow/README.md b/examples/batch/tensorflow/README.md deleted file mode 100644 index 163fe34968..0000000000 --- a/examples/batch/tensorflow/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Batch Image Classifier in TensorFlow - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - - -Please refer to the [tutorial](https://docs.cortex.dev/v/master/batch-api/image-classifier#deploy-your-batch-api) to see how to deploy a Batch API with Cortex. diff --git a/examples/batch/tensorflow/cortex.yaml b/examples/batch/tensorflow/cortex.yaml deleted file mode 100644 index 189e1a9b0e..0000000000 --- a/examples/batch/tensorflow/cortex.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier - kind: BatchAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ - compute: - cpu: 1 diff --git a/examples/batch/tensorflow/predictor.py b/examples/batch/tensorflow/predictor.py deleted file mode 100644 index da4bb39ec3..0000000000 --- a/examples/batch/tensorflow/predictor.py +++ /dev/null @@ -1,60 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import requests -import numpy as np -from PIL import Image -from io import BytesIO -import json -import os -import re -import boto3 -import tensorflow as tf - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config, job_spec): - self.client = tensorflow_client - self.labels = requests.get( - "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" - ).text.split("\n")[1:] - - if len(config.get("dest_s3_dir", "")) == 0: - raise Exception("'dest_s3_dir' field was not provided in job submission") - - self.s3 = boto3.client("s3") - - self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() - self.key = os.path.join(self.key, job_spec["job_id"]) - - def predict(self, payload, batch_id): - arr_list = [] - - # download and preprocess each image - for image_url in payload: - if image_url.startswith("s3://"): - bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() - image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() - else: - image_bytes = requests.get(image_url).content - - decoded_image = np.asarray(Image.open(BytesIO(image_bytes)), dtype=np.float32) / 255 - resized_image = tf.image.resize( - decoded_image, [224, 224], method=tf.image.ResizeMethod.BILINEAR - ) - arr_list.append(resized_image) - - # classify the batch of images - model_input = {"images": np.stack(arr_list, axis=0)} - predictions = self.client.predict(model_input) - - # extract predicted classes - reshaped_predictions = np.reshape(np.array(predictions["classes"]), [-1, len(self.labels)]) - predicted_classes = np.argmax(reshaped_predictions, axis=1) - results = [ - {"url": payload[i], "class": self.labels[class_idx]} - for i, class_idx in enumerate(predicted_classes) - ] - - # save results - json_output = json.dumps(results) - self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) diff --git a/examples/batch/tensorflow/requirements.txt b/examples/batch/tensorflow/requirements.txt deleted file mode 100644 index 7e2fba5e6c..0000000000 --- a/examples/batch/tensorflow/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Pillow diff --git a/examples/compute/python/README.md b/examples/compute/python/README.md deleted file mode 100644 index f13020d874..0000000000 --- a/examples/compute/python/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# Image Classifier with ResNet50 - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example implements an image recognition system using ResNet50, which allows for the recognition of up to 1000 classes. - -## Deploying - -There are 3 Cortex APIs available in this example: - -1. 
[cortex.yaml](cortex.yaml) - can be used with any instances. -1. [cortex_inf.yaml](cortex_inf.yaml) - to be used with `inf1` instances. -1. [cortex_gpu.yaml](cortex_gpu.yaml) - to be used with GPU instances. - -To deploy an API, run: - -```bash -cortex deploy -``` - -E.g. - -```bash -cortex deploy cortex_gpu.yaml -``` - -## Verifying your API - -Check that your API is live by running `cortex get image-classifier-resnet50`, and copy the example `curl` command that's shown. After the API is live, run the `curl` command, e.g. - -```bash -$ curl -X POST -H "Content-Type: application/json" -d @sample.json - -["tabby", "Egyptian_cat", "tiger_cat", "tiger", "plastic_bag"] -``` - -The following image is embedded in [sample.json](sample.json): - -![image](https://i.imgur.com/213xcvs.jpg) - -## Exporting SavedModels - -This example deploys models that we have built and uploaded to a public S3 bucket. If you want to build the models yourself, follow these instructions. - -Run the following command to install the dependencies required for the [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook: - -```bash -pip install --extra-index-url=https://pip.repos.neuron.amazonaws.com \ - neuron-cc==1.0.9410.0+6008239556 \ - torch-neuron==1.0.825.0 -``` - -Also, `torchvision` has to be installed, but without any dependencies: - -```bash -pip install torchvision==0.4.2 --no-deps -``` - -The [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook will generate 2 torch models. One is saved as `resnet50.pt` which can be run on GPU or CPU, and another is saved as `resnet50_neuron.pt`, which can only be run on `inf1` instances. diff --git a/examples/compute/python/cortex.yaml b/examples/compute/python/cortex.yaml deleted file mode 100644 index d6c1cb64c9..0000000000 --- a/examples/compute/python/cortex.yaml +++ /dev/null @@ -1,15 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - config: - model_path: s3://cortex-examples/pytorch/image-classifier-resnet50 - model_name: resnet50.pt - device: cpu - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - compute: - cpu: 1 diff --git a/examples/compute/python/cortex_gpu.yaml b/examples/compute/python/cortex_gpu.yaml deleted file mode 100644 index 7f06603504..0000000000 --- a/examples/compute/python/cortex_gpu.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - config: - model_path: s3://cortex-examples/pytorch/image-classifier-resnet50 - model_name: resnet50.pt - device: gpu - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - compute: - gpu: 1 - cpu: 1 diff --git a/examples/compute/python/cortex_inf.yaml b/examples/compute/python/cortex_inf.yaml deleted file mode 100644 index 55ce4ff793..0000000000 --- a/examples/compute/python/cortex_inf.yaml +++ /dev/null @@ -1,16 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - config: - model_path: s3://cortex-examples/pytorch/image-classifier-resnet50 - model_name: resnet50_neuron.pt - device: inf - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - compute: - inf: 1 - cpu: 1 diff --git a/examples/compute/python/generate_resnet50_models.ipynb b/examples/compute/python/generate_resnet50_models.ipynb deleted file mode 100644 index e4e1343d85..0000000000 --- a/examples/compute/python/generate_resnet50_models.ipynb +++ /dev/null @@ -1,121 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Generate Resnet50 Models\n", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "import numpy as np\n", - "import os\n", - "import torch_neuron\n", - "from torchvision import models" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Load Resnet50 model" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "model = models.resnet50(pretrained=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Compile model for Inferentia. Should have worked with 1 NeuronCores, but it appears that setting it to a minimum of 2 is required." 
- ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:Neuron:compiling module ResNet with neuron-cc\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Compiler args type is value is ['--num-neuroncores', '2']\n" - ] - } - ], - "source": [ - "model.eval()\n", - "batch_size = 1\n", - "image = torch.zeros([batch_size, 3, 224, 224], dtype=torch.float32)\n", - "model_neuron = torch.neuron.trace(model, example_inputs=[image], compiler_args=[\"--num-neuroncores\", \"2\"])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Save both models to disk" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "model_neuron.save(\"resnet50_neuron.pt\")\n", - "torch.save(model.state_dict(), \"resnet50.pt\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/compute/python/predictor.py b/examples/compute/python/predictor.py deleted file mode 100644 index 8059c4078c..0000000000 --- a/examples/compute/python/predictor.py +++ /dev/null @@ -1,93 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import os -import torch -import cv2 -import numpy as np -import requests -import re -import boto3 -from botocore import UNSIGNED -from botocore.client import Config -from torchvision import models, transforms, datasets - - -def get_url_image(url_image): - """ - Get numpy image from URL image. 
- """ - resp = requests.get(url_image, stream=True).raw - image = np.asarray(bytearray(resp.read()), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - return image - - -class PythonPredictor: - def __init__(self, config): - # load classes - classes = requests.get(config["classes"]).json() - self.idx2label = [classes[str(k)][1] for k in range(len(classes))] - - # create s3 client - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - # download the model - model_path = config["model_path"] - model_name = config["model_name"] - bucket, key = re.match("s3://(.+?)/(.+)", model_path).groups() - s3.download_file(bucket, os.path.join(key, model_name), model_name) - - # load the model - self.device = None - if config["device"] == "gpu": - self.device = torch.device("cuda") - self.model = models.resnet50() - self.model.load_state_dict(torch.load(model_name, map_location="cuda:0")) - self.model.eval() - self.model = self.model.to(self.device) - elif config["device"] == "cpu": - self.model = models.resnet50() - self.model.load_state_dict(torch.load(model_name)) - self.model.eval() - elif config["device"] == "inf": - import torch_neuron - - self.model = torch.jit.load(model_name) - else: - raise RuntimeError("invalid predictor: config: must be cpu, gpu, or inf") - - # save normalization transform for later use - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - self.transform = transforms.Compose( - [ - transforms.ToPILImage(), - transforms.Resize(config["input_shape"]), - transforms.ToTensor(), - normalize, - ] - ) - - def predict(self, payload): - # preprocess image - image = get_url_image(payload["url"]) - image = self.transform(image) - image = torch.tensor(image.numpy()[np.newaxis, ...]) - - # predict - if self.device: - results = self.model(image.to(self.device)) - else: - results = self.model(image) - - # Get the top 5 results - top5_idx = results[0].sort()[1][-5:] - - # Lookup and print the top 5 labels - top5_labels = [self.idx2label[idx] for idx in top5_idx] - top5_labels = top5_labels[::-1] - - return top5_labels diff --git a/examples/hello-world/onnx/README.md b/examples/hello-world/onnx/README.md deleted file mode 100644 index a45b69db8f..0000000000 --- a/examples/hello-world/onnx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/tutorial) to see how to deploy an example with Cortex. diff --git a/examples/hello-world/onnx/cortex.yaml b/examples/hello-world/onnx/cortex.yaml deleted file mode 100644 index 00b8a61112..0000000000 --- a/examples/hello-world/onnx/cortex.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: iris-classifier - kind: RealtimeAPI - predictor: - type: onnx - path: predictor.py - model_path: s3://cortex-examples/onnx/iris-classifier/ - monitoring: - model_type: classification diff --git a/examples/hello-world/onnx/predictor.py b/examples/hello-world/onnx/predictor.py deleted file mode 100644 index b135129e14..0000000000 --- a/examples/hello-world/onnx/predictor.py +++ /dev/null @@ -1,20 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -labels = ["setosa", "versicolor", "virginica"] - - -class ONNXPredictor: - def __init__(self, onnx_client, config): - self.client = onnx_client - - def predict(self, payload): - model_input = [ - payload["sepal_length"], - payload["sepal_width"], - payload["petal_length"], - payload["petal_width"], - ] - - prediction = self.client.predict(model_input) - predicted_class_id = prediction[0][0] - return labels[predicted_class_id] diff --git a/examples/hello-world/onnx/sample.json b/examples/hello-world/onnx/sample.json deleted file mode 100644 index 252c666b3a..0000000000 --- a/examples/hello-world/onnx/sample.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "sepal_length": 5.2, - "sepal_width": 3.6, - "petal_length": 1.4, - "petal_width": 0.3 -} diff --git a/examples/hello-world/onnx/xgboost.ipynb b/examples/hello-world/onnx/xgboost.ipynb deleted file mode 100644 index d4e1497360..0000000000 --- a/examples/hello-world/onnx/xgboost.ipynb +++ /dev/null @@ -1,244 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "iris_xgboost.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "IiTxCwB7t6Ef", - "colab_type": "text" - }, - "source": [ - "# Training an Iris classifier using XGBoost\n", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", - "\n", - "In this notebook, we'll show how to train a classifier trained on the [iris data set](https://archive.ics.uci.edu/ml/datasets/iris) using XGBoost." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "j6QdLAUpuW7r", - "colab_type": "text" - }, - "source": [ - "## Install Dependencies\n", - "First, we'll install our dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "BQE5z_kHj9jV", - "colab_type": "code", - "colab": {} - }, - "source": [ - "pip install xgboost==0.90 scikit-learn==0.21.* onnxmltools==1.5.* boto3==1.*" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yEVK-sLnumqn", - "colab_type": "text" - }, - "source": [ - "## Load the data\n", - "We can use scikit-learn to load the Iris dataset:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "tx9Xw0x0lfbl", - "colab_type": "code", - "colab": {} - }, - "source": [ - "from sklearn.datasets import load_iris\n", - "from sklearn.model_selection import train_test_split\n", - "\n", - "iris = load_iris()\n", - "X, y = iris.data, iris.target\n", - "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "obGdgMm3urb2", - "colab_type": "text" - }, - "source": [ - "## Train the model\n", - "We'll use XGBoost's [`XGBClassifier`](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBClassifier) to train the model:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jjYp8TaflhW0", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import xgboost as xgb\n", - "\n", - "xgb_model = xgb.XGBClassifier()\n", - "xgb_model = xgb_model.fit(X_train, y_train)\n", - "\n", - "print(\"Test data accuracy of the xgb classifier is {:.2f}\".format(xgb_model.score(X_test, y_test))) # Accuracy should be > 90%" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Hdwu-wzJvJLb", - "colab_type": "text" - }, - "source": [ - "## Export the model\n", - "Now we can export the model in the ONNX format:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "AVgs2mkdllRn", - "colab_type": "code", - "colab": {} - }, - "source": [ - "from onnxmltools.convert import convert_xgboost\n", - "from onnxconverter_common.data_types import FloatTensorType\n", - "\n", - "onnx_model = convert_xgboost(xgb_model, initial_types=[(\"input\", FloatTensorType([1, 4]))])\n", - "\n", - "with open(\"gbtree.onnx\", \"wb\") as f:\n", - " f.write(onnx_model.SerializeToString())" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ipVlP4yPxFxw", - "colab_type": "text" - }, - "source": [ - "## Upload the model to AWS\n", - "\n", - "Cortex loads models from AWS, so we need to upload the exported model." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3IqsfyylxLhy", - "colab_type": "text" - }, - "source": [ - "Set these variables to configure your AWS credentials and model upload path:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "lc9LBH1uHT_h", - "colab_type": "code", - "cellView": "form", - "colab": {} - }, - "source": [ - "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", - "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", - "S3_UPLOAD_PATH = \"s3://my-bucket/iris-classifier/gbtree.onnx\" #@param {type:\"string\"}\n", - "\n", - "import sys\n", - "import re\n", - "\n", - "if AWS_ACCESS_KEY_ID == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", - "\n", - "elif AWS_SECRET_ACCESS_KEY == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", - "\n", - "else:\n", - " try:\n", - " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", - " except:\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NXeuZsaQxUc8", - "colab_type": "text" - }, - "source": [ - "Upload the model to S3:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "YLmnWTEVsu55", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import boto3\n", - "\n", - "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", - "print(\"Uploading {} ...\".format(S3_UPLOAD_PATH), end = '')\n", - "s3.upload_file(\"gbtree.onnx\", bucket, key)\n", - "print(\" ✓\")" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "aR-mmcUzyCV3", - "colab_type": "text" - }, - "source": [ - "\n", - "That's it! See the [example](https://github.com/cortexlabs/cortex/tree/master/examples/onnx/iris-classifier) for how to deploy the model as an API." - ] - } - ] -} diff --git a/examples/hello-world/tensorflow/README.md b/examples/hello-world/tensorflow/README.md deleted file mode 100644 index a45b69db8f..0000000000 --- a/examples/hello-world/tensorflow/README.md +++ /dev/null @@ -1,3 +0,0 @@ -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Please refer to the [tutorial](https://docs.cortex.dev/tutorial) to see how to deploy an example with Cortex. diff --git a/examples/hello-world/tensorflow/cortex.yaml b/examples/hello-world/tensorflow/cortex.yaml deleted file mode 100644 index 2a11090b83..0000000000 --- a/examples/hello-world/tensorflow/cortex.yaml +++ /dev/null @@ -1,10 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: iris-classifier - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/iris-classifier/nn/ - monitoring: - model_type: classification diff --git a/examples/hello-world/tensorflow/predictor.py b/examples/hello-world/tensorflow/predictor.py deleted file mode 100644 index 6267256aaf..0000000000 --- a/examples/hello-world/tensorflow/predictor.py +++ /dev/null @@ -1,13 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -labels = ["setosa", "versicolor", "virginica"] - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - - def predict(self, payload): - prediction = self.client.predict(payload) - predicted_class_id = int(prediction["class_ids"][0]) - return labels[predicted_class_id] diff --git a/examples/hello-world/tensorflow/sample.json b/examples/hello-world/tensorflow/sample.json deleted file mode 100644 index 252c666b3a..0000000000 --- a/examples/hello-world/tensorflow/sample.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "sepal_length": 5.2, - "sepal_width": 3.6, - "petal_length": 1.4, - "petal_width": 0.3 -} diff --git a/examples/hello-world/tensorflow/tensorflow.ipynb b/examples/hello-world/tensorflow/tensorflow.ipynb deleted file mode 100644 index 2981ba77ef..0000000000 --- a/examples/hello-world/tensorflow/tensorflow.ipynb +++ /dev/null @@ -1,296 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "iris_tensorflow.ipynb", - "provenance": [], - "collapsed_sections": [] - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "id": "IiTxCwB7t6Ef", - "colab_type": "text" - }, - "source": [ - "# Training an Iris classifier using TensorFlow\n", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", - "\n", - "In this notebook, we'll show how to train a classifier trained on the [iris data set](https://archive.ics.uci.edu/ml/datasets/iris) using TensorFlow." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "j6QdLAUpuW7r", - "colab_type": "text" - }, - "source": [ - "## Install Dependencies\n", - "First, we'll install our dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "BQE5z_kHj9jV", - "colab_type": "code", - "colab": {} - }, - "source": [ - "pip install tensorflow==1.14.* scikit-learn==0.21.* boto3==1.*" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "yEVK-sLnumqn", - "colab_type": "text" - }, - "source": [ - "## Load the data\n", - "We can use scikit-learn to load the Iris dataset:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "tx9Xw0x0lfbl", - "colab_type": "code", - "colab": {} - }, - "source": [ - "from sklearn.datasets import load_iris\n", - "from sklearn.model_selection import train_test_split\n", - "\n", - "iris = load_iris()\n", - "X, y = iris.data, iris.target\n", - "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "obGdgMm3urb2", - "colab_type": "text" - }, - "source": [ - "## Train the model\n", - "We'll use TensorFlow's [`DNNClassifier`](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/estimator/DNNClassifier) to train the model:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jjYp8TaflhW0", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "\n", - "feature_names = [\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\"]\n", - "\n", - "\n", - "def train_input_fn(features, labels, batch_size):\n", - " irises = {}\n", - "\n", - " for i, feature_name in enumerate(feature_names):\n", - " irises[feature_name] = features[:, i]\n", - " \n", - " dataset = tf.data.Dataset.from_tensor_slices((irises, labels))\n", - " dataset = dataset.shuffle(1000).repeat().batch(batch_size)\n", - "\n", - " return dataset\n", - "\n", - "\n", - "def eval_input_fn(features, labels, batch_size):\n", - " irises = {}\n", - " for i, feature_name in enumerate(feature_names):\n", - " irises[feature_name] = features[:, i]\n", - "\n", - " if labels is None:\n", - " inputs = irises\n", - " else:\n", - " inputs = (irises, labels)\n", - "\n", - " dataset = tf.data.Dataset.from_tensor_slices(inputs)\n", - " dataset = dataset.batch(batch_size)\n", - "\n", - " return dataset\n", - "\n", - "\n", - "feature_columns = [tf.feature_column.numeric_column(feature_name) for feature_name in feature_names]\n", - "\n", - "classifier = tf.estimator.DNNClassifier(\n", - " feature_columns=feature_columns,\n", - " hidden_units=[10, 10],\n", - " n_classes=3,\n", - ")\n", - "\n", - "classifier.train(input_fn=lambda: train_input_fn(X_train, y_train, 100), steps=1000)\n", - "\n", - "eval_result = classifier.evaluate(input_fn=lambda: eval_input_fn(X_test, y_test, 100))\n", - "\n", - "print(\"\\nTest set accuracy: {accuracy:0.3f}\\n\".format(**eval_result)) # Accuracy should be > 90%" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Hdwu-wzJvJLb", - "colab_type": "text" - }, - "source": [ - "## Export the model\n", - "Now we can export the model using [`Estimator.export_saved_model`](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/estimator/Estimator#export_saved_model):" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "AVgs2mkdllRn", - "colab_type": "code", - "colab": {} - }, 
- "source": [ - "def json_serving_input_fn():\n", - " placeholders = {}\n", - " features = {}\n", - " for feature_name in feature_names:\n", - " placeholders[feature_name] = tf.placeholder(shape=[None], dtype=tf.float64, name=feature_name)\n", - " features[feature_name] = tf.expand_dims(placeholders[feature_name], -1)\n", - " \n", - " return tf.estimator.export.ServingInputReceiver(features, receiver_tensors=placeholders)\n", - "\n", - "\n", - "classifier.export_saved_model(\"export\", json_serving_input_fn)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ipVlP4yPxFxw", - "colab_type": "text" - }, - "source": [ - "## Upload the model to AWS\n", - "\n", - "Cortex loads models from AWS, so we need to upload the exported model." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "3IqsfyylxLhy", - "colab_type": "text" - }, - "source": [ - "Set these variables to configure your AWS credentials and model upload path:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "lc9LBH1uHT_h", - "colab_type": "code", - "cellView": "form", - "colab": {} - }, - "source": [ - "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", - "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", - "S3_UPLOAD_PATH = \"s3://my-bucket/iris-classifier/tensorflow\" #@param {type:\"string\"}\n", - "\n", - "import sys\n", - "import re\n", - "\n", - "if AWS_ACCESS_KEY_ID == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", - "\n", - "elif AWS_SECRET_ACCESS_KEY == \"\":\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", - "\n", - "else:\n", - " try:\n", - " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", - " except:\n", - " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "NXeuZsaQxUc8", - "colab_type": "text" - }, - "source": [ - "Upload the model to S3:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "YLmnWTEVsu55", - "colab_type": "code", - "colab": {} - }, - "source": [ - "import os\n", - "import boto3\n", - "\n", - "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", - "\n", - "for dirpath, _, filenames in os.walk(\"export\"):\n", - " for filename in filenames:\n", - " filepath = os.path.join(dirpath, filename)\n", - " filekey = os.path.join(key, filepath[len(\"export/\"):])\n", - " print(\"Uploading s3://{}/{}...\".format(bucket, filekey), end = '')\n", - " s3.upload_file(filepath, bucket, filekey)\n", - " print(\" ✓\")", - "\n", - "print(\"\\nUploaded model export directory to \" + S3_UPLOAD_PATH)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "aR-mmcUzyCV3", - "colab_type": "text" - }, - "source": [ - "\n", - "That's it! See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/iris-classifier) for how to deploy the model as an API." 
- ] - } - ] -} diff --git a/examples/live-reloading/onnx/README.md b/examples/live-reloading/onnx/README.md deleted file mode 100644 index e8ec367b01..0000000000 --- a/examples/live-reloading/onnx/README.md +++ /dev/null @@ -1,5 +0,0 @@ -## Live-reloading model APIs - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Model live-reloading is automatically enabled for ONNX predictors. diff --git a/examples/live-reloading/python/mpg-estimator/cortex.yaml b/examples/live-reloading/python/mpg-estimator/cortex.yaml deleted file mode 100644 index 4c243b5032..0000000000 --- a/examples/live-reloading/python/mpg-estimator/cortex.yaml +++ /dev/null @@ -1,8 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: mpg-estimator - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - model_path: s3://cortex-examples/sklearn/mpg-estimator/linreg/ diff --git a/examples/live-reloading/python/mpg-estimator/predictor.py b/examples/live-reloading/python/mpg-estimator/predictor.py deleted file mode 100644 index 104b9a5c0a..0000000000 --- a/examples/live-reloading/python/mpg-estimator/predictor.py +++ /dev/null @@ -1,27 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import mlflow.sklearn -import numpy as np - - -class PythonPredictor: - def __init__(self, config, python_client): - self.client = python_client - - def load_model(self, model_path): - return mlflow.sklearn.load_model(model_path) - - def predict(self, payload, query_params): - model_version = query_params.get("version") - - model = self.client.get_model(model_version=model_version) - model_input = [ - payload["cylinders"], - payload["displacement"], - payload["horsepower"], - payload["weight"], - payload["acceleration"], - ] - result = model.predict([model_input]).item() - - return {"prediction": result, "model": {"version": model_version}} diff --git a/examples/live-reloading/python/mpg-estimator/requirements.txt b/examples/live-reloading/python/mpg-estimator/requirements.txt deleted file mode 100644 index cbcad6b321..0000000000 --- a/examples/live-reloading/python/mpg-estimator/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -mlflow -pandas -numpy -scikit-learn==0.21.3 diff --git a/examples/live-reloading/python/mpg-estimator/sample.json b/examples/live-reloading/python/mpg-estimator/sample.json deleted file mode 100644 index 2dbbca46dd..0000000000 --- a/examples/live-reloading/python/mpg-estimator/sample.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "cylinders": 4, - "displacement": 135, - "horsepower": 84, - "weight": 2490, - "acceleration": 15.7 -} diff --git a/examples/live-reloading/tensorflow/README.md b/examples/live-reloading/tensorflow/README.md deleted file mode 100644 index 46f4111a4f..0000000000 --- a/examples/live-reloading/tensorflow/README.md +++ /dev/null @@ -1,5 +0,0 @@ -## Live-reloading model APIs - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -Model live-reloading is automatically enabled for TensorFlow predictors unless using Inferentia resources (`compute.inf`) and `processes_per_replica` > 1. diff --git a/examples/model-caching/onnx/multi-model-classifier/README.md b/examples/model-caching/onnx/multi-model-classifier/README.md deleted file mode 100644 index bf5fc906cb..0000000000 --- a/examples/model-caching/onnx/multi-model-classifier/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Multi-Model Classifier API - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example deploys ResNet50, MobileNet and ShuffleNet models in one API. Query parameters are used for selecting the model and the version. - -Since model caching is enabled, there can only be 2 models loaded into memory - loading a 3rd one will lead to the removal of the least recently used one. To witness the adding/removal process of models, check the logs of the API by running `cortex logs multi-model-classifier` once the API is up. - -The example can be run on both CPU and on GPU hardware. - -## Sample Prediction - -Deploy the model by running: - -```bash -cortex deploy -``` - -And wait for it to become live by tracking its status with `cortex get --watch`. - -Once the API has been successfully deployed, export the API's endpoint for convenience. You can get the API's endpoint by running `cortex get multi-model-classifier`. - -```bash -export ENDPOINT=your-api-endpoint -``` - -When making a prediction with [sample.json](sample.json), the following image will be used: - -![cat](https://i.imgur.com/213xcvs.jpg) - -### ResNet50 Classifier - -Make a request to the ResNet50 model: - -```bash -curl "${ENDPOINT}?model=resnet50" -X POST -H "Content-Type: application/json" -d @sample.json -``` - -The expected response is: - -```json -{"label": "tabby", "model": {"name": "resnet50", "version": "latest"}} -``` - -### MobileNet Classifier - -Make a request to the MobileNet model: - -```bash -curl "${ENDPOINT}?model=mobilenet" -X POST -H "Content-Type: application/json" -d @sample.json -``` - -The expected response is: - -```json -{"label": "tabby", "model": {"name": "mobilenet", "version": "latest"}} -``` - -### ShuffleNet Classifier - -At this point, there are 2 models loaded into memory (as specified by `cache_size`). Loading `ShuffleNet` as well will lead to the removal of the least recently used model - in this case, it will be the ResNet50 model that will get evicted. Since the `disk_cache_size` is set to 3, no model will be removed from disk. - -Make a request to the ShuffleNet model: - -```bash -curl "${ENDPOINT}?model=shufflenet" -X POST -H "Content-Type: application/json" -d @sample.json -``` - -The expected response is: - -```json -{"label": "Egyptian_cat", "model": {"name": "shufflenet", "version": "latest"}} -``` - ---- - -Now, inspect `cortex get multi-model-classifier` to see when and which models were removed in this process of making requests to different versions of the same model. 
diff --git a/examples/model-caching/onnx/multi-model-classifier/cortex.yaml b/examples/model-caching/onnx/multi-model-classifier/cortex.yaml deleted file mode 100644 index f074721fd3..0000000000 --- a/examples/model-caching/onnx/multi-model-classifier/cortex.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: multi-model-classifier - kind: RealtimeAPI - predictor: - type: onnx - path: predictor.py - models: - paths: - - name: resnet50 - model_path: s3://cortex-examples/onnx/resnet50/ - - name: mobilenet - model_path: s3://cortex-examples/onnx/mobilenet/ - - name: shufflenet - model_path: s3://cortex-examples/onnx/shufflenet/ - cache_size: 2 - disk_cache_size: 3 - config: - image-classifier-classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - image-resize: 224 - compute: - mem: 2G diff --git a/examples/model-caching/onnx/multi-model-classifier/predictor.py b/examples/model-caching/onnx/multi-model-classifier/predictor.py deleted file mode 100644 index 6ab949a24c..0000000000 --- a/examples/model-caching/onnx/multi-model-classifier/predictor.py +++ /dev/null @@ -1,99 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import numpy as np -import cv2, requests -from scipy.special import softmax - - -def get_url_image(url_image): - """ - Get numpy image from URL image. - """ - resp = requests.get(url_image, stream=True).raw - image = np.asarray(bytearray(resp.read()), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - return image - - -def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA): - """ - Resize a numpy image. - """ - dim = None - (h, w) = image.shape[:2] - - if width is None and height is None: - return image - - if width is None: - # calculate the ratio of the height and construct the dimensions - r = height / float(h) - dim = (int(w * r), height) - else: - # calculate the ratio of the width and construct the dimensions - r = width / float(w) - dim = (width, int(h * r)) - - resized = cv2.resize(image, dim, interpolation=inter) - - return resized - - -def preprocess(img_data): - """ - Normalize input for inference. - """ - # move pixel color dimension to position 0 - img = np.moveaxis(img_data, 2, 0) - - mean_vec = np.array([0.485, 0.456, 0.406]) - stddev_vec = np.array([0.229, 0.224, 0.225]) - norm_img_data = np.zeros(img.shape).astype("float32") - for i in range(img.shape[0]): - # for each pixel in each channel, divide the value by 255 to get value between [0, 1] and then normalize - norm_img_data[i, :, :] = (img[i, :, :] / 255 - mean_vec[i]) / stddev_vec[i] - - # extend to batch size of 1 - norm_img_data = norm_img_data[np.newaxis, ...] - return norm_img_data - - -def postprocess(results): - """ - Eliminates all dimensions of size 1, softmaxes the input and then returns the index of the element with the highest value. 
- """ - squeezed = np.squeeze(results) - maxed = softmax(squeezed) - result = np.argmax(maxed) - return result - - -class ONNXPredictor: - def __init__(self, onnx_client, config): - # onnx client - self.client = onnx_client - - # for image classifiers - classes = requests.get(config["image-classifier-classes"]).json() - self.image_classes = [classes[str(k)][1] for k in range(len(classes))] - self.resize_value = config["image-resize"] - - def predict(self, payload, query_params): - # get request params - model_name = query_params["model"] - model_version = query_params.get("version", "latest") - img_url = payload["url"] - - # process the input - img = get_url_image(img_url) - img = image_resize(img, height=self.resize_value) - img = preprocess(img) - - # predict - results = self.client.predict(img, model_name, model_version)[0] - - # interpret result - result = postprocess(results) - predicted_label = self.image_classes[result] - - return {"label": predicted_label, "model": {"name": model_name, "version": model_version}} diff --git a/examples/model-caching/onnx/multi-model-classifier/sample.json b/examples/model-caching/onnx/multi-model-classifier/sample.json deleted file mode 100644 index 4ee3aa45df..0000000000 --- a/examples/model-caching/onnx/multi-model-classifier/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://i.imgur.com/213xcvs.jpg" -} diff --git a/examples/model-caching/python/mpg-estimator/README.md b/examples/model-caching/python/mpg-estimator/README.md deleted file mode 100644 index e120ac8204..0000000000 --- a/examples/model-caching/python/mpg-estimator/README.md +++ /dev/null @@ -1,75 +0,0 @@ -# MPG Estimator API - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example deploys multiple versions of an MPG estimator model in one API. Query parameters are used for selecting the model and the version. - -Since model caching is enabled, there can only be 1 model loaded into memory (counting the versioned models as well) - loading a 2nd one will lead to the removal of the least recently used one. To witness the adding/removal process of models, check the logs of the API by running `cortex logs mpg-estimator` once the API is up. - -The example can be run on both CPU and GPU hardware. - -## Sample Prediction - -Deploy the model by running: - -```bash -cortex deploy -``` - -And wait for it to become live by tracking its status with `cortex get --watch`. - -Once the API has been successfully deployed, export the API's endpoint for convenience. You can get the API's endpoint by running `cortex get mpg-estimator`. - -```bash -export ENDPOINT=your-api-endpoint -``` - -### Version 1 - -Make a request to version `1` of the `mpg-estimator` model: - -```bash -curl "${ENDPOINT}?model=mpg-estimator&version=1" -X POST -H "Content-Type: application/json" -d @sample.json -``` - -The expected response is: - -```json -{"prediction": 26.929889872154185, "model": {"name": "mpg-estimator", "version": "1"}} -``` - -### Version 2 - -At this point, there is one model loaded into memory (as specified by `cache_size`). Loading another versioned model as well will lead to the removal of the least recently used model - in this case, it will be version 1 that will get evicted. Since the `disk_cache_size` is set to 2, no model will be removed from disk. 
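The eviction policy being described is a plain least-recently-used (LRU) scheme over the loaded models. As a toy illustration only (this is not Cortex's implementation), the following sketch reproduces what happens with `cache_size: 1` and `disk_cache_size: 2` as versions 1, 2 and 3 are requested in order:

```python
from collections import OrderedDict

# Toy LRU illustration of the caching behavior described above (not Cortex's code):
# at most `cache_size` models stay in memory and `disk_cache_size` stay on disk.
cache_size, disk_cache_size = 1, 2
memory, disk = OrderedDict(), OrderedDict()

def request(version):
    for cache in (memory, disk):
        cache[version] = True        # load the model (or mark it as recently used)
        cache.move_to_end(version)
    while len(memory) > cache_size:
        print(f"evicting version {memory.popitem(last=False)[0]} from memory")
    while len(disk) > disk_cache_size:
        print(f"removing version {disk.popitem(last=False)[0]} from disk")

for version in (1, 2, 3):
    request(version)

# Requesting version 2 evicts version 1 from memory; requesting version 3 then
# evicts version 2 from memory and removes version 1 from disk.
```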
- -Make a request to version `2` of the `mpg-estimator` model: - -```bash -curl "${ENDPOINT}?model=mpg-estimator&version=2" -X POST -H "Content-Type: application/json" -d @sample.json -``` - -The expected response is: - -```json -{"prediction": 26.929889872154185, "model": {"name": "mpg-estimator", "version": "2"}} -``` - -### Version 3 - -With the following request, version 2 of the model will have to be evicted from memory. Since `disk_cache_size` is set to 2, this time, version 1 of the model will get removed from disk. - -Make a request to version `3` of the `mpg-estimator` model: - -```bash -curl "${ENDPOINT}?model=mpg-estimator&version=3" -X POST -H "Content-Type: application/json" -d @sample.json -``` - -The expected response is: - -```json -{"prediction": 26.929889872154185, "model": {"name": "mpg-estimator", "version": "3"}} -``` - ---- - -Now, inspect `cortex get mpg-estimator` to see when and which models were removed in this process of making requests to different versions of the same model. The same policy is applied across different models as well, not just to the versions of a single model. diff --git a/examples/model-caching/python/mpg-estimator/cortex.yaml b/examples/model-caching/python/mpg-estimator/cortex.yaml deleted file mode 100644 index 1d26879aaa..0000000000 --- a/examples/model-caching/python/mpg-estimator/cortex.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: mpg-estimator - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - models: - paths: - - name: mpg-estimator - model_path: s3://cortex-examples/sklearn/mpg-estimator/linreg/ - cache_size: 1 - disk_cache_size: 2 diff --git a/examples/model-caching/python/mpg-estimator/predictor.py b/examples/model-caching/python/mpg-estimator/predictor.py deleted file mode 100644 index 84aa206f41..0000000000 --- a/examples/model-caching/python/mpg-estimator/predictor.py +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import mlflow.sklearn -import numpy as np - - -class PythonPredictor: - def __init__(self, config, python_client): - self.client = python_client - - def load_model(self, model_path): - return mlflow.sklearn.load_model(model_path) - - def predict(self, payload, query_params): - model_name = query_params["model"] - model_version = query_params.get("version", "latest") - - model = self.client.get_model(model_name, model_version) - model_input = [ - payload["cylinders"], - payload["displacement"], - payload["horsepower"], - payload["weight"], - payload["acceleration"], - ] - result = model.predict([model_input]).item() - - return {"prediction": result, "model": {"name": model_name, "version": model_version}} diff --git a/examples/model-caching/python/mpg-estimator/requirements.txt b/examples/model-caching/python/mpg-estimator/requirements.txt deleted file mode 100644 index cbcad6b321..0000000000 --- a/examples/model-caching/python/mpg-estimator/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -mlflow -pandas -numpy -scikit-learn==0.21.3 diff --git a/examples/model-caching/python/mpg-estimator/sample.json b/examples/model-caching/python/mpg-estimator/sample.json deleted file mode 100644 index 2dbbca46dd..0000000000 --- a/examples/model-caching/python/mpg-estimator/sample.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "cylinders": 4, - "displacement": 135, - "horsepower": 84, - "weight": 2490, - "acceleration": 15.7 -} diff --git a/examples/model-caching/tensorflow/multi-model-classifier/README.md b/examples/model-caching/tensorflow/multi-model-classifier/README.md deleted file mode 100644 index 9fd921884b..0000000000 --- a/examples/model-caching/tensorflow/multi-model-classifier/README.md +++ /dev/null @@ -1,77 +0,0 @@ -# Multi-Model Classifier API - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example deploys Iris, ResNet50 and Inception models in one API. Query parameters are used for selecting the model. - -Since model caching is enabled, there can only be 2 models loaded into memory - loading a 3rd one will lead to the removal of the least recently used one. To witness the adding/removal process of models, check the logs of the API by running `cortex logs multi-model-classifier` once the API is up. - -The example can be run on both CPU and on GPU hardware. - -## Sample Prediction - -Deploy the model by running: - -```bash -cortex deploy -``` - -And wait for it to become live by tracking its status with `cortex get --watch`. - -Once the API has been successfully deployed, export the APIs endpoint. You can get the API's endpoint by running `cortex get multi-model-classifier`. 
- -```bash -export ENDPOINT=your-api-endpoint -``` - -When making a prediction with [sample-image.json](sample-image.json), the following image will be used: - -![sports car](https://i.imgur.com/zovGIKD.png) - -### ResNet50 Classifier - -Make a request to the ResNet50 model: - -```bash -curl "${ENDPOINT}?model=resnet50" -X POST -H "Content-Type: application/json" -d @sample-image.json -``` - -The expected response is: - -```json -{"label": "sports_car"} -``` - -### Inception Classifier - -Make a request to the Inception model: - -```bash -curl "${ENDPOINT}?model=inception" -X POST -H "Content-Type: application/json" -d @sample-image.json -``` - -The expected response is: - -```json -{"label": "sports_car"} -``` - -### Iris Classifier - -At this point, there are 2 models loaded into memory (as specified by `cache_size`). Loading the `iris` classifier will lead to the removal of the least recently used model - in this case, it will be the ResNet50 model that will get evicted. Since the `disk_cache_size` is set to 3, no model will be removed from disk. - -Make a request to the Iris model: - -```bash -curl "${ENDPOINT}?model=iris" -X POST -H "Content-Type: application/json" -d @sample-iris.json -``` - -The expected response is: - -```json -{"label": "setosa"} -``` - ---- - -Now, inspect `cortex get multi-model-classifier` to see when and which models were removed in this process of making requests to different versions of the same model. diff --git a/examples/model-caching/tensorflow/multi-model-classifier/cortex.yaml b/examples/model-caching/tensorflow/multi-model-classifier/cortex.yaml deleted file mode 100644 index 4a165d177d..0000000000 --- a/examples/model-caching/tensorflow/multi-model-classifier/cortex.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: multi-model-classifier - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - models: - paths: - - name: inception - model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ - - name: iris - model_path: s3://cortex-examples/tensorflow/iris-classifier/nn/ - - name: resnet50 - model_path: s3://cortex-examples/tensorflow/resnet50/ - cache_size: 2 - disk_cache_size: 3 - config: - models: - iris: - labels: ["setosa", "versicolor", "virginica"] - resnet50: - input_shape: [224, 224] - input_key: input - output_key: output - inception: - input_shape: [224, 224] - input_key: images - output_key: classes - image-classifier-classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - compute: - mem: 2G diff --git a/examples/model-caching/tensorflow/multi-model-classifier/predictor.py b/examples/model-caching/tensorflow/multi-model-classifier/predictor.py deleted file mode 100644 index d0914b8411..0000000000 --- a/examples/model-caching/tensorflow/multi-model-classifier/predictor.py +++ /dev/null @@ -1,63 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import requests -import numpy as np -import cv2 - - -def get_url_image(url_image): - """ - Get numpy image from URL image. 
- """ - resp = requests.get(url_image, stream=True).raw - image = np.asarray(bytearray(resp.read()), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - return image - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - - # for image classifiers - classes = requests.get(config["image-classifier-classes"]).json() - self.image_classes = [classes[str(k)][1] for k in range(len(classes))] - - # assign "models"' key value to self.config for ease of use - self.config = config["models"] - - # for iris classifier - self.iris_labels = self.config["iris"]["labels"] - - def predict(self, payload, query_params): - model_name = query_params["model"] - model_version = query_params.get("version", "latest") - predicted_label = None - - if model_name == "iris": - prediction = self.client.predict(payload["input"], model_name, model_version) - predicted_class_id = int(prediction["class_ids"][0]) - predicted_label = self.iris_labels[predicted_class_id] - - elif model_name in ["resnet50", "inception"]: - predicted_label = self.predict_image_classifier(model_name, payload["url"]) - - return {"label": predicted_label, "model": {"model": model_name, "version": model_version}} - - def predict_image_classifier(self, model, img_url): - img = get_url_image(img_url) - img = cv2.resize( - img, tuple(self.config[model]["input_shape"]), interpolation=cv2.INTER_NEAREST - ) - if model == "inception": - img = img.astype("float32") / 255 - img = {self.config[model]["input_key"]: img[np.newaxis, ...]} - - results = self.client.predict(img, model)[self.config[model]["output_key"]] - result = np.argmax(results) - if model == "inception": - result -= 1 - predicted_label = self.image_classes[result] - - return predicted_label diff --git a/examples/model-caching/tensorflow/multi-model-classifier/requirements.txt b/examples/model-caching/tensorflow/multi-model-classifier/requirements.txt deleted file mode 100644 index 7e2fba5e6c..0000000000 --- a/examples/model-caching/tensorflow/multi-model-classifier/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Pillow diff --git a/examples/model-caching/tensorflow/multi-model-classifier/sample-image.json b/examples/model-caching/tensorflow/multi-model-classifier/sample-image.json deleted file mode 100644 index 95200916c7..0000000000 --- a/examples/model-caching/tensorflow/multi-model-classifier/sample-image.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://i.imgur.com/zovGIKD.png" -} diff --git a/examples/model-caching/tensorflow/multi-model-classifier/sample-iris.json b/examples/model-caching/tensorflow/multi-model-classifier/sample-iris.json deleted file mode 100644 index 67c03827f2..0000000000 --- a/examples/model-caching/tensorflow/multi-model-classifier/sample-iris.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "input": { - "sepal_length": 5.2, - "sepal_width": 3.6, - "petal_length": 1.4, - "petal_width": 0.3 - } -} diff --git a/examples/multi-model/onnx/requirements.txt b/examples/multi-model/onnx/requirements.txt deleted file mode 100644 index 212d089934..0000000000 --- a/examples/multi-model/onnx/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -opencv-python==4.2.0.34 -scipy==1.4.1 diff --git a/examples/multi-model/onnx/sample.json b/examples/multi-model/onnx/sample.json deleted file mode 100644 index 4ee3aa45df..0000000000 --- a/examples/multi-model/onnx/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://i.imgur.com/213xcvs.jpg" -} diff --git 
a/examples/multi-model/python/README.md b/examples/multi-model/python/README.md deleted file mode 100644 index 0fbca390cd..0000000000 --- a/examples/multi-model/python/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# Multi-Model Analyzer API - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example deploys a sentiment analyzer and a text summarizer in one API. Query parameters are used for selecting the model. - -The example can be run on both CPU and on GPU hardware. - -## Sample Prediction - -Deploy the model by running: - -```bash -cortex deploy -``` - -And wait for it to become live by tracking its status with `cortex get --watch`. - -Once the API has been successfully deployed, export the APIs endpoint. You can get the API's endpoint by running `cortex get text-analyzer`. - -```bash -export ENDPOINT=your-api-endpoint -``` - -### Sentiment Analyzer Classifier - -Make a request to the sentiment analyzer model: - -```bash -curl "${ENDPOINT}?model=sentiment" -X POST -H "Content-Type: application/json" -d @sample-sentiment.json -``` - -The expected response is: - -```json -{"label": "POSITIVE", "score": 0.9998506903648376} -``` - -### Text Summarizer - -Make a request to the text summarizer model: - -```bash -curl "${ENDPOINT}?model=summarizer" -X POST -H "Content-Type: application/json" -d @sample-summarizer.json -``` - -The expected response is: - -```text -Machine learning is the study of algorithms and statistical models that computer systems use to perform a specific task. It is seen as a subset of artificial intelligence. Machine learning algorithms are used in a wide variety of applications, such as email filtering and computer vision. In its application across business problems, machine learning is also referred to as predictive analytics. -``` diff --git a/examples/multi-model/python/cortex.yaml b/examples/multi-model/python/cortex.yaml deleted file mode 100644 index b2ece6bab9..0000000000 --- a/examples/multi-model/python/cortex.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: multi-model-text-analyzer - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - gpu: 1 - mem: 6G diff --git a/examples/multi-model/python/predictor.py b/examples/multi-model/python/predictor.py deleted file mode 100644 index 03a8b03fbb..0000000000 --- a/examples/multi-model/python/predictor.py +++ /dev/null @@ -1,25 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -from transformers import pipeline -from starlette.responses import JSONResponse - - -class PythonPredictor: - def __init__(self, config): - device = 0 if torch.cuda.is_available() else -1 - print(f"using device: {'cuda' if device == 0 else 'cpu'}") - - self.analyzer = pipeline(task="sentiment-analysis", device=device) - self.summarizer = pipeline(task="summarization", device=device) - - def predict(self, query_params, payload): - model_name = query_params.get("model") - - if model_name == "sentiment": - return self.analyzer(payload["text"])[0] - elif model_name == "summarizer": - summary = self.summarizer(payload["text"]) - return summary[0]["summary_text"] - else: - return JSONResponse({"error": f"unknown model: {model_name}"}, status_code=400) diff --git a/examples/multi-model/python/requirements.txt b/examples/multi-model/python/requirements.txt deleted file mode 100644 index 3f565d80e4..0000000000 --- a/examples/multi-model/python/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -torch -transformers==2.9.* diff --git a/examples/multi-model/python/sample-sentiment.json b/examples/multi-model/python/sample-sentiment.json deleted file mode 100644 index de3a18a92a..0000000000 --- a/examples/multi-model/python/sample-sentiment.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "best day ever" -} diff --git a/examples/multi-model/python/sample-summarizer.json b/examples/multi-model/python/sample-summarizer.json deleted file mode 100644 index b19a1406d4..0000000000 --- a/examples/multi-model/python/sample-summarizer.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "text": "Machine learning (ML) is the scientific study of algorithms and statistical models that computer systems use to perform a specific task without using explicit instructions, relying on patterns and inference instead. It is seen as a subset of artificial intelligence. Machine learning algorithms build a mathematical model based on sample data, known as training data, in order to make predictions or decisions without being explicitly programmed to perform the task. Machine learning algorithms are used in a wide variety of applications, such as email filtering and computer vision, where it is difficult or infeasible to develop a conventional algorithm for effectively performing the task. Machine learning is closely related to computational statistics, which focuses on making predictions using computers. The study of mathematical optimization delivers methods, theory and application domains to the field of machine learning. Data mining is a field of study within machine learning, and focuses on exploratory data analysis through unsupervised learning. In its application across business problems, machine learning is also referred to as predictive analytics." -} diff --git a/examples/multi-model/tensorflow/README.md b/examples/multi-model/tensorflow/README.md deleted file mode 100644 index 631f800179..0000000000 --- a/examples/multi-model/tensorflow/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# Multi-Model Classifier API - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example deploys Iris, ResNet50 and Inception models in one API. Query parameters are used for selecting the model. - -The example can be run on both CPU and on GPU hardware. 
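Note that the models expect different payload shapes: the image classifiers take a JSON body with a `url` field (see `sample-image.json`), while the iris classifier takes named flower measurements under an `input` field (see `sample-iris.json`). Roughly, the two shapes look like this (the values below simply mirror the sample files):

```python
# Illustrative payloads only - the actual request bodies live in
# sample-image.json and sample-iris.json in this directory.
image_payload = {"url": "https://i.imgur.com/zovGIKD.png"}

iris_payload = {
    "input": {
        "sepal_length": 5.2,
        "sepal_width": 3.6,
        "petal_length": 1.4,
        "petal_width": 0.3,
    }
}
```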
- -## Sample Prediction - -Deploy the model by running: - -```bash -cortex deploy -``` - -And wait for it to become live by tracking its status with `cortex get --watch`. - -Once the API has been successfully deployed, export the APIs endpoint. You can get the API's endpoint by running `cortex get multi-model-classifier`. - -```bash -export ENDPOINT=your-api-endpoint -``` - -When making a prediction with [sample-image.json](sample-image.json), the following image will be used: - -![sports car](https://i.imgur.com/zovGIKD.png) - -### ResNet50 Classifier - -Make a request to the ResNet50 model: - -```bash -curl "${ENDPOINT}?model=resnet50" -X POST -H "Content-Type: application/json" -d @sample-image.json -``` - -The expected response is: - -```json -{"label": "sports_car"} -``` - -### Inception Classifier - -Make a request to the Inception model: - -```bash -curl "${ENDPOINT}?model=inception" -X POST -H "Content-Type: application/json" -d @sample-image.json -``` - -The expected response is: - -```json -{"label": "sports_car"} -``` - -### Iris Classifier - -Make a request to the Iris model: - -```bash -curl "${ENDPOINT}?model=iris" -X POST -H "Content-Type: application/json" -d @sample-iris.json -``` - -The expected response is: - -```json -{"label": "setosa"} -``` diff --git a/examples/multi-model/tensorflow/cortex.yaml b/examples/multi-model/tensorflow/cortex.yaml deleted file mode 100644 index 2c0e39bea8..0000000000 --- a/examples/multi-model/tensorflow/cortex.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: multi-model-classifier - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - models: - paths: - - name: inception - model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ - - name: resnet50 - model_path: s3://cortex-examples/tensorflow/resnet50/ - config: - models: - resnet50: - input_shape: [224, 224] - input_key: input - output_key: output - inception: - input_shape: [224, 224] - input_key: images - output_key: classes - image-classifier-classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - compute: - mem: 2G diff --git a/examples/multi-model/tensorflow/predictor.py b/examples/multi-model/tensorflow/predictor.py deleted file mode 100644 index 6577777037..0000000000 --- a/examples/multi-model/tensorflow/predictor.py +++ /dev/null @@ -1,62 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import requests -import numpy as np -import cv2 - - -def get_url_image(url_image): - """ - Get numpy image from URL image. 
- """ - resp = requests.get(url_image, stream=True).raw - image = np.asarray(bytearray(resp.read()), dtype="uint8") - image = cv2.imdecode(image, cv2.IMREAD_COLOR) - image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - return image - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - - # for image classifiers - classes = requests.get(config["image-classifier-classes"]).json() - self.image_classes = [classes[str(k)][1] for k in range(len(classes))] - - # assign "models"' key value to self.config for ease of use - self.config = config["models"] - - # for iris classifier - self.iris_labels = self.config["iris"]["labels"] - - def predict(self, payload, query_params): - model_name = query_params["model"] - predicted_label = None - - if model_name == "iris": - prediction = self.client.predict(payload["input"], model_name) - predicted_class_id = int(prediction["class_ids"][0]) - predicted_label = self.iris_labels[predicted_class_id] - - elif model_name in ["resnet50", "inception"]: - predicted_label = self.predict_image_classifier(model_name, payload["url"]) - - return {"label": predicted_label} - - def predict_image_classifier(self, model, img_url): - img = get_url_image(img_url) - img = cv2.resize( - img, tuple(self.config[model]["input_shape"]), interpolation=cv2.INTER_NEAREST - ) - if model == "inception": - img = img.astype("float32") / 255 - img = {self.config[model]["input_key"]: img[np.newaxis, ...]} - - results = self.client.predict(img, model)[self.config[model]["output_key"]] - result = np.argmax(results) - if model == "inception": - result -= 1 - predicted_label = self.image_classes[result] - - return predicted_label diff --git a/examples/multi-model/tensorflow/requirements.txt b/examples/multi-model/tensorflow/requirements.txt deleted file mode 100644 index 7e2fba5e6c..0000000000 --- a/examples/multi-model/tensorflow/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -Pillow diff --git a/examples/multi-model/tensorflow/sample-image.json b/examples/multi-model/tensorflow/sample-image.json deleted file mode 100644 index 95200916c7..0000000000 --- a/examples/multi-model/tensorflow/sample-image.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "url": "https://i.imgur.com/zovGIKD.png" -} From 82a8272365c3ffc85e1dc5c4cc8cb1d4a32382f8 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Fri, 4 Dec 2020 16:17:49 -0800 Subject: [PATCH 04/36] Update tutorials --- README.md | 7 +- docs/summary.md | 6 +- docs/tutorials/batch.md | 0 docs/tutorials/compute/README.md | 90 -------- docs/tutorials/compute/cortex.yaml | 18 -- docs/tutorials/compute/cortex_gpu.yaml | 19 -- .../cortex_gpu_server_side_batching.yaml | 22 -- docs/tutorials/compute/cortex_inf.yaml | 21 -- .../cortex_inf_server_side_batching.yaml | 24 --- .../compute/generate_gpu_resnet50_model.ipynb | 131 ------------ .../compute/generate_resnet50_models.ipynb | 178 ---------------- docs/tutorials/compute/predictor.py | 63 ------ docs/tutorials/compute/requirements.txt | 1 - docs/tutorials/compute/sample.bin | Bin 8680 -> 0 bytes docs/tutorials/compute/sample.json | 3 - docs/tutorials/multi-model.md | 77 +++++++ docs/tutorials/multi-model/README.md | 69 ------- docs/tutorials/multi-model/cortex.yaml | 20 -- docs/tutorials/multi-model/predictor.py | 98 --------- docs/tutorials/multi-model/requirements.txt | 2 - docs/tutorials/multi-model/sample.json | 3 - docs/tutorials/realtime.md | 100 +++++++++ docs/tutorials/realtime/README.md | 192 ------------------ docs/tutorials/realtime/deploy.ipynb | 80 
-------- docs/tutorials/realtime/predictor.py | 17 -- docs/tutorials/realtime/requirements.txt | 2 - docs/tutorials/traffic-splitter.md | 96 +++++++++ docs/tutorials/traffic-splitting/README.md | 111 ---------- docs/tutorials/traffic-splitting/cortex.yaml | 28 --- docs/tutorials/traffic-splitting/model.py | 59 ------ .../traffic-splitting/onnx_predictor.py | 20 -- .../traffic-splitting/pytorch_predictor.py | 50 ----- docs/tutorials/traffic-splitting/sample.json | 6 - docs/tutorials/utils/README.md | 36 ---- docs/tutorials/utils/throughput_test.py | 179 ---------------- 35 files changed, 276 insertions(+), 1552 deletions(-) create mode 100644 docs/tutorials/batch.md delete mode 100644 docs/tutorials/compute/README.md delete mode 100644 docs/tutorials/compute/cortex.yaml delete mode 100644 docs/tutorials/compute/cortex_gpu.yaml delete mode 100644 docs/tutorials/compute/cortex_gpu_server_side_batching.yaml delete mode 100644 docs/tutorials/compute/cortex_inf.yaml delete mode 100644 docs/tutorials/compute/cortex_inf_server_side_batching.yaml delete mode 100644 docs/tutorials/compute/generate_gpu_resnet50_model.ipynb delete mode 100644 docs/tutorials/compute/generate_resnet50_models.ipynb delete mode 100644 docs/tutorials/compute/predictor.py delete mode 100644 docs/tutorials/compute/requirements.txt delete mode 100644 docs/tutorials/compute/sample.bin delete mode 100644 docs/tutorials/compute/sample.json create mode 100644 docs/tutorials/multi-model.md delete mode 100644 docs/tutorials/multi-model/README.md delete mode 100644 docs/tutorials/multi-model/cortex.yaml delete mode 100644 docs/tutorials/multi-model/predictor.py delete mode 100644 docs/tutorials/multi-model/requirements.txt delete mode 100644 docs/tutorials/multi-model/sample.json create mode 100644 docs/tutorials/realtime.md delete mode 100644 docs/tutorials/realtime/README.md delete mode 100644 docs/tutorials/realtime/deploy.ipynb delete mode 100644 docs/tutorials/realtime/predictor.py delete mode 100644 docs/tutorials/realtime/requirements.txt create mode 100644 docs/tutorials/traffic-splitter.md delete mode 100644 docs/tutorials/traffic-splitting/README.md delete mode 100644 docs/tutorials/traffic-splitting/cortex.yaml delete mode 100644 docs/tutorials/traffic-splitting/model.py delete mode 100644 docs/tutorials/traffic-splitting/onnx_predictor.py delete mode 100644 docs/tutorials/traffic-splitting/pytorch_predictor.py delete mode 100644 docs/tutorials/traffic-splitting/sample.json delete mode 100644 docs/tutorials/utils/README.md delete mode 100644 docs/tutorials/utils/throughput_test.py diff --git a/README.md b/README.md index 53657f23f2..bbbde36959 100644 --- a/README.md +++ b/README.md @@ -4,8 +4,7 @@
- -[install](https://docs.cortex.dev/install) • [documentation](https://docs.cortex.dev) • [examples](https://github.com/cortexlabs/cortex/tree/0.23/examples) • [community](https://gitter.im/cortexlabs/cortex) +[install](https://docs.cortex.dev/install) • [documentation](https://docs.cortex.dev) • [community](https://gitter.im/cortexlabs/cortex) # Deploy machine learning models to production @@ -74,10 +73,6 @@ class PythonPredictor: api_spec = { "name": "text-generator", "kind": "RealtimeAPI", - "predictor": { - "type": "python", - "path": "predictor.py" - }, "compute": { "gpu": 1, "mem": "8Gi", diff --git a/docs/summary.md b/docs/summary.md index b21c1caed4..04f21db7a3 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -1,14 +1,12 @@ # Table of contents * [Deploy machine learning models to production](../README.md) -* [Install](aws/install.md) -* [Tutorial](https://docs.cortex.dev/v/master/deployments/realtime-api/text-generator) -* [GitHub](https://github.com/cortexlabs/cortex) -* [Examples](https://github.com/cortexlabs/cortex/tree/master/examples) +* [Get started](tutorials/realtime.md) * [Contact us](contact.md) ## Running Cortex on AWS +* [Install](aws/install.md) * [Credentials](aws/credentials.md) * [Security](aws/security.md) * [Spot instances](aws/spot.md) diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/docs/tutorials/compute/README.md b/docs/tutorials/compute/README.md deleted file mode 100644 index 7a52dadbb8..0000000000 --- a/docs/tutorials/compute/README.md +++ /dev/null @@ -1,90 +0,0 @@ -# Image Classifier with ResNet50 - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example implements an image recognition system using ResNet50, which allows for the recognition of up to 1000 classes. - -## Deploying - -There are 4 Cortex APIs available in this example: - -1. [cortex.yaml](cortex.yaml) - can be used with any instances. -1. [cortex_inf.yaml](cortex_inf.yaml) - to be used with `inf1` instances. -1. [cortex_gpu.yaml](cortex_gpu.yaml) - to be used with GPU instances. -1. [cortex_gpu_server_side_batching.yaml](cortex_gpu_server_side_batching.yaml) - to be used with GPU instances. Deployed with `max_batch_size` > 1. The exported model and the TensorFlow Predictor do not need to be modified to support server-side batching. - -To deploy an API, run: - -```bash -cortex deploy -``` - -E.g. - -```bash -cortex deploy cortex_inf.yaml -``` - -## Verifying your API - -Check that your API is live by running `cortex get image-classifier-resnet50`, and copy the example `curl` command that's shown. After the API is live, run the `curl` command, e.g. - -```bash -$ curl -X POST -H "Content-Type: application/json" -d @sample.json - -["tabby", "Egyptian_cat", "tiger_cat", "tiger", "plastic_bag"] -``` - -The following image is embedded in [sample.json](sample.json): - -![image](https://i.imgur.com/213xcvs.jpg) - -## Throughput test - -Before [throughput_test.py](../../utils/throughput_test.py) is run, 2 environment variables have to be exported: - -```bash -export ENDPOINT= # you can find this with `cortex get image-classifier-resnet50` -export PAYLOAD=https://i.imgur.com/213xcvs.jpg # this is the cat image shown in the previous step -``` - -Then, deploy each API one at a time and check the results: - -1. 
Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 2` with the [cortex.yaml](cortex.yaml) API running on an `c5.xlarge` instance will get **~16.2 inferences/sec** with an average latency of **200 ms**. -1. Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 48` with the [cortex_inf.yaml](cortex_inf.yaml) API running on an `inf1.2xlarge` instance will get **~510 inferences/sec** with an average latency of **80 ms**. -1. Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 24` with the [cortex_gpu.yaml](cortex_gpu.yaml) API running on an `g4dn.xlarge` instance will get **~125 inferences/sec** with an average latency of **85 ms**. Optimizing the model with TensorRT to use FP16 on TF-serving only seems to achieve a 10% performance improvement - one thing to consider is that the TensorRT engines hadn't been built beforehand, so this might have affected the results negatively. -1. Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 60` with the [cortex_gpu_server_side_batching.yaml](cortex_gpu_batch_sized.yaml) API running on an `g4dn.xlarge` instance will get **~186 inferences/sec** with an average latency of **500 ms**. This achieves a 49% higher throughput than the [cortex_gpu.yaml](cortex_gpu.yaml) API, at the expense of increased latency. - -Alternatively to [throughput_test.py](../../utils/throughput_test.py), the `ab` GNU utility can also be used to benchmark the API. This has the advantage that it's not as taxing on your local machine, but the disadvantage that it doesn't implement a cooldown period. You can run `ab` like this: - -```bash -# for making octet-stream requests, which is the default for throughput_test script -ab -n -c -p sample.bin -T 'application/octet-stream' -rks 120 $ENDPOINT - -# for making json requests, will will have lower performance because the API has to download the image every time -ab -n -c -p sample.json -T 'application/json' -rks 120 $ENDPOINT -``` - -*Note: `inf1.xlarge` isn't used because the major bottleneck with `inf` instances for this example is with the CPU, and `inf1.2xlarge` has twice the amount of CPU cores for same number of Inferentia ASICs (which is 1), which translates to almost double the throughput.* - -## Exporting SavedModels - -This example deploys models that we have built and uploaded to a public S3 bucket. If you want to build the models yourself, follow these instructions. - -Run the following command to install the dependencies required for the [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook: - -```bash -pip install --extra-index-url=https://pip.repos.neuron.amazonaws.com \ - neuron-cc==1.0.9410.0+6008239556 \ - tensorflow-neuron==1.15.0.1.0.1333.0 -``` - -The [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook will generate 2 SavedModels. One will be saved in the `resnet50` directory which can be run on GPU or on CPU and another in the `resnet50_neuron` directory which can only be run on `inf1` instances. For server-side batching on `inf1` instances, a different compilation of the model is required. To compile ResNet50 model for a batch size of 5, run `run_all` from [this directory](https://github.com/aws/aws-neuron-sdk/tree/master/src/examples/tensorflow/keras_resnet50). 
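Once you've exported a model yourself, upload it to an S3 bucket that your cluster can access and point the `model_path` field of the API configuration at it. A minimal upload sketch (the bucket name and key prefix below are hypothetical placeholders):

```python
import os

import boto3

bucket = "my-cortex-models"  # hypothetical bucket - use one your cluster can read
local_dir, prefix = "resnet50_neuron", "tensorflow/resnet50_neuron"

s3 = boto3.client("s3")
for root, _, files in os.walk(local_dir):
    for name in files:
        local_path = os.path.join(root, name)
        key = f"{prefix}/{os.path.relpath(local_path, local_dir)}"
        s3.upload_file(local_path, bucket, key)
        print(f"uploaded s3://{bucket}/{key}")
```

With the sketch above, `model_path` in [cortex_inf.yaml](cortex_inf.yaml) would become `s3://my-cortex-models/tensorflow/resnet50_neuron/`.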
- -If you'd also like to build the TensorRT version of the GPU model, run the following command in a new Python environment to install the pip dependencies required for the [generate_gpu_resnet50_model.ipynb](generate_gpu_resnet50_model.ipynb) notebook: - -```bash -pip install tensorflow==2.0.0 -``` - -TensorRT also has to be installed to export the SavedModel. Follow the instructions on [Nvidia TensorRT Documentation](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-debian) to download and install TensorRT on your local machine (this will require ~5GB of space, and you will have to create an Nvidia account). This notebook also requires that the SavedModel generated with the [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook exists in the `resnet50` directory. The TensorRT SavedModel will be exported to the `resnet50_gpu` directory. You can then replace the existing SavedModel with the TensorRT-optimized version in [cortex_gpu.yaml](cortex_gpu.yaml) - it's a drop-in replacement that doesn't require any other dependencies on the Cortex side. By default, the API config in [cortex_gpu.yaml](cortex_gpu.yaml) uses the non-TensorRT-optimized version due to simplicity. diff --git a/docs/tutorials/compute/cortex.yaml b/docs/tutorials/compute/cortex.yaml deleted file mode 100644 index afbe5a8394..0000000000 --- a/docs/tutorials/compute/cortex.yaml +++ /dev/null @@ -1,18 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/resnet50/ - processes_per_replica: 4 - threads_per_process: 16 - config: - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - input_key: input - output_key: output - compute: - cpu: 3 - mem: 4G diff --git a/docs/tutorials/compute/cortex_gpu.yaml b/docs/tutorials/compute/cortex_gpu.yaml deleted file mode 100644 index f86b85e414..0000000000 --- a/docs/tutorials/compute/cortex_gpu.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/resnet50/ - processes_per_replica: 4 - threads_per_process: 24 - config: - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - input_key: input - output_key: output - compute: - gpu: 1 - cpu: 3 - mem: 4G diff --git a/docs/tutorials/compute/cortex_gpu_server_side_batching.yaml b/docs/tutorials/compute/cortex_gpu_server_side_batching.yaml deleted file mode 100644 index 61604346d0..0000000000 --- a/docs/tutorials/compute/cortex_gpu_server_side_batching.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/resnet50/ - server_side_batching: - max_batch_size: 32 - batch_interval: 0.1s - processes_per_replica: 4 - threads_per_process: 192 - config: - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - input_key: input - output_key: output - compute: - gpu: 1 - cpu: 3 - mem: 4G diff --git a/docs/tutorials/compute/cortex_inf.yaml b/docs/tutorials/compute/cortex_inf.yaml deleted file mode 100644 index 13f999e1b5..0000000000 --- a/docs/tutorials/compute/cortex_inf.yaml +++ /dev/null @@ -1,21 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/resnet50_neuron/ - processes_per_replica: 4 - threads_per_process: 256 - config: - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - input_key: input - output_key: output - compute: - inf: 1 - cpu: 3 - mem: 4G - autoscaling: - max_replica_concurrency: 16384 diff --git a/docs/tutorials/compute/cortex_inf_server_side_batching.yaml b/docs/tutorials/compute/cortex_inf_server_side_batching.yaml deleted file mode 100644 index 2b33961e95..0000000000 --- a/docs/tutorials/compute/cortex_inf_server_side_batching.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier-resnet50 - kind: RealtimeAPI - predictor: - type: tensorflow - path: predictor.py - model_path: s3://cortex-examples/tensorflow/resnet50_neuron_batch_size_5/ - server_side_batching: - max_batch_size: 5 - batch_interval: 0.1s - processes_per_replica: 4 - threads_per_process: 260 - config: - classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json - input_shape: [224, 224] - input_key: input_1:0 - output_key: probs/Softmax:0 - compute: - inf: 1 - cpu: 3 - mem: 4G - autoscaling: - max_replica_concurrency: 16384 diff --git a/docs/tutorials/compute/generate_gpu_resnet50_model.ipynb b/docs/tutorials/compute/generate_gpu_resnet50_model.ipynb deleted file mode 100644 index ca78235b4d..0000000000 --- a/docs/tutorials/compute/generate_gpu_resnet50_model.ipynb +++ /dev/null @@ -1,131 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Generate GPU Resnet50 Model\n", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "from tensorflow.python.compiler.tensorrt import trt_convert as trt" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "input_model_dir = \"resnet50\"\n", - "output_model_dir = \"resnet50_gpu\"" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS\n", - "conversion_params = conversion_params._replace(\n", - " max_workspace_size_bytes=(1<<30))\n", - "conversion_params = conversion_params._replace(precision_mode=\"FP16\")\n", - "conversion_params = conversion_params._replace(\n", - " maximum_cached_engines=100)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Linked TensorRT version: (0, 0, 0)\n", - "INFO:tensorflow:Loaded TensorRT version: (0, 0, 0)\n", - "INFO:tensorflow:Running against TensorRT version 0.0.0\n" - ] - } - ], - "source": [ - "converter = trt.TrtGraphConverterV2(\n", - " input_saved_model_dir=input_model_dir,\n", - " conversion_params=conversion_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/robert/.miniconda3/envs/py36-tf/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1781: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "If using Keras pass *_constraint arguments to layers.\n", - "WARNING:tensorflow:Issue encountered when serializing variables.\n", - "Type is unsupported, or the types of the items don't match field type in CollectionDef. Note this is a warning and probably safe to ignore.\n", - "to_proto not supported in EAGER mode.\n", - "WARNING:tensorflow:Issue encountered when serializing trainable_variables.\n", - "Type is unsupported, or the types of the items don't match field type in CollectionDef. 
Note this is a warning and probably safe to ignore.\n", - "to_proto not supported in EAGER mode.\n" - ] - } - ], - "source": [ - "converter.convert()" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Assets written to: resnet50_gpu/assets\n" - ] - } - ], - "source": [ - "converter.save(output_model_dir)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/tutorials/compute/generate_resnet50_models.ipynb b/docs/tutorials/compute/generate_resnet50_models.ipynb deleted file mode 100644 index 11eaf5a316..0000000000 --- a/docs/tutorials/compute/generate_resnet50_models.ipynb +++ /dev/null @@ -1,178 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Generate Resnet50 Models\n", - "\n", - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import time\n", - "import shutil\n", - "import tensorflow as tf\n", - "import tensorflow.neuron as tfn\n", - "import tensorflow.compat.v1.keras as keras\n", - "from tensorflow.keras.applications.resnet50 import ResNet50" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Prepare export directories for compile/non-compiled versions of the model." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "model_dir = \"resnet50\"\n", - "compiled_model_dir = model_dir + \"_neuron\"\n", - "shutil.rmtree(model_dir, ignore_errors=True)\n", - "shutil.rmtree(compiled_model_dir, ignore_errors=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Instantiate a Keras ResNet50 model." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From /home/robert/.miniconda3/envs/py36-neuron/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "If using Keras pass *_constraint arguments to layers.\n" - ] - } - ], - "source": [ - "keras.backend.set_learning_phase(0)\n", - "keras.backend.set_image_data_format('channels_last')\n", - "model = ResNet50(weights='imagenet')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Export the model as SavedModel." 
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "WARNING:tensorflow:From :5: simple_save (from tensorflow.python.saved_model.simple_save) is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.simple_save.\n", - "WARNING:tensorflow:From /home/robert/.miniconda3/envs/py36-neuron/lib/python3.6/site-packages/tensorflow_core/python/saved_model/signature_def_utils_impl.py:201: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.\n", - "Instructions for updating:\n", - "This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.\n", - "INFO:tensorflow:Assets added to graph.\n", - "INFO:tensorflow:No assets to write.\n", - "INFO:tensorflow:SavedModel written to: resnet50/saved_model.pb\n" - ] - } - ], - "source": [ - "tf.saved_model.simple_save(\n", - " session = keras.backend.get_session(),\n", - " export_dir = model_dir,\n", - " inputs = {'input': model.inputs[0]},\n", - " outputs = {'output': model.outputs[0]})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "And then compile it for Inferentia to be used on only one Neuron core. `--static-weights` option is used to cache all weights onto the neuron core's memory." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from resnet50/variables/variables\n", - "INFO:tensorflow:Froze 320 variables.\n", - "INFO:tensorflow:Converted 320 variables to const ops.\n", - "INFO:tensorflow:fusing subgraph neuron_op_d6f098c01c780733 with neuron-cc\n", - "INFO:tensorflow:Number of operations in TensorFlow session: 4638\n", - "INFO:tensorflow:Number of operations after tf.neuron optimizations: 556\n", - "INFO:tensorflow:Number of operations placed on Neuron runtime: 554\n", - "INFO:tensorflow:No assets to save.\n", - "INFO:tensorflow:No assets to write.\n", - "INFO:tensorflow:SavedModel written to: resnet50_neuron/saved_model.pb\n", - "INFO:tensorflow:Successfully converted resnet50 to resnet50_neuron\n" - ] - }, - { - "data": { - "text/plain": [ - "{'OnNeuronRatio': 0.9964028776978417}" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "compiler_args = ['--static-weights', '--num-neuroncores', '1']\n", - "batch_size = 1\n", - "tfn.saved_model.compile(model_dir, compiled_model_dir, batch_size)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/tutorials/compute/predictor.py b/docs/tutorials/compute/predictor.py deleted file mode 100644 index 98828723cc..0000000000 --- a/docs/tutorials/compute/predictor.py +++ /dev/null @@ -1,63 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch 
corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import os -import cv2 -import numpy as np -import requests -import imageio -import json -import base64 - - -def read_image(payload): - """ - Read JPG image from {"url": "https://..."} or from a bytes object. - """ - if isinstance(payload, bytes): - jpg_as_np = np.frombuffer(payload, dtype=np.uint8) - img = cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR) - elif isinstance(payload, dict) and "url" in payload.keys(): - img = imageio.imread(payload["url"]) - else: - return None - return img - - -def prepare_image(image, input_shape, input_key): - """ - Prepares an image for the TFS client. - """ - img = cv2.resize(image, input_shape, interpolation=cv2.INTER_NEAREST) - img = {input_key: img[np.newaxis, ...]} - return img - - -class TensorFlowPredictor: - def __init__(self, tensorflow_client, config): - self.client = tensorflow_client - - # load classes - classes = requests.get(config["classes"]).json() - self.idx2label = [classes[str(k)][1] for k in range(len(classes))] - - self.input_shape = tuple(config["input_shape"]) - self.input_key = str(config["input_key"]) - self.output_key = str(config["output_key"]) - - def predict(self, payload): - # preprocess image - img = read_image(payload) - if img is None: - return None - img = prepare_image(img, self.input_shape, self.input_key) - - # predict - results = self.client.predict(img)[self.output_key] - results = np.argsort(results) - - # Lookup and print the top 5 labels - top5_idx = results[-5:] - top5_labels = [self.idx2label[idx] for idx in top5_idx] - top5_labels = top5_labels[::-1] - - return top5_labels diff --git a/docs/tutorials/compute/requirements.txt b/docs/tutorials/compute/requirements.txt deleted file mode 100644 index 66340adf33..0000000000 --- a/docs/tutorials/compute/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -imageio==2.9.* diff --git a/docs/tutorials/compute/sample.bin b/docs/tutorials/compute/sample.bin deleted file mode 100644 index 921abf24a5c99cd3c1d1cd12d00f134a16391a77..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8680 zcmbVxYg`le*6v`cQB&uZa{9BbO)$v8W{2LJ+NxL z*0Y{xjkDd^g}nahij^x67Z(I^fq#heF%p4{b73CL^El@58jE=z|JrM;*CvdgFyYk~ zd(y-S>`Cki6DCfcILVcHz)w>qyG~*L!@SAgFC90Y#TxI*p1^)}$^Yodc?)r$h;$)c zESEQsaqccGcNga!1cP&qe|0=K?B5TUaje(IPhi6_Cc_04ufy?KEI9OdI5AwE0e?qc zb00r#Zs76>9xF zgAYTNghqTExgzS5m7lIz8~YV+-TDpjUw^Z8Tf+7oDSJ}C-z!WLWgo~nDE{$K?!S)w zeDv7yUrrRBDf#v6xzh6&F8)@2t>Sv+4aLp6`a2Dcs=L28J^bU();4YXqYj}*2L)_9WE}rJ!#(JFU_7?cDK0Bm>;Bj)53)IDzg8-!1n*IBKz;a{%>3z$P|_f z6p!VO;K*>(f-^M{>YxQ@auMcB9_hI|J5TOecjRR24cSbeirq2PN9JhTfQ@s=;N6k? 
z%)Ihq(O27!#ozlmCURxZ%!Q|>uO8o28aW+_iR5dB!;|=0{OWKha?j4I!FG&XzLKJ6 z?+-&&+(k~L?E9Nnb{itKVQ2|yxb@75#A~lOeiq^#;}w+^sxT@h0f4}L% zQ~c9YUH!S&79eMG7r5Lcp5QOF?^@)1C-T##UwepQFNI%DVHg22>(oDG<5QSgI}!e>I?31Wj}l;uP;4Wm3+hZDIwg(pq_Oyfi9^btyxD(AwoX84PVzeMp1KyAiUqT;k4a<~$m~v+?aFRDYVT*HD-f@yNZR$2$6M(G&4uueKmUL_St2 z&8w4g3pt7|C*qU9C%zT#r57&j`Py+d^{uM3c*Ape}$xV@=HZ zVcs)wN6!DLp+7el@PDDT3Gx4^yyrg+G49hRW}J!+R`-W#12)F7A)DQbJWaB6c4DDc z<(s5GSLQf~U&qU+NGI}!Npwzpx{?x4bnH+Ua6R4=+gJ2)LNr68t-dL|O)2zz(JcH_ zrqXdnEjBJ0IJAMIkS)>%0{YUm*}Wn!?PABlrMxS&W<$54_^?$z!*SY)=%cy_GR_;z z!>{}6QzF`LoqQ{&I3bhmxQOd-V1r)Jgx_DKLx?*PLqS=b#QtRu{#f~2TfRkk%ZNQ< z&yt6bmmCU1J{qOUdl%D*Os_Q;=-korut6CmNKvT?*O;<8d9iqCnP z=lgux9qMrMXjeu`gmy)E;&6m+W|kAtvB}t96Ea&Zva_;;!tPxEm&BAl!hBLVJL*-r z10`-eX`y>oP{PGMynEQX=`oRrr!;c9OWcK)NcOC_+|o$)W<}(-mSx^Qf90F7I{El} zRoc_ET7*&2L?zzimZ+9hX=Nl1oRz~fN37HOw>6D%c7~0X+ww@MBgY^iV>(%?zv8-X2!n%qk=TeuTj=^2T=E9AfCU9fr@m9vI%a=BGX9@bte z#V;f`Mv5YEzAc*Oi&mMHG0eRH%=zu4Z;urntCAG(%#Lj2%yWCeQ7hy(g)| zUw=$4Pkj2`obB_T`_(M(c~pMxkFf3bB|VuR(tl~F&H4apeqzVVR3;R2l+Bn`EpU_x z{K<%Qnp)4E&_;2kJ)T||K$a$Ib`o1nNkB8N?SZsTB5Zgb{wyH(q&zU(C%w6pPd|Z4 zpie8-woA)8Z5)Mti4)n}LkzLy2QodWU0Ph9Y?kXwse}H#l6I8O5TN5;MbycRsX)1c z_cCDqtvNj|cfsj#8El{K>Aof|;-A>4CmYPNZ)+(ayf_M9jHRsqCRkO7l;o zM8nhIX4!GAwS@M3I7U~L+UmnPt!bQ@U8m5yZND{^LGe0svp;K5?+?Pgh&%e4@;e(a zY7Per?X?J`c*R$hPGoYDfc`6qJmN(DY=8-5Pm!{vgy!0iykjvW>T%&q2{ifVcyEAy zVGSVWaUn)+P>yD6W6hVk2)H7LaJ4cV*$53UhkP9{bct<7YMm2#q`bo??lmlIMj9I$Ut1ZWsS;vhQd&&gW=F8bscpg zzciMu%M5{+P|24Q;^}`_d%QVEW$C*>cbMhtG{cMI=GvE}dysD$zC(Fh?%N8l7ugU0 z*;ijLx)(JC5HSdycX$ICt~ZesBFGM zH$^c3B)N+)sae_*;`S12rw>v0Cta^i2xqwKKZ z*O{^A&YEyJ84-rluh+kbqUPQlsWiqtw&6)_$NNQoBT>)S54DB|^xw}jtI5d6or5K` z`}ZT%rN5NAS%T%Gtr!`peLPs|xQML|^Pt_g6NO{(;u3(Q!+HOYoH^Qiq{O9@V*r=NL z+HZ6R{+kC?u?HCcPdYZbnQJYbYb3T~OER_0FiF5H^FVodQ&pr=M${$C`$9PigPV1% z#PV<2@1~lH4#yjn;do-eBLVFP9dJ^}QGU~eKU~gD_W%_tMjPtn*nJtg_#Fz z;p=Y+<1R65QD-)DB@Qve%kg`udlf?kWnF|%y1zwMUQD+XEdu6iB=n~&o*53a#E#w? 
z^P+*G59}S_-6za3(LM4k~X$Aw0kFQCfhD;!6yYBDyx zFWZjUx72<+g5JD_GvaHb)co(BxP!GTEvkku&|3z4kX;htD8YL|{3F!dv-Fde@W|DW zt?|N4u_gNY4ktndxb*sKg2IzF-iVG~WsNdTdNHOggp}XrIFVDUwP7p!w-etScAVb# zu3U0tub}NFHkGt|;Lr(X^-5=Z-I${l=;Zm5Yty2QajUi;dC1KU(7!Y6h^2q$9F&9+ zd)jD4SGfuLz?GXlC_nLRTb1M7J<*JRg-?y6?_q!8Bdu#`6*qgRG>N;vU|vxDsM*1JX>N}L;HEF9Fr@Cq^20)m-|T(fNH4h=a^&o zbomJl=&?E!8toE@$CXZ`7lzC8QyIaa&Ci=;+FK$HDBLN=VgntI4FwW|XDWm6{4pKf zPXBdTz0AP{--TJeJm05$Ehv0nhk zb{o)$gj|K|TYFNUq@yIyiA=;P9w$T|cHbt?k7H{;BzEr09n2Tp8Z+8vQhwgpY5Aw- znFY8mqy<{r;HIxmw25JcXzBTxi%hHbO@62S9xy<{7keYDvP%)9X5ZG@H0^g87YoFC zAO>#d*)(ZKDPc<+*_K5w_D%WWTvdKbgNjdklYc-lr!|V7eDzdpUtaKwC(MW)KKSbJ zc$N3_I1-Z)-)WDM1_Mma36iX_msJi0TpzlOSlMYlypJ$k8^Aki_dsIpjr18gVBV$; zS8f;@bsI1(?#ackUaB9)3pf=|5{T*ct>xkIH|V;{;VJN$Mo&dC*%@((ndE-wPosyN z$o3=_10zQ0ybz41;z*t>9o42v3t)h^1!oIH#O>u3PjRUGr=<1^Qddmv^1C(mT%=e( zBm^O1=7u?ugXhT$z?d-obu!|PVO<;kY*UF&V2Y6m4WObdx8++%gWVTChl&mgH9L{n z)a!71Oee6tbr3JJ=5MK4oG0d6RCUVS!tOozD67X>-&Y#~ z@@u4L#Qg*vQa{EkAPxF$1y=Q_i}c|+EjK6;Xe~M@jQ6%I^G}w2GCf~dVviC3In@EI z%rt?=gWdT6uCQcBSux3}HbNgAFc=Oh+G-0)o*9oIdBwndeMdifHyw4HbRs=VMq39u zN+2l*4Ez=fE#~=K*4WN#gKsmytGij!l`%z#MS#I%Q>zw)4ZVtWA{PYjm|sC+6CL7< z+EeT)5m(eXB1RxI?5+*bSeu5L>ra81g&(9a9l8&O$bi`KtJeFxSQ)Xo@92qs4(3oe z4n38altZ~MgVi~4)~E<_edlzS<2uHt56a8&uQ#DbR3gi!4gT|6N7gMOD-nv zrrfr+CJ+@XAYp+w;jA~bVN+$_%XqKS5CL7{C;_54-zW79LziK5=vK@V+VuhUvHS#) zpk|Or{pdt2er>_IV~MsdW*Q#iDg;aDcB!lN!DL8!&V?>wY-XY+L_N=pGYG?=9U%)U z-~^s!O~mhVUmBcnLo``T?c5u!bm(bV9lomX~uVEwD{?hu$P7F&Kw1dxP2P1-Cc>Q*|$3`!Dz zp!X2p?7zQiUxa2D&l>Yh6BGnLNYS5r)#gvg3t)ojF00|DvEhqdD7pMv=n$GddopkMu^1@}OO8uB@kEHJzyrh8@)BHk()u7pCx8$^ zke}Ft_v1MkvPeoZ?fEXdS*K zXV;j~G|N$0%uF~Tx5iGpZa2#zEA$ijzcNFZSDNLtpVSOs{g!B03PQ?+2*svZ7ZYxB z6sOSOoMQS>fNZ2{38Qqc|MeX8@35iT@q3;k@s zBmR^5t=x%112EH}hV((MyU?&PR0S2dRIeK-z$qu5adfS&_^pT~HU%^M3b zm^&Omfk)F|4Z7*o!eLsNwhT5Nwr)7)w9w%KkKZhfveX9C^nsIv)Y+_y?yKkF16`%@hV8GWaN(Zfw2RbGS zcD|6b>>i>V%(vt7SU4R-IgzV~D}H{gf{8KWm5Ib?zoSoxoo$%S#4buoAS`cadPi&~ z7zmXs#14gES+Z2~fxj@V`HFHs zUU}SfL7pCCF36J64+XDV|M_(8!w8oCB}#o^*LDKxW~v`n-RHqi$jj#!oCuD#W8~I> z+@}*jK4Fb)x_d4^s^AUn9v~NLmzi+fUnei8lFh#ejr6k2Mc%kBE?JiOpmX*x_dp_+ zleye9ZJs?4dym*GG(t1-FcuZ>C@ayHrMo$lLE+58%?NvhqZSn;q<}LKaAT}fvSAH03DhU`J`mx3;=_pZQmJski-^+yt*j4GkX284x*u;a|e^0|cE>zQ#E?n70 zFCe_g?`tzit$7k)aW8Zr0Q^VDZ&s919VV~BB!9|*e| zkEY-61sc0iKsy)|%NxM}he?eJMjt0&kO#(Pul%zRZ+#BL94z zwyhdAu;4b1wkAL}KLOTV;ol59x9fK84H(KlazoPBGTj{Lkd$cH>M`FoXayg0Vt;fF zQMN+S1)D1mNiIx?9M65cexrRZ^}6GvmEro(#h!hD3;Ut2e%?5Bul5ZYc57^a;XtJ&49?oHPn<<1gB^h5 z=)gHX1D*4cNfb57sDvUVs7V=ql}d)JL5MjW)@UDJ=%WmXn+I{js)YHVk~kia=~uw7 zM)-Iy;z`v;X?dv%HK}3p!E2{w#7CVTM(1joz4+AKRr#n2(nH+ceS_#I1q{vsncFR1 zN0b-cNL9l=u4wB%F)ZXechnS9sPzgnjJuh6gtK5(RC}G#G+ighm|uCQVLSH&H_;r` zZv=%0fJfyQxd7JN?^6&+pc;1etsHQaW;wVI3XLF6l9&l)X(kOLh@Yu|q%{jITB zcd*r(@nQXj+ETp$BU9>18{$81mH+r%Kw8DHmi-$A zfLFLlRE}##^_k;VQWg) zKS)zIMCZaTAq;TqDra;e6Lr2@4Qq?4fStu`!tM+Lg26OG8UU=nb3-f)jTK;z3w<+Y z6_W;-E@i{vC^TF{+P2Lap<$DT0i(x*U2QrhKOkJ8*MPnb)YsqSmN}6%o;wqzS1}dZ zv543iZ|6YqNI$$F47PV;Q&x-EM?suk#Z0q6i`kh5Hy4ogG~C4FpaFU>*@lCa;C*y)DWKKB3&HKP za3u2~QFqPEbGULj)Ws3X9~)0-N^}EF(4!=n`F6*S5o&&CKYdiH9XbxgA>uqN<86c# zKi^?fTxrGn7ixxn55i3yy~;AYd547h;gZ;*Qn?LtvR()*^Dlynr<%w?P-}|E4dSUbzXj zHn3&i=qTYb+aM7CuzC|*DP*R%;S^ev*1gRL@6!cf_QZ97paU4+BDOV3q9E_&Tic*3 z=!haz{xE5DD)@L1(HeHcO(Qiex{H9V>nmj>ddc-<>;j9a`uiCf!9ZQ$G+^YdW&?HA zzmxtBj4p0IYD)nYCCH)#V6Cst^%1&dJ~)-NfPU;mLYRox5s$CAv=W;cFCu32sNI+H zht2--zKbxG|4QB9iKJ^`DT2eTV<`9?y*z;^>nup@R6^j);jMs$$yJZ7@@0mw<9vs> nI+?jSC^F{xUjzvL0|#PdBqF?9&IK8`: - -```bash -$ cortex get text-generator - -status last update avg request 2XX -live 1m - - - -endpoint: 
http://localhost:8889 -``` - -You can also stream logs from your API: - -```bash -$ cortex logs text-generator - -... -``` - -## Deploy your model to AWS - -Cortex can automatically provision infrastructure on your AWS account and deploy your models as production-ready web services: - -```bash -$ cortex cluster up -``` - -This creates a Cortex cluster in your AWS account, which will take approximately 15 minutes. After your cluster is created, you can deploy to your cluster by using the same code and configuration as before: - -```python -import cortex - -cx_aws = cortex.client("aws") - -api_spec = { - "name": "text-generator", - "kind": "RealtimeAPI", - "predictor": { - "type": "python", - "path": "predictor.py" - } -} - -cx_aws.deploy(api_spec, project_dir=".") -``` - -Monitor the status of your APIs using `cortex get` using your CLI: - -```bash -$ cortex get --watch - -env realtime api status up-to-date requested last update avg request 2XX -aws text-generator live 1 1 1m - - -local text-generator live 1 1 17m 3.1285 s 1 -``` - -The output above indicates that one replica of your API was requested and is available to serve predictions. Cortex will automatically launch more replicas if the load increases and will spin down replicas if there is unused capacity. - -Show additional information for your API (e.g. its endpoint) using `cortex get `: - -```bash -$ cortex get text-generator --env aws - -status up-to-date requested last update avg request 2XX -live 1 1 1m - - - -endpoint: https://***.execute-api.us-west-2.amazonaws.com/text-generator -``` - -## Run on GPUs - -If your cortex cluster is using GPU instances (configured during cluster creation) or if you are running locally with an nvidia GPU, you can run your text generator API on GPUs. Add the `compute` field to your API configuration and re-deploy: - -```python -api_spec = { - "name": "text-generator", - "kind": "RealtimeAPI", - "predictor": { - "type": "python", - "path": "predictor.py" - }, - "compute": { - "gpu": 1 - } -} - -cx_aws.deploy(api_spec, project_dir=".") -``` - -As your new API is initializing, the old API will continue to respond to prediction requests. Once the API's status becomes "live" (with one up-to-date replica), traffic will be routed to the updated version. You can track the status of your API using `cortex get`: - -```bash -$ cortex get --env aws --watch - -realtime api status up-to-date stale requested last update avg request 2XX -text-generator updating 0 1 1 29s - - -``` - -## Cleanup - -Deleting APIs will free up cluster resources and allow Cortex to scale down to the minimum number of instances you specified during cluster creation: - -```python -cx_local.delete_api("text-generator") - -cx_aws.delete_api("text-generator") -``` diff --git a/docs/tutorials/realtime/deploy.ipynb b/docs/tutorials/realtime/deploy.ipynb deleted file mode 100644 index 5ffbce9caa..0000000000 --- a/docs/tutorials/realtime/deploy.ipynb +++ /dev/null @@ -1,80 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", - "\n", - "This example needs to run on a machine that supports Docker to deploy Cortex APIs locally (Colab users can still deploy to remote Cortex clusters)", - "\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!pip3 install cortex\n", - "!pip3 install requests" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import cortex\n", - "\n", - "cx = cortex.client(\"local\")\n", - "\n", - "api_spec = {\n", - " \"name\": \"text-generator\",\n", - " \"kind\": \"RealtimeAPI\",\n", - " \"predictor\": {\n", - " \"type\": \"python\",\n", - " \"path\": \"predictor.py\"\n", - " }\n", - "}\n", - "\n", - "cx.deploy(api_spec, project_dir=\".\", wait=True)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import requests\n", - "\n", - "endpoint = cx.get_api(\"text-generator\")[\"endpoint\"]\n", - "payload = {\"text\": \"hello world\"}\n", - "print(requests.post(endpoint, payload).text)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.9" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/tutorials/realtime/predictor.py b/docs/tutorials/realtime/predictor.py deleted file mode 100644 index b14d8abcc7..0000000000 --- a/docs/tutorials/realtime/predictor.py +++ /dev/null @@ -1,17 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -from transformers import GPT2Tokenizer, GPT2LMHeadModel - - -class PythonPredictor: - def __init__(self, config): - self.device = "cuda" if torch.cuda.is_available() else "cpu" - self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2") - self.model = GPT2LMHeadModel.from_pretrained("gpt2").to(self.device) - - def predict(self, payload): - input_length = len(payload["text"].split()) - tokens = self.tokenizer.encode(payload["text"], return_tensors="pt").to(self.device) - prediction = self.model.generate(tokens, max_length=input_length + 20, do_sample=True) - return self.tokenizer.decode(prediction[0]) diff --git a/docs/tutorials/realtime/requirements.txt b/docs/tutorials/realtime/requirements.txt deleted file mode 100644 index 1447500abe..0000000000 --- a/docs/tutorials/realtime/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -torch -transformers==3.0.* diff --git a/docs/tutorials/traffic-splitter.md b/docs/tutorials/traffic-splitter.md new file mode 100644 index 0000000000..5db0afda9a --- /dev/null +++ b/docs/tutorials/traffic-splitter.md @@ -0,0 +1,96 @@ +# Deploy a traffic splitter + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +## Install cortex + +```bash +$ pip install cortex +``` + +## Spin up a cluster on AWS (requires AWS credentials) + +```bash +$ cortex cluster up +``` + +## Define 2 realtime APIs and a traffic splitter + +```python +# traffic_splitter.py + +import cortex + +class PythonPredictor: + def __init__(self, config): + from transformers import pipeline + + self.model = pipeline(task="text-generation") + + def predict(self, payload): + return self.model(payload["text"])[0] + +requirements = ["tensorflow", "transformers"] + +api_spec_cpu = { + "name": "text-generator-cpu", + "kind": "RealtimeAPI", + "compute": { + "cpu": 1, + }, +} + +api_spec_gpu = { + "name": "text-generator-gpu", + "kind": "RealtimeAPI", + "compute": { + "gpu": 1, + }, +} + +traffic_splitter = { + "name": "text-generator", + "kind": "TrafficSplitter", + "apis": [ + {"name": "text-generator-cpu", "weight": 30}, + {"name": "text-generator-gpu", "weight": 70}, + ], +} + +cx = cortex.client("aws") +cx.deploy(api_spec_cpu, predictor=PythonPredictor, requirements=requirements) +cx.deploy(api_spec_gpu, predictor=PythonPredictor, requirements=requirements) +cx.deploy(traffic_splitter) +``` + +## Deploy to AWS + +```bash +$ python traffic_splitter.py +``` + +## Monitor + +```bash +$ cortex get text-generator --env aws --watch +``` + +## Stream logs + +```bash +$ cortex logs text-generator +``` + +## Make a request + +```bash +$ curl https:// \ + -X POST -H "Content-Type: application/json" \ + -d '{"text": "hello world"}' +``` + +## Delete the API + +```bash +$ cortex delete text-generator +``` diff --git a/docs/tutorials/traffic-splitting/README.md b/docs/tutorials/traffic-splitting/README.md deleted file mode 100644 index d68d763dd0..0000000000 --- a/docs/tutorials/traffic-splitting/README.md +++ /dev/null @@ -1,111 +0,0 @@ -# Splitting traffic between APIs - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example shows how to split traffic between 2 different iris-classifiers deployed as Realtime APIs. - -To deploy this example: - -1. Determine your CLI Version `cortex version` -1. Clone the repo and switch to the current version by replacing `` with your CLI version: `git clone -b v https://github.com/cortexlabs/cortex` (e.g. if the output of `cortex version` is 0.18.1, the clone command would be `git clone -b v0.18.1 https://github.com/cortexlabs/cortex`) -1. 
Navigate to this example directory - -## `cortex deploy` - -```bash -$ cortex deploy --env aws - -creating iris-classifier-onnx (RealtimeAPI) -creating iris-classifier-tf (RealtimeAPI) -created iris-classifier (TrafficSplitter) -``` - -## `cortex get` - -```bash -$ cortex get - -env realtime api status up-to-date requested last update avg request 2XX -aws iris-classifier-onnx updating 0 1 27s - - -aws iris-classifier-tf updating 0 1 27s - - - -env traffic splitter apis last update -aws iris-classifier iris-classifier-onnx:30 iris-classifier-tf:70 27s -``` - -## `cortex get iris-classifier` - -```bash -$ cortex get iris-classifier --env aws - -apis weights status requested last update avg request 2XX 5XX -iris-classifier-onnx 30 live 1 1m - - - -iris-classifier-tf 70 live 1 1m - - - - -last updated: 1m -endpoint: https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -example curl: curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -... -``` - -## Make multiple requests - -```bash -$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -setosa - -$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -setosa - -$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -setosa - -$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -setosa - -$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -setosa - -$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -setosa -``` - -## `cortex get iris-classifier` - -Notice the requests being routed to the different Realtime APIs based on their weights (the output below may not match yours): - -```bash -$ cortex get iris-classifier --env aws - -using aws environment - - -apis weights status requested last update avg request 2XX 5XX -iris-classifier-onnx 30 live 1 4m 6.00791 ms 1 - -iris-classifier-tf 70 live 1 4m 5.81867 ms 5 - - -last updated: 4m -endpoint: https://comtf6hs64.execute-api.us-west-2.amazonaws.com/iris-classifier -example curl: curl https://comtf6hs64.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json -... -``` - -## Cleanup - -Use `cortex delete ` to delete the Traffic Splitter and the two Realtime APIs (note that the Traffic Splitter and each Realtime API must be deleted by separate `cortex delete` commands): - -```bash -$ cortex delete iris-classifier --env aws - -deleting iris-classifier - -$ cortex delete iris-classifier-onnx --env aws - -deleting iris-classifier-onnx - -$ cortex delete iris-classifier-tf --env aws - -deleting iris-classifier-tf -``` - -Running `cortex delete ` will free up cluster resources and allow Cortex to scale down to the minimum number of instances you specified during cluster installation. It will not spin down your cluster. 
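The repeated curl requests above can also be scripted. A rough Python equivalent (a sketch only; it assumes the placeholder endpoint shown in the example output of `cortex get iris-classifier` and the payload values from `sample.json`):

```python
import requests

# placeholder endpoint copied from the example output above; use your own API endpoint
endpoint = "https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier"
payload = {"sepal_length": 5.2, "sepal_width": 3.6, "petal_length": 1.4, "petal_width": 0.3}

# send a batch of requests; the traffic splitter routes each one to a backend API by weight
for _ in range(20):
    print(requests.post(endpoint, json=payload).text)
```

Running `cortex get iris-classifier --env aws` afterwards shows how the 2XX counts are distributed across the two backend Realtime APIs.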
diff --git a/docs/tutorials/traffic-splitting/cortex.yaml b/docs/tutorials/traffic-splitting/cortex.yaml deleted file mode 100644 index 16702378cd..0000000000 --- a/docs/tutorials/traffic-splitting/cortex.yaml +++ /dev/null @@ -1,28 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: iris-classifier-pytorch - kind: RealtimeAPI - predictor: - type: python - path: pytorch_predictor.py - config: - model: s3://cortex-examples/pytorch/iris-classifier/weights.pth - monitoring: - model_type: classification - -- name: iris-classifier-onnx - kind: RealtimeAPI - predictor: - type: onnx - path: onnx_predictor.py - model_path: s3://cortex-examples/onnx/iris-classifier/ - monitoring: - model_type: classification - -- name: iris-classifier - kind: TrafficSplitter - apis: - - name: iris-classifier-onnx - weight: 30 - - name: iris-classifier-pytorch - weight: 70 diff --git a/docs/tutorials/traffic-splitting/model.py b/docs/tutorials/traffic-splitting/model.py deleted file mode 100644 index fe29ff7b6d..0000000000 --- a/docs/tutorials/traffic-splitting/model.py +++ /dev/null @@ -1,59 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.autograd import Variable -from sklearn.datasets import load_iris -from sklearn.model_selection import train_test_split -from sklearn.metrics import accuracy_score - - -class IrisNet(nn.Module): - def __init__(self): - super(IrisNet, self).__init__() - self.fc1 = nn.Linear(4, 100) - self.fc2 = nn.Linear(100, 100) - self.fc3 = nn.Linear(100, 3) - self.softmax = nn.Softmax(dim=1) - - def forward(self, X): - X = F.relu(self.fc1(X)) - X = self.fc2(X) - X = self.fc3(X) - X = self.softmax(X) - return X - - -if __name__ == "__main__": - iris = load_iris() - X, y = iris.data, iris.target - X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42) - - train_X = Variable(torch.Tensor(X_train).float()) - test_X = Variable(torch.Tensor(X_test).float()) - train_y = Variable(torch.Tensor(y_train).long()) - test_y = Variable(torch.Tensor(y_test).long()) - - model = IrisNet() - - criterion = nn.CrossEntropyLoss() - - optimizer = torch.optim.SGD(model.parameters(), lr=0.01) - - for epoch in range(1000): - optimizer.zero_grad() - out = model(train_X) - loss = criterion(out, train_y) - loss.backward() - optimizer.step() - - if epoch % 100 == 0: - print("number of epoch {} loss {}".format(epoch, loss)) - - predict_out = model(test_X) - _, predict_y = torch.max(predict_out, 1) - - print("prediction accuracy {}".format(accuracy_score(test_y.data, predict_y.data))) - - torch.save(model.state_dict(), "weights.pth") diff --git a/docs/tutorials/traffic-splitting/onnx_predictor.py b/docs/tutorials/traffic-splitting/onnx_predictor.py deleted file mode 100644 index b135129e14..0000000000 --- a/docs/tutorials/traffic-splitting/onnx_predictor.py +++ /dev/null @@ -1,20 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -labels = ["setosa", "versicolor", "virginica"] - - -class ONNXPredictor: - def __init__(self, onnx_client, config): - self.client = onnx_client - - def predict(self, payload): - model_input = [ - payload["sepal_length"], - payload["sepal_width"], - payload["petal_length"], - payload["petal_width"], - ] - - prediction = self.client.predict(model_input) - predicted_class_id = prediction[0][0] - return labels[predicted_class_id] diff --git a/docs/tutorials/traffic-splitting/pytorch_predictor.py b/docs/tutorials/traffic-splitting/pytorch_predictor.py deleted file mode 100644 index 71994bb9ae..0000000000 --- a/docs/tutorials/traffic-splitting/pytorch_predictor.py +++ /dev/null @@ -1,50 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import re -import torch -import os -import boto3 -from botocore import UNSIGNED -from botocore.client import Config -from model import IrisNet - -labels = ["setosa", "versicolor", "virginica"] - - -class PythonPredictor: - def __init__(self, config): - # download the model - bucket, key = re.match("s3://(.+?)/(.+)", config["model"]).groups() - - if os.environ.get("AWS_ACCESS_KEY_ID"): - s3 = boto3.client("s3") # client will use your credentials if available - else: - s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client - - s3.download_file(bucket, key, "/tmp/model.pth") - - # initialize the model - model = IrisNet() - model.load_state_dict(torch.load("/tmp/model.pth")) - model.eval() - - self.model = model - - def predict(self, payload): - # Convert the request to a tensor and pass it into the model - input_tensor = torch.FloatTensor( - [ - [ - payload["sepal_length"], - payload["sepal_width"], - payload["petal_length"], - payload["petal_width"], - ] - ] - ) - - # Run the prediction - output = self.model(input_tensor) - - # Translate the model output to the corresponding label string - return labels[torch.argmax(output[0])] diff --git a/docs/tutorials/traffic-splitting/sample.json b/docs/tutorials/traffic-splitting/sample.json deleted file mode 100644 index e17bbb2896..0000000000 --- a/docs/tutorials/traffic-splitting/sample.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "sepal_length": 5.2, - "sepal_width": 3.6, - "petal_length": 1.4, - "petal_width": 0.3 -} diff --git a/docs/tutorials/utils/README.md b/docs/tutorials/utils/README.md deleted file mode 100644 index 61202eb0c0..0000000000 --- a/docs/tutorials/utils/README.md +++ /dev/null @@ -1,36 +0,0 @@ -## Throughput tester - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -[throughput_test.py](throughput_test.py) is a Python CLI that can be used to test the throughput of your deployed API. The throughput will vary depending on your API's configuration (specified in your `cortex.yaml` file), your local machine's resources (mostly CPU, since it has to spawn many concurrent requests), and the internet connection on your local machine. - -```bash -Usage: throughput_test.py [OPTIONS] ENDPOINT PAYLOAD - - Program for testing the throughput of Cortex-deployed APIs. - -Options: - -w, --processes INTEGER Number of processes for prediction requests. 
[default: 1] - -t, --threads INTEGER Number of threads per process for prediction requests. [default: 1] - -s, --samples INTEGER Number of samples to run per thread. [default: 10] - -i, --time-based FLOAT How long the thread making predictions will run for in seconds. - If set, -s option will be ignored. - --help Show this message and exit. -``` - -`ENDPOINT` is the API's endpoint, which you can get by running `cortex get `. This argument can also be exported as an environment variable instead of being passed to the CLI. - -`PAYLOAD` can either be a local file or an URL resource that points to a file. The allowed extension types for the file are `json` and `jpg`. This argument can also be exported as an environment variable instead of being passed to the CLI. - -* `json` files are generally `sample.json`s as they are found in most Cortex examples. Each of these is attached to the request as payload. The content type of the request is `"application/json"`. -* `jpg` images are read as numpy arrays and then are converted to a bytes object using `cv2.imencode` function. The content type of the request is `"application/octet-stream"`. - -The same payload `PAYLOAD` is attached to all requests the script makes. - -### Dependencies - -The [throughput_test.py](throughput_test.py) CLI has been tested with Python 3.6.9. To install the CLI's dependencies, run the following: - -```bash -pip install requests click opencv-contrib-python numpy validator-collection imageio -``` diff --git a/docs/tutorials/utils/throughput_test.py b/docs/tutorials/utils/throughput_test.py deleted file mode 100644 index c157cf0b29..0000000000 --- a/docs/tutorials/utils/throughput_test.py +++ /dev/null @@ -1,179 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import os -import sys -import click -import concurrent.futures -import requests -import imageio -import json -import time -import itertools -import cv2 -import numpy as np - -from validator_collection import checkers - - -@click.command(help="Program for testing the throughput of Cortex-deployed APIs.") -@click.argument("endpoint", type=str, envvar="ENDPOINT") -@click.argument("payload", type=str, envvar="PAYLOAD") -@click.option( - "--processes", - "-p", - type=int, - default=1, - show_default=True, - help="Number of processes for prediction requests.", -) -@click.option( - "--threads", - "-t", - type=int, - default=1, - show_default=True, - help="Number of threads per process for prediction requests.", -) -@click.option( - "--samples", - "-s", - type=int, - default=10, - show_default=True, - help="Number of samples to run per thread.", -) -@click.option( - "--time-based", - "-i", - type=float, - default=0.0, - help="How long the thread making predictions will run for in seconds. 
If set, -s option will be ignored.", -) -def main(payload, endpoint, processes, threads, samples, time_based): - file_type = None - if checkers.is_url(payload): - if payload.lower().endswith(".json"): - file_type = "json" - payload_data = requests.get(payload).json() - elif payload.lower().endswith(".jpg"): - file_type = "jpg" - payload_data = imageio.imread(payload) - elif checkers.is_file(payload): - if payload.lower().endswith(".json"): - file_type = "json" - with open(payload, "r") as f: - payload_data = json.load(f) - elif payload.lower().endswith(".jpg"): - file_type = "jpg" - payload_data = cv2.imread(payload, cv2.IMREAD_COLOR) - else: - print(f"'{payload}' isn't an URL resource, nor is it a local file") - sys.exit(1) - - if file_type is None: - print(f"'{payload}' doesn't point to a jpg image or to a json file") - sys.exit(1) - if file_type == "jpg": - data = image_to_jpeg_bytes(payload_data) - if file_type == "json": - data = json.dumps(payload_data) - - print("Starting the inference throughput test...") - results = [] - start = time.time() - with concurrent.futures.ProcessPoolExecutor(max_workers=processes) as executor: - results = executor_submitter( - executor, processes, process_worker, threads, data, endpoint, samples, time_based - ) - end = time.time() - elapsed = end - start - - total_requests = sum(results) - - print(f"A total of {total_requests} requests have been served in {elapsed} seconds") - print(f"Avg number of inferences/sec is {total_requests / elapsed}") - print(f"Avg time spent on an inference is {elapsed / total_requests} seconds") - - -def process_worker(threads, data, endpoint, samples, time_based): - results = [] - with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor: - results = executor_submitter(executor, threads, task, data, endpoint, samples, time_based) - - return results - - -def executor_submitter(executor, workers, *args, **kwargs): - futures = [] - for worker in range(workers): - future = executor.submit(*args, **kwargs) - futures.append(future) - - results = [future.result() for future in futures] - results = list(itertools.chain.from_iterable(results)) - - return results - - -def task(data, endpoint, samples, time_based): - timeout = 60 - - if isinstance(data, str): - headers = {"content-type": "application/json"} - elif isinstance(data, bytes): - headers = {"content-type": "application/octet-stream"} - else: - return - - if time_based == 0.0: - for i in range(samples): - try: - resp = requests.post( - endpoint, - data=data, - headers=headers, - timeout=timeout, - ) - except Exception as e: - print(e) - break - time.sleep(0.1) - return [samples] - else: - start = time.time() - counter = 0 - while start + time_based >= time.time(): - try: - resp = requests.post( - endpoint, - data=data, - headers=headers, - timeout=timeout, - ) - except Exception as e: - print(e) - break - time.sleep(0.1) - counter += 1 - return [counter] - - -def image_to_jpeg_nparray(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): - """ - Convert numpy image to jpeg numpy vector. - """ - is_success, im_buf_arr = cv2.imencode(".jpg", image, quality) - return im_buf_arr - - -def image_to_jpeg_bytes(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): - """ - Convert numpy image to bytes-encoded jpeg image. 
- """ - buf = image_to_jpeg_nparray(image, quality) - byte_im = buf.tobytes() - return byte_im - - -if __name__ == "__main__": - main() From 22010bfd7a4e54f8af07caaf156118b88ac88bb1 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Fri, 4 Dec 2020 16:53:48 -0800 Subject: [PATCH 05/36] Update docs --- .gitbook.yaml | 5 +-- CODE_OF_CONDUCT.md | 76 ----------------------------------- CONTRIBUTING.md | 9 ----- README.md | 99 ++-------------------------------------------- docs/contact.md | 19 --------- docs/summary.md | 9 ++--- 6 files changed, 7 insertions(+), 210 deletions(-) delete mode 100644 CODE_OF_CONDUCT.md delete mode 100644 CONTRIBUTING.md delete mode 100644 docs/contact.md diff --git a/.gitbook.yaml b/.gitbook.yaml index 8b207447a3..8a7909c2c0 100644 --- a/.gitbook.yaml +++ b/.gitbook.yaml @@ -1,13 +1,10 @@ root: ./docs/ structure: - readme: ../README.md summary: summary.md redirects: - tutorial: ./tutorials/hello-world/python/README.md - tutorial/realtime: ./tutorials/hello-world/python/README.md - tutorial/batch: ./tutorials/batch/python/README.md + start: ./tutorials/realtime.md install: ./aws/install.md uninstall: ./aws/uninstall.md update: ./aws/update.md diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index 425f0e1e73..0000000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,76 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or - advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. 
Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at contact@cortex.dev. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -[homepage]: https://www.contributor-covenant.org - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index facbf253e0..0000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,9 +0,0 @@ -# Contributing - -Thank you for your interest in contributing to Cortex! - -- **Report a bug, request a feature, or share feedback:** please let us know via [email](mailto:hello@cortex.dev), [chat](https://gitter.im/cortexlabs/cortex), or [issues](https://github.com/cortexlabs/cortex/issues). - -- **Add an example:** we're always excited to see cool models deployed with Cortex. Please check out our [examples](examples) and feel free to add a one by submitting a pull request. - -- **Implement a feature:** here are [instructions for setting up a development environment](docs/contributing/development.md). If you'd like to contribute significant code to the project, please reach out to us so we can work together on the design and make sure we're on the same page before you get started. diff --git a/README.md b/README.md index bbbde36959..9feb5f7aeb 100644 --- a/README.md +++ b/README.md @@ -3,15 +3,10 @@
- -[install](https://docs.cortex.dev/install) • [documentation](https://docs.cortex.dev) • [community](https://gitter.im/cortexlabs/cortex) - # Deploy machine learning models to production Cortex is an open source platform for deploying, managing, and scaling machine learning in production. -
- ## Model serving infrastructure * Supports deploying TensorFlow, PyTorch, sklearn and other models as realtime or batch APIs. @@ -19,32 +14,6 @@ Cortex is an open source platform for deploying, managing, and scaling machine l * Runs inference on spot instances with on-demand backups. * Autoscales to handle production workloads. -#### Configure Cortex - -```yaml -# cluster.yaml - -region: us-east-1 -instance_type: g4dn.xlarge -min_instances: 10 -max_instances: 100 -spot: true -``` - -#### Spin up Cortex on your AWS account - -```text -$ cortex cluster up --config cluster.yaml - -○ configuring autoscaling ✓ -○ configuring networking ✓ -○ configuring logging ✓ - -cortex is ready! -``` - -
- ## Reproducible deployments * Package dependencies, code, and configuration for reproducible deployments. @@ -52,43 +21,6 @@ cortex is ready! * Integrate with your data science platform or CI/CD system. * Test locally before deploying to your cluster. -#### Implement a predictor - -```python -# predictor.py - -from transformers import pipeline - -class PythonPredictor: - def __init__(self, config): - self.model = pipeline(task="text-generation") - - def predict(self, payload): - return self.model(payload["text"])[0] -``` - -#### Configure an API - -```python -api_spec = { - "name": "text-generator", - "kind": "RealtimeAPI", - "compute": { - "gpu": 1, - "mem": "8Gi", - }, - "autoscaling": { - "min_replicas": 1, - "max_replicas": 10 - }, - "networking": { - "api_gateway": "public" - } -} -``` - -
- ## Scalable machine learning APIs * Scale to handle production workloads with request-based autoscaling. @@ -97,33 +29,8 @@ api_spec = { * Configure traffic splitting for A/B testing. * Update APIs without downtime. -#### Deploy to your cluster - -```python -import cortex - -cx = cortex.client("aws") -cx.deploy(api_spec, project_dir=".") - -# creating https://example.com/text-generator -``` - -#### Consume your API - -```python -import requests - -endpoint = "https://example.com/text-generator" -payload = {"text": "hello world"} -prediction = requests.post(endpoint, payload) -``` - -
- ## Get started -```bash -pip install cortex -``` - -See the [installation guide](https://docs.cortex.dev/install) for next steps. +* [Deploy a realtime API](https://docs.cortex.dev/start) +* [Read the docs](https://docs.cortex.dev) +* [Join our community](https://gitter.im/cortexlabs/cortex) diff --git a/docs/contact.md b/docs/contact.md deleted file mode 100644 index 70a9748f34..0000000000 --- a/docs/contact.md +++ /dev/null @@ -1,19 +0,0 @@ -# Contact us - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -## Support - -[GitHub](https://github.com/cortexlabs/cortex/issues) - Submit feature requests, file bugs, and track issues. - -[Gitter](https://gitter.im/cortexlabs/cortex) - Chat with us in our community channel. - -[Email](mailto:hello@cortex.dev) - Email us at `hello@cortex.dev` to contact us privately. - -## Contributing - -Find instructions for how to set up your development environment in the [development guide](contributing/development.md). - -## We're hiring - -Interested in joining us? See our [job postings](https://angel.co/company/cortex-labs-inc/jobs). diff --git a/docs/summary.md b/docs/summary.md index 04f21db7a3..cd57cf0bd4 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -1,10 +1,9 @@ # Table of contents -* [Deploy machine learning models to production](../README.md) -* [Get started](tutorials/realtime.md) -* [Contact us](contact.md) +* [Deploy a realtime API](tutorials/realtime.md) +* [Deploy a batch API](tutorials/batch.md) -## Running Cortex on AWS +## Running on AWS * [Install](aws/install.md) * [Credentials](aws/credentials.md) @@ -30,14 +29,12 @@ * [Autoscaling](deployments/realtime-api/autoscaling.md) * [Prediction monitoring](deployments/realtime-api/prediction-monitoring.md) * [Traffic Splitter](deployments/realtime-api/traffic-splitter.md) - * [Realtime API tutorial](tutorials/realtime/README.md) * [Batch API](deployments/batch-api.md) * [Predictor implementation](deployments/batch-api/predictors.md) * [API configuration](deployments/batch-api/api-configuration.md) * [API deployment](deployments/batch-api/deployment.md) * [Endpoints](deployments/batch-api/endpoints.md) * [Job statuses](deployments/batch-api/statuses.md) - * [Batch API tutorial](tutorials/batch/README.md) ## Advanced From a6695daf61794ec5ae9e6fcdb022276365425576 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Fri, 4 Dec 2020 16:57:14 -0800 Subject: [PATCH 06/36] Update README.md --- README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9feb5f7aeb..4d8f3165d7 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ Cortex is an open source platform for deploying, managing, and scaling machine learning in production. +
+ ## Model serving infrastructure * Supports deploying TensorFlow, PyTorch, sklearn and other models as realtime or batch APIs. @@ -14,6 +16,8 @@ Cortex is an open source platform for deploying, managing, and scaling machine l * Runs inference on spot instances with on-demand backups. * Autoscales to handle production workloads. +
+ ## Reproducible deployments * Package dependencies, code, and configuration for reproducible deployments. @@ -21,6 +25,8 @@ Cortex is an open source platform for deploying, managing, and scaling machine l * Integrate with your data science platform or CI/CD system. * Test locally before deploying to your cluster. +
+ ## Scalable machine learning APIs * Scale to handle production workloads with request-based autoscaling. @@ -29,8 +35,9 @@ Cortex is an open source platform for deploying, managing, and scaling machine l * Configure traffic splitting for A/B testing. * Update APIs without downtime. +
+ ## Get started -* [Deploy a realtime API](https://docs.cortex.dev/start) -* [Read the docs](https://docs.cortex.dev) +* [Deploy models](https://docs.cortex.dev/start) * [Join our community](https://gitter.im/cortexlabs/cortex) From 49586aa4d4e76ade2aa9ec37c0aa0cf6a8792632 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Fri, 4 Dec 2020 16:59:39 -0800 Subject: [PATCH 07/36] Update README.md --- .gitbook.yaml | 1 - README.md | 3 +-- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/.gitbook.yaml b/.gitbook.yaml index 8a7909c2c0..52b37106f2 100644 --- a/.gitbook.yaml +++ b/.gitbook.yaml @@ -4,7 +4,6 @@ structure: summary: summary.md redirects: - start: ./tutorials/realtime.md install: ./aws/install.md uninstall: ./aws/uninstall.md update: ./aws/update.md diff --git a/README.md b/README.md index 4d8f3165d7..e955d8c274 100644 --- a/README.md +++ b/README.md @@ -39,5 +39,4 @@ Cortex is an open source platform for deploying, managing, and scaling machine l ## Get started -* [Deploy models](https://docs.cortex.dev/start) -* [Join our community](https://gitter.im/cortexlabs/cortex) +[Deploy models](https://docs.cortex.dev) and [join our community](https://gitter.im/cortexlabs/cortex). From 994a8e1b0f41aa256043a2d2b0870bbcd1087853 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Fri, 4 Dec 2020 17:31:19 -0800 Subject: [PATCH 08/36] Reorganize misc docs --- .../environments.md | 0 .../python-client.md | 0 .../telemetry.md | 4 - .../development.md => guides/contributing.md} | 2 +- docs/miscellaneous/architecture.md | 7 - docs/miscellaneous/cli.md | 323 ------------------ docs/summary.md | 16 +- 7 files changed, 5 insertions(+), 347 deletions(-) rename docs/{miscellaneous => deployments}/environments.md (100%) rename docs/{miscellaneous => deployments}/python-client.md (100%) rename docs/{miscellaneous => deployments}/telemetry.md (71%) rename docs/{contributing/development.md => guides/contributing.md} (99%) delete mode 100644 docs/miscellaneous/architecture.md delete mode 100644 docs/miscellaneous/cli.md diff --git a/docs/miscellaneous/environments.md b/docs/deployments/environments.md similarity index 100% rename from docs/miscellaneous/environments.md rename to docs/deployments/environments.md diff --git a/docs/miscellaneous/python-client.md b/docs/deployments/python-client.md similarity index 100% rename from docs/miscellaneous/python-client.md rename to docs/deployments/python-client.md diff --git a/docs/miscellaneous/telemetry.md b/docs/deployments/telemetry.md similarity index 71% rename from docs/miscellaneous/telemetry.md rename to docs/deployments/telemetry.md index b26f2ece87..e7e767c79c 100644 --- a/docs/miscellaneous/telemetry.md +++ b/docs/deployments/telemetry.md @@ -8,10 +8,6 @@ By default, Cortex sends anonymous usage data to Cortex Labs. If telemetry is enabled, events and errors are collected. Each time you run a command an event will be sent with a randomly generated unique CLI ID and the name of the command. For example, if you run `cortex deploy`, Cortex Labs will receive an event of the structure `{id: 1234, command: "deploy"}`. In addition, the operator sends heartbeats that include cluster metrics like the types of instances running in your cluster. -## Why is this data being collected? - -Telemetry helps us make Cortex better. 
For example, we discovered that people are running `cortex delete` more times than we expected and realized that our documentation doesn't explain clearly that `cortex deploy` is declarative and can be run consecutively without deleting APIs. - ## How do I opt out? If you'd like to disable telemetry, modify your `~/.cortex/cli.yaml` file (or create it if it doesn't exist) and add `telemetry: false`. diff --git a/docs/contributing/development.md b/docs/guides/contributing.md similarity index 99% rename from docs/contributing/development.md rename to docs/guides/contributing.md index c851e0abf9..f87133dc8c 100644 --- a/docs/contributing/development.md +++ b/docs/guides/contributing.md @@ -1,4 +1,4 @@ -# Development +# Contributing ## Remote development diff --git a/docs/miscellaneous/architecture.md b/docs/miscellaneous/architecture.md deleted file mode 100644 index 88940898ec..0000000000 --- a/docs/miscellaneous/architecture.md +++ /dev/null @@ -1,7 +0,0 @@ -# Architecture diagram - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -![architecture diagram](https://user-images.githubusercontent.com/808475/83995909-92c1cf00-a90f-11ea-983f-c96117e42aa3.png) - -_note: this diagram is simplified for illustrative purposes_ diff --git a/docs/miscellaneous/cli.md b/docs/miscellaneous/cli.md deleted file mode 100644 index a1bf1ee72e..0000000000 --- a/docs/miscellaneous/cli.md +++ /dev/null @@ -1,323 +0,0 @@ -# CLI commands - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -## Install the CLI - -```bash -pip install cortex -``` - -## Install the CLI without Python Client - -### Mac/Linux OS - -```bash -# Replace `INSERT_CORTEX_VERSION` with the complete CLI version (e.g. 0.18.1): -$ bash -c "$(curl -sS https://raw.githubusercontent.com/cortexlabs/cortex/vINSERT_CORTEX_VERSION/get-cli.sh)" - -# For example to download CLI version 0.18.1 (Note the "v"): -$ bash -c "$(curl -sS https://raw.githubusercontent.com/cortexlabs/cortex/v0.18.1/get-cli.sh)" -``` - -By default, the Cortex CLI is installed at `/usr/local/bin/cortex`. To install the executable elsewhere, export the `CORTEX_INSTALL_PATH` environment variable to your desired location before running the command above. - -By default, the Cortex CLI creates a directory at `~/.cortex/` and uses it to store environment configuration. To use a different directory, export the `CORTEX_CLI_CONFIG_DIR` environment variable before running a `cortex` command. - -### Windows - -To install the Cortex CLI on a Windows machine, follow [this guide](../guides/windows-cli.md). 
- -## Command overview - -### deploy - -```text -create or update apis - -Usage: - cortex deploy [CONFIG_FILE] [flags] - -Flags: - -e, --env string environment to use (default "local") - -f, --force override the in-progress api update - -y, --yes skip prompts - -o, --output string output format: one of pretty|json (default "pretty") - -h, --help help for deploy -``` - -### get - -```text -get information about apis or jobs - -Usage: - cortex get [API_NAME] [JOB_ID] [flags] - -Flags: - -e, --env string environment to use (default "local") - -w, --watch re-run the command every 2 seconds - -o, --output string output format: one of pretty|json (default "pretty") - -v, --verbose show additional information (only applies to pretty output format) - -h, --help help for get -``` - -### logs - -```text -stream logs from an api - -Usage: - cortex logs API_NAME [JOB_ID] [flags] - -Flags: - -e, --env string environment to use (default "local") - -h, --help help for logs -``` - -### refresh - -```text -restart all replicas for an api (without downtime) - -Usage: - cortex refresh API_NAME [flags] - -Flags: - -e, --env string environment to use (default "local") - -f, --force override the in-progress api update - -o, --output string output format: one of pretty|json (default "pretty") - -h, --help help for refresh -``` - -### predict - -```text -make a prediction request using a json file - -Usage: - cortex predict API_NAME JSON_FILE [flags] - -Flags: - -e, --env string environment to use (default "local") - -h, --help help for predict -``` - -### delete - -```text -delete any kind of api or stop a batch job - -Usage: - cortex delete API_NAME [JOB_ID] [flags] - -Flags: - -e, --env string environment to use (default "local") - -f, --force delete the api without confirmation - -c, --keep-cache keep cached data for the api - -o, --output string output format: one of pretty|json (default "pretty") - -h, --help help for delete -``` - -### cluster up - -```text -spin up a cluster - -Usage: - cortex cluster up [flags] - -Flags: - -c, --config string path to a cluster configuration file - --aws-key string aws access key id - --aws-secret string aws secret access key - --cluster-aws-key string aws access key id to be used by the cluster - --cluster-aws-secret string aws secret access key to be used by the cluster - -e, --configure-env string name of environment to configure (default "aws") - -y, --yes skip prompts - -h, --help help for up -``` - -### cluster info - -```text -get information about a cluster - -Usage: - cortex cluster info [flags] - -Flags: - -c, --config string path to a cluster configuration file - -n, --name string aws name of the cluster - -r, --region string aws region of the cluster - --aws-key string aws access key id - --aws-secret string aws secret access key - -e, --configure-env string name of environment to configure - -d, --debug save the current cluster state to a file - -y, --yes skip prompts - -h, --help help for info -``` - -### cluster configure - -```text -update a cluster's configuration - -Usage: - cortex cluster configure [flags] - -Flags: - -c, --config string path to a cluster configuration file - --aws-key string aws access key id - --aws-secret string aws secret access key - --cluster-aws-key string aws access key id to be used by the cluster - --cluster-aws-secret string aws secret access key to be used by the cluster - -e, --configure-env string name of environment to configure - -y, --yes skip prompts - -h, --help help for configure -``` - -### cluster down - -```text 
-spin down a cluster - -Usage: - cortex cluster down [flags] - -Flags: - -c, --config string path to a cluster configuration file - -n, --name string aws name of the cluster - -r, --region string aws region of the cluster - --aws-key string aws access key id - --aws-secret string aws secret access key - -y, --yes skip prompts - -h, --help help for down -``` - -### cluster export - -```text -download the code and configuration for APIs - -Usage: - cortex cluster export [API_NAME] [API_ID] [flags] - -Flags: - -c, --config string path to a cluster configuration file - -n, --name string aws name of the cluster - -r, --region string aws region of the cluster - --aws-key string aws access key id - --aws-secret string aws secret access key - -h, --help help for export -``` - -### env configure - -```text -configure an environment - -Usage: - cortex env configure [ENVIRONMENT_NAME] [flags] - -Flags: - -p, --provider string set the provider without prompting - -o, --operator-endpoint string set the operator endpoint without prompting - -k, --aws-access-key-id string set the aws access key id without prompting - -s, --aws-secret-access-key string set the aws secret access key without prompting - -r, --aws-region string set the aws region without prompting - -h, --help help for configure -``` - -### env list - -```text -list all configured environments - -Usage: - cortex env list [flags] - -Flags: - -o, --output string output format: one of pretty|json (default "pretty") - -h, --help help for list -``` - -### env default - -```text -set the default environment - -Usage: - cortex env default [ENVIRONMENT_NAME] [flags] - -Flags: - -h, --help help for default -``` - -### env delete - -```text -delete an environment configuration - -Usage: - cortex env delete [ENVIRONMENT_NAME] [flags] - -Flags: - -h, --help help for delete -``` - -### version - -```text -print the cli and cluster versions - -Usage: - cortex version [flags] - -Flags: - -e, --env string environment to use (default "local") - -h, --help help for version -``` - -### completion - -```text -generate shell completion scripts - -to enable cortex shell completion: - bash: - add this to ~/.bash_profile (mac) or ~/.bashrc (linux): - source <(cortex completion bash) - - note: bash-completion must be installed on your system; example installation instructions: - mac: - 1) install bash completion: - brew install bash-completion - 2) add this to your ~/.bash_profile: - source $(brew --prefix)/etc/bash_completion - 3) log out and back in, or close your terminal window and reopen it - ubuntu: - 1) install bash completion: - apt update && apt install -y bash-completion # you may need sudo - 2) open ~/.bashrc and uncomment the bash completion section, or add this: - if [ -f /etc/bash_completion ] && ! shopt -oq posix; then . 
/etc/bash_completion; fi - 3) log out and back in, or close your terminal window and reopen it - - zsh: - option 1: - add this to ~/.zshrc: - source <(cortex completion zsh) - if that failed, you can try adding this line (above the source command you just added): - autoload -Uz compinit && compinit - option 2: - create a _cortex file in your fpath, for example: - cortex completion zsh > /usr/local/share/zsh/site-functions/_cortex - -Note: this will also add the "cx" alias for cortex for convenience - -Usage: - cortex completion SHELL [flags] - -Flags: - -h, --help help for completion -``` diff --git a/docs/summary.md b/docs/summary.md index cd57cf0bd4..3b984a57ec 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -35,6 +35,9 @@ * [API deployment](deployments/batch-api/deployment.md) * [Endpoints](deployments/batch-api/endpoints.md) * [Job statuses](deployments/batch-api/statuses.md) +* [Python client](deployments/python-client.md) +* [Environments](deployments/environments.md) +* [Telemetry](deployments/telemetry.md) ## Advanced @@ -44,14 +47,6 @@ * [Python packages](deployments/python-packages.md) * [System packages](deployments/system-packages.md) -## Miscellaneous - -* [CLI commands](miscellaneous/cli.md) -* [Python client](miscellaneous/python-client.md) -* [Environments](miscellaneous/environments.md) -* [Architecture diagram](miscellaneous/architecture.md) -* [Telemetry](miscellaneous/telemetry.md) - ## Troubleshooting * [API is stuck updating](troubleshooting/stuck-updating.md) @@ -73,7 +68,4 @@ * [Docker Hub rate limiting](guides/docker-hub-rate-limiting.md) * [Private docker registry](guides/private-docker.md) * [Install CLI on Windows](guides/windows-cli.md) - -## Contributing - -* [Development](contributing/development.md) +* [Contributing](guides/contributing.md) From 91c4ce41fa698ef5abec7b08b304e5a234ac1be6 Mon Sep 17 00:00:00 2001 From: vishal Date: Mon, 7 Dec 2020 18:31:25 -0500 Subject: [PATCH 09/36] Add a simple version of the batch tutorial --- docs/tutorials/batch.md | 148 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index e69de29bb2..6a188d487c 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -0,0 +1,148 @@ +# Deploy a batch API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +**Note: Batch APIs are only supported on a Cortex cluster (in AWS).** + +## Install cortex + +```bash +$ pip install cortex +``` + +## Spin up a cluster on AWS (requires AWS credentials) + +```bash +$ cortex cluster up +``` + +## Define a batch API + +```python +# batch.py + +import cortex + +class PythonPredictor: + def __init__(self, config, job_spec): + from torchvision import transforms + import torchvision + import requests + import boto3 + import os + import re + + self.model = torchvision.models.alexnet(pretrained=True).eval() + self.labels = requests.get(config["labels"]).text.split("\n")[1:] + + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.preprocess = transforms.Compose( + [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] + ) + + self.s3 = boto3.client("s3") # initialize S3 client to save results + self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() + self.key = os.path.join(self.key, job_spec["job_id"]) + + def predict(self, payload, batch_id): + import json + import torch + from PIL import Image + from io import BytesIO + import requests + + tensor_list = [] + for image_url in payload: # download and preprocess each image + img_pil = Image.open(BytesIO(requests.get(image_url).content)) + tensor_list.append(self.preprocess(img_pil)) + + img_tensor = torch.stack(tensor_list) + with torch.no_grad(): # classify the batch of images + prediction = self.model(img_tensor) + _, indices = prediction.max(1) + + results = [{"url": payload[i], "class": self.labels[class_idx]} for i, class_idx in enumerate(indices)] + self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json.dumps(results)) + +requirements = ["torch", "boto3", "pillow", "torchvision", "requests"] + +api_spec = { + "name": "image-classifier", + "kind": "BatchAPI", + "predictor": { + "config": { + "labels": "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" + } + } +} + +cx = cortex.client("aws") +cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) +``` + +## Deploy to your Cortex cluster on AWS + +```bash +$ python batch.py +``` + +## Describe the Batch API + +```bash +$ cortex get image-classifier -e aws +``` + +## Submit a job + +```python +import cortex +import requests + +cx = cortex.client("aws") +batch_endpoint = cx.get_api("image-classifier")["endpoint"] + +dest_s3_dir = # specify S3 directory for the results (make sure your cluster has access to this bucket) + +job_spec = { + "workers": 1, + "item_list": { + "items": [ + "https://i.imgur.com/PzXprwl.jpg", + "https://i.imgur.com/E4cOSLw.jpg", + "https://user-images.githubusercontent.com/4365343/96516272-d40aa980-1234-11eb-949d-8e7e739b8345.jpg", + "https://i.imgur.com/jDimNTZ.jpg", + "https://i.imgur.com/WqeovVj.jpg" + ], + "batch_size": 2 + }, + "config": { + "dest_s3_dir": dest_s3_dir + } +} + +response = requests.post(batch_endpoint, json=job_spec) + +print(response.text) +# > {"job_id":"69b183ed6bdf3e9b","api_name":"image-classifier", "config": {"dest_s3_dir": ...}} +``` + +## Monitor the job + +```bash +$ cortex get image-classifier 69b183ed6bdf3e9b -e aws +``` + +## Stream job logs + +```bash +$ cortex logs image-classifier 69b183ed6bdf3e9b -e aws +``` + +## View the results + +Once the job is complete, you should be able to find the results of the batch job in the S3 directory you've specified.
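If you prefer to inspect the output from Python rather than the AWS console, a minimal sketch along the following lines should work. It is not part of the tutorial's files; it assumes `boto3` is installed locally, that your local AWS credentials can read the destination bucket, and that `dest_s3_dir` and `job_id` are placeholders you replace with the values used above.

```python
# fetch_results.py -- optional local helper (not part of the tutorial files)
import json
import re

import boto3

dest_s3_dir = "s3://my-bucket/dir"  # placeholder: the directory passed in the job config
job_id = "69b183ed6bdf3e9b"         # placeholder: the job id returned when you submitted the job

bucket, prefix = re.match("s3://(.+?)/(.+)", dest_s3_dir).groups()
s3 = boto3.client("s3")

# each batch writes one <batch_id>.json file under <prefix>/<job_id>/
results = []
paginator = s3.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=bucket, Prefix=f"{prefix}/{job_id}"):
    for obj in page.get("Contents", []):
        body = s3.get_object(Bucket=bucket, Key=obj["Key"])["Body"]
        results += json.loads(body.read())

print(f"{len(results)} classifications")
```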
+ +## Delete the Batch API + +```bash +$ cortex delete --env local image-classifier +``` From 39efeede7a66fe55a34dc1e9ebda2b714e56403b Mon Sep 17 00:00:00 2001 From: vishal Date: Mon, 7 Dec 2020 18:31:50 -0500 Subject: [PATCH 10/36] Remove batch example from the batch tutorial --- docs/tutorials/batch/README.md | 572 -------------------------- docs/tutorials/batch/cortex.yaml | 9 - docs/tutorials/batch/predictor.py | 81 ---- docs/tutorials/batch/requirements.txt | 4 - docs/tutorials/batch/sample.json | 3 - 5 files changed, 669 deletions(-) delete mode 100644 docs/tutorials/batch/README.md delete mode 100644 docs/tutorials/batch/cortex.yaml delete mode 100644 docs/tutorials/batch/predictor.py delete mode 100644 docs/tutorials/batch/requirements.txt delete mode 100644 docs/tutorials/batch/sample.json diff --git a/docs/tutorials/batch/README.md b/docs/tutorials/batch/README.md deleted file mode 100644 index a37cb8f966..0000000000 --- a/docs/tutorials/batch/README.md +++ /dev/null @@ -1,572 +0,0 @@ -# Deploy models as Batch APIs - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -This example shows how to deploy a batch image classification api that accepts a list of image urls as input, downloads the images, classifies them, and writes the results to S3. - -**Batch APIs are only supported on a Cortex cluster (in AWS).** You can find cluster installation documentation [here](../../../docs/aws/install.md). - -## Pre-requisites - -* [Install](../../../docs/aws/install.md) Cortex and create a cluster -* Create an S3 bucket/directory to store the results of the batch job -* AWS CLI (optional) - -
- -## Implement your predictor - -1. Create a Python file named `predictor.py`. -1. Define a Predictor class with a constructor that loads and initializes an image-classifier from `torchvision`. -1. Add a `predict()` function that will accept a list of images urls (http:// or s3://), downloads them, performs inference, and writes the predictions to S3. -1. Specify an `on_job_complete()` function that aggregates the results and writes them to a single file named `aggregated_results.json` in S3. - -```python -# predictor.py - -import os -import requests -import torch -import torchvision -from torchvision import transforms -from PIL import Image -from io import BytesIO -import boto3 -import json -import re - - -class PythonPredictor: - def __init__(self, config, job_spec): - self.model = torchvision.models.alexnet(pretrained=True).eval() - - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - self.preprocess = transforms.Compose( - [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] - ) - - self.labels = requests.get( - "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" - ).text.split("\n")[1:] - - if len(config.get("dest_s3_dir", "")) == 0: - raise Exception("'dest_s3_dir' field was not provided in job submission") - - self.s3 = boto3.client("s3") - - self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() - self.key = os.path.join(self.key, job_spec["job_id"]) - - def predict(self, payload, batch_id): - tensor_list = [] - - # download and preprocess each image - for image_url in payload: - if image_url.startswith("s3://"): - bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() - image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() - else: - image_bytes = requests.get(image_url).content - - img_pil = Image.open(BytesIO(image_bytes)) - tensor_list.append(self.preprocess(img_pil)) - - # classify the batch of images - img_tensor = torch.stack(tensor_list) - with torch.no_grad(): - prediction = self.model(img_tensor) - _, indices = prediction.max(1) - - # extract predicted classes - results = [ - {"url": payload[i], "class": self.labels[class_idx]} - for i, class_idx in enumerate(indices) - ] - json_output = json.dumps(results) - - # save results - self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) - - def on_job_complete(self): - all_results = [] - - # aggregate all classifications - paginator = self.s3.get_paginator("list_objects_v2") - for page in paginator.paginate(Bucket=self.bucket, Prefix=self.key): - for obj in page["Contents"]: - body = self.s3.get_object(Bucket=self.bucket, Key=obj["Key"])["Body"] - all_results += json.loads(body.read().decode("utf8")) - - # save single file containing aggregated classifications - self.s3.put_object( - Bucket=self.bucket, - Key=os.path.join(self.key, "aggregated_results.json"), - Body=json.dumps(all_results), - ) -``` - -Here are the complete [Predictor docs](../../../docs/deployments/batch-api/predictors.md). - -
- -## Specify your Python dependencies - -Create a `requirements.txt` file to specify the dependencies needed by `predictor.py`. Cortex will automatically install them into your runtime once you deploy: - -```python -# requirements.txt - -boto3 -torch -torchvision -pillow -``` - -
- -## Configure your API - -Create a `cortex.yaml` file and add the configuration below. An `api` with `kind: BatchAPI` will expose your model as an endpoint that will orchestrate offline batch inference across multiple workers upon receiving job requests. The configuration below defines how much `compute` each worker requires and your `predictor.py` determines how each batch should be processed. - -```yaml -# cortex.yaml - -- name: image-classifier - kind: BatchAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 -``` - -Here are the complete [API configuration docs](../../../docs/deployments/batch-api/api-configuration.md). - -
- -## Deploy your Batch API - -`cortex deploy` takes your model, your `predictor.py` implementation, and your configuration from `cortex.yaml` and creates an endpoint that can receive job submissions and manage running jobs. - -```bash -$ cortex deploy --env aws - -created image-classifier (BatchAPI) -``` - -Get the endpoint for your Batch API with `cortex get image-classifier`: - -```bash -$ cortex get image-classifier --env aws - -no submitted jobs - -endpoint: https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -``` - -
- -## Setup destination S3 directory - -Our `predictor.py` implementation writes results to an S3 directory. Before submitting a job, we need to create an S3 directory to store the output of the batch job. The S3 directory should be accessible by the credentials used to create your Cortex cluster. - -Export the S3 directory to an environment variable: - -```bash -$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir -``` - -
- -## Submit a job - -Now that you've deployed a Batch API, you are ready to submit jobs. You can provide image urls directly in the request by specifying the urls in `item_list`. The curl command below showcases how to submit image urls in the request. - -```bash -$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir -$ curl $BATCH_API_ENDPOINT \ - -X POST -H "Content-Type: application/json" \ - -d @- <` then type `EOF`. - -After submitting the job, you should get a response like this: - -```json -{"job_id":"69d6faf82e4660d3","api_name":"image-classifier", "config":{"dest_s3_dir": "YOUR_S3_BUCKET_HERE"}} -``` - -Take note of the job id in the response. - -### List the jobs for your Batch API - -```bash -$ cortex get image-classifier --env aws - -job id status progress start time duration -69d6faf82e4660d3 running 0/3 20 Jul 2020 01:07:44 UTC 3m26s - -endpoint: https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -``` - -### Get the job status with an HTTP request - -You can make a GET request to your `/JOB_ID` to get the status of your job. - -```bash -$ curl https://abcdefg.execute-api.us-west-2.amazonaws.com?jobID=69d6faf82e4660d3 - -{ - "job_status":{ - "job_id":"69d6faf82e4660d3", - "api_name":"image-classifier", - ... - }, - "endpoint":"https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier" -} -``` - -### Get job status using Cortex CLI - -You can also use the Cortex CLI to get the status of your job using `cortex get `. - -```bash -$ cortex get image-classifier 69d6faf82e4660d3 --env aws - -job id: 69d6faf82e4660d3 -status: running - -start time: 27 Jul 2020 15:02:25 UTC -end time: - -duration: 42s - -batch stats -total succeeded failed avg time per batch -3 0 0 - - -worker stats -requested initializing running failed succeeded -1 1 0 0 0 - -job endpoint: https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier/69d6faf82e4660d3 -``` - -### Stream logs - -You can stream logs realtime for debugging and monitoring purposes with `cortex logs ` - -```bash -$ cortex logs image-classifier 69d6fdeb2d8e6647 --env aws - -started enqueuing batches to queue -partitioning 5 items found in job submission into 3 batches of size 2 -completed enqueuing a total of 3 batches -spinning up workers... -... -2020-08-07 14:44:05.557598:cortex:pid-25:INFO:processing batch c9136381-6dcc-45bd-bd97-cc9c66ccc6d6 -2020-08-07 14:44:26.037276:cortex:pid-25:INFO:executing on_job_complete -2020-08-07 14:44:26.208972:cortex:pid-25:INFO:no batches left in queue, job has been completed -``` - -### Find your results - -Wait for the job to complete by streaming the logs with `cortex logs ` or watching for the job status to change with `cortex get --watch`. - -The status of your job, which you can get from `cortex get `, should change from `running` to `succeeded` once the job has completed. If it changes to a different status, you may be able to find the stacktrace using `cortex logs `. If your job has completed successfully, you can view the results of the image classification in the S3 directory you specified in the job submission. 
- -Using the AWS CLI: - -```bash -$ aws s3 ls $CORTEX_DEST_S3_DIR// - 161f9fda-fd08-44f3-b983-4529f950e40b.json - 40100ffb-6824-4560-8ca4-7c0d14273e05.json - c9136381-6dcc-45bd-bd97-cc9c66ccc6d6.json - aggregated_results.json -``` - -You can download the aggregated results file with `aws s3 cp $CORTEX_DEST_S3_DIR//aggregated_results.json .` and confirm that there are 16 classifications. - -
- -## Alternative job submission: image URLs in files - -In addition to providing the image URLs directly in the job submission request, it is possible to use image urls stored in newline delimited json files in S3. A newline delimited JSON file has one complete JSON object per line. - -Two newline delimited json files containing image urls for this tutorial have already been created for you and can be found at `s3://cortex-examples/image-classifier/`. If you have AWS CLI, you can list the directory and you should be able to find the files (`urls_0.json` and `urls_1.json`). - -```text -$ aws s3 ls s3://cortex-examples/image-classifier/ - PRE inception/ -... -2020-07-27 14:19:30 506 urls_0.json -2020-07-27 14:19:30 473 urls_1.json -``` - -To use JSON files as input data for the job, we will specify `delimited_files` in the job request. The Batch API will break up the JSON files into batches of desired size and push them onto a queue that is consumed by the pool of workers. - -### Dry run - -Before we submit the job, let's perform a dry run to ensure that only the desired files will be read. You can perform a dry run by appending `dryRun=true` query parameter to your job request. - -Get the endpoint from `cortex get image-classifier` if you haven't done so already. - -```bash -$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir -$ curl $BATCH_API_ENDPOINT?dryRun=true \ --X POST -H "Content-Type: application/json" \ --d @- <` then type `EOF`. - -You should expect a response like this: - -```text -s3://cortex-examples/image-classifier/urls_0.json -s3://cortex-examples/image-classifier/urls_1.json -validations passed -``` - -This shows that the correct files will be used as input for the job. - -### Classify image urls stored in S3 files - -When you submit a job specifying `delimited_files`, your Batch API will get all of the input S3 files based on `s3_paths` and will apply the filters specified in `includes` and `excludes`. Then your Batch API will read each file, split on the newline characters, and parse each item as a JSON object. Each item in the file is treated as a single sample and will be grouped together into batches and then placed onto a queue that is consumed by the pool of workers. - -In this example `urls_0.json` and `urls_1.json` each contain 8 urls. Let's classify the images from the URLs listed in those 2 files. - -```bash -$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir -$ curl $BATCH_API_ENDPOINT \ --X POST -H "Content-Type: application/json" \ --d @- <` then type `EOF`. - -After submitting this job, you should get a response like this: - -```json -{"job_id":"69d6faf82e4660d3","api_name":"image-classifier", "config":{"dest_s3_dir": "YOUR_S3_BUCKET_HERE"}} -``` - -### Find results - -Wait for the job to complete by streaming the logs with `cortex logs ` or watching for the job status to change with `cortex get --watch`. - -```bash -$ cortex logs image-classifier 69d6faf82e4660d3 --env aws - -started enqueuing batches to queue -enqueuing contents from file s3://cortex-examples/image-classifier/urls_0.json -enqueuing contents from file s3://cortex-examples/image-classifier/urls_1.json -completed enqueuing a total of 8 batches -spinning up workers... 
-2020-08-07 15:11:21.364179:cortex:pid-25:INFO:processing batch 1de0bc65-04ea-4b9e-9e96-5a0bb52fcc37 -... -2020-08-07 15:11:45.461032:cortex:pid-25:INFO:no batches left in queue, job has been completed -``` - -The status of your job, which you can get from `cortex get `, should change from `running` to `succeeded` once the job has completed. If it changes to a different status, you may be able to find the stacktrace using `cortex logs `. If your job has completed successfully, you can view the results of the image classification in the S3 directory you specified in the job submission. - -Using the AWS CLI: - -```bash -$ aws s3 ls $CORTEX_DEST_S3_DIR// - 161f9fda-fd08-44f3-b983-4529f950e40b.json - 40100ffb-6824-4560-8ca4-7c0d14273e05.json - 6d1c933c-0ddf-4316-9956-046cd731c5ab.json - ... - aggregated_results.json -``` - -You can download the aggregated results file with `aws s3 cp $CORTEX_DEST_S3_DIR//aggregated_results.json .` and confirm that there are 16 classifications. - -
- -## Alternative job submission: images in S3 - -Let's assume that rather downloading urls on the internet, you have an S3 directory containing the images. We can specify `file_path_lister` in the job request to get the list of S3 urls for the images, partition the list of S3 urls into batches, and place them on a queue that will be consumed by the workers. - -We'll classify the 16 images that can be found here `s3://cortex-examples/image-classifier/samples`. You can use AWS CLI to verify that there are 16 images `aws s3 ls s3://cortex-examples/image-classifier/samples/`. - -### Dry run - -Let's do a dry run to make sure the correct list of images will be submitted to the job. - -```bash -$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir -$ curl $BATCH_API_ENDPOINT?dryRun=true \ --X POST -H "Content-Type: application/json" \ --d @- <` then type `EOF`. - -You should expect a response like this: - -```text -s3://cortex-examples/image-classifier/samples/img_0.jpg -s3://cortex-examples/image-classifier/samples/img_1.jpg -... -s3://cortex-examples/image-classifier/samples/img_8.jpg -s3://cortex-examples/image-classifier/samples/img_9.jpg -validations passed -``` - -### Classify images in S3 - -Let's actually submit the job now. Your Batch API will get all of the input S3 files based on `s3_paths` and will apply the filters specified in `includes` and `excludes`. - -```bash -$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir -$ curl $BATCH_API_ENDPOINT \ --X POST -H "Content-Type: application/json" \ --d @- <` then type `EOF`. - -You should get a response like this: - -```json -{"job_id":"69d6f8a472f0e1e5","api_name":"image-classifier", "config":{"dest_s3_dir": "YOUR_S3_BUCKET_HERE"}} -``` - -### Verify results - -Wait for the job to complete by streaming the logs with `cortex logs ` or watching for the job status to change with `cortex get --watch`. - -```bash -$ cortex logs image-classifier 69d6f8a472f0e1e5 --env aws - -started enqueuing batches to queue -completed enqueuing a total of 8 batches -spinning up workers... -2020-07-18 21:35:34.186348:cortex:pid-1:INFO:downloading the project code -... -2020-08-07 15:49:10.889839:cortex:pid-25:INFO:processing batch d0e695bc-a975-4115-a60f-0a55c743fc57 -2020-08-07 15:49:31.188943:cortex:pid-25:INFO:executing on_job_complete -2020-08-07 15:49:31.362053:cortex:pid-25:INFO:no batches left in queue, job has been completed -``` - -The status of your job, which you can get from `cortex get `, should change from `running` to `succeeded` once the job has completed. If it changes to a different status, you may be able to find the stacktrace using `cortex logs `. If your job has completed successfully, you can view the results of the image classification in the S3 directory you specified in the job submission. - -Using the AWS CLI: - -```bash -$ aws s3 ls $CORTEX_DEST_S3_DIR// - 6bee7412-4c16-4d9f-ab3e-e88669cf7a89.json - 3c45b4b3-953e-4226-865b-75f3961dcf95.json - d0e695bc-a975-4115-a60f-0a55c743fc57.json - ... - aggregated_results.json -``` - -You can download the aggregated results file with `aws s3 cp $CORTEX_DEST_S3_DIR//aggregated_results.json .` and confirm that there are 16 classifications. - -
- -## Stopping a Job - -You can stop a running job by sending a DELETE request to `/`. - -```bash -$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier -$ curl -X DELETE $BATCH_API_ENDPOINT?jobID=69d96a01ea55da8c - -stopped job 69d96a01ea55da8c -``` - -You can also use the Cortex CLI `cortex delete `. - -```bash -$ cortex delete image-classifier 69d96a01ea55da8c --env aws - -stopped job 69d96a01ea55da8c -``` - -
- -## Cleanup - -Run `cortex delete` to delete the API: - -```bash -$ cortex delete image-classifier --env aws - -deleting image-classifier -``` - -Running `cortex delete` will stop all in progress jobs for the API and will delete job history for that API. It will not spin down your cluster. diff --git a/docs/tutorials/batch/cortex.yaml b/docs/tutorials/batch/cortex.yaml deleted file mode 100644 index 35ea4e991a..0000000000 --- a/docs/tutorials/batch/cortex.yaml +++ /dev/null @@ -1,9 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -- name: image-classifier - kind: BatchAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 diff --git a/docs/tutorials/batch/predictor.py b/docs/tutorials/batch/predictor.py deleted file mode 100644 index 293c466fd3..0000000000 --- a/docs/tutorials/batch/predictor.py +++ /dev/null @@ -1,81 +0,0 @@ -# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) - -import os -import requests -import torch -import torchvision -from torchvision import transforms -from PIL import Image -from io import BytesIO -import boto3 -import json -import re - - -class PythonPredictor: - def __init__(self, config, job_spec): - self.model = torchvision.models.alexnet(pretrained=True).eval() - - normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) - self.preprocess = transforms.Compose( - [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] - ) - - self.labels = requests.get( - "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" - ).text.split("\n")[1:] - - if len(config.get("dest_s3_dir", "")) == 0: - raise Exception("'dest_s3_dir' field was not provided in job submission") - - self.s3 = boto3.client("s3") - - self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() - self.key = os.path.join(self.key, job_spec["job_id"]) - - def predict(self, payload, batch_id): - tensor_list = [] - - # download and preprocess each image - for image_url in payload: - if image_url.startswith("s3://"): - bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() - image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() - else: - image_bytes = requests.get(image_url).content - - img_pil = Image.open(BytesIO(image_bytes)) - tensor_list.append(self.preprocess(img_pil)) - - # classify the batch of images - img_tensor = torch.stack(tensor_list) - with torch.no_grad(): - prediction = self.model(img_tensor) - _, indices = prediction.max(1) - - # extract predicted classes - results = [ - {"url": payload[i], "class": self.labels[class_idx]} - for i, class_idx in enumerate(indices) - ] - json_output = json.dumps(results) - - # save results - self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) - - def on_job_complete(self): - all_results = [] - - # aggregate all classifications - paginator = self.s3.get_paginator("list_objects_v2") - for page in paginator.paginate(Bucket=self.bucket, Prefix=self.key): - for obj in page["Contents"]: - body = self.s3.get_object(Bucket=self.bucket, Key=obj["Key"])["Body"] - all_results += json.loads(body.read().decode("utf8")) - - # save single 
file containing aggregated classifications - self.s3.put_object( - Bucket=self.bucket, - Key=os.path.join(self.key, "aggregated_results.json"), - Body=json.dumps(all_results), - ) diff --git a/docs/tutorials/batch/requirements.txt b/docs/tutorials/batch/requirements.txt deleted file mode 100644 index 2c0ef31b51..0000000000 --- a/docs/tutorials/batch/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -torch -torchvision -boto3 -pillow diff --git a/docs/tutorials/batch/sample.json b/docs/tutorials/batch/sample.json deleted file mode 100644 index eb45c463fd..0000000000 --- a/docs/tutorials/batch/sample.json +++ /dev/null @@ -1,3 +0,0 @@ -[ - "https://i.imgur.com/PzXprwl.jpg" -] From f2513735e5a18f083d32f851c7e19a3e4ecf61f7 Mon Sep 17 00:00:00 2001 From: vishal Date: Mon, 7 Dec 2020 18:32:15 -0500 Subject: [PATCH 11/36] Move examples directory to test folder --- test/README.md | 67 ++ test/batch/image-classifier/README.md | 580 ++++++++++ test/batch/image-classifier/cortex.yaml | 9 + test/batch/image-classifier/predictor.py | 58 + test/batch/image-classifier/requirements.txt | 4 + test/batch/image-classifier/sample.json | 3 + test/batch/onnx/README.md | 6 + test/batch/onnx/cortex.yaml | 10 + test/batch/onnx/predictor.py | 64 ++ test/batch/onnx/requirements.txt | 3 + test/batch/tensorflow/README.md | 6 + test/batch/tensorflow/cortex.yaml | 10 + test/batch/tensorflow/predictor.py | 60 + test/batch/tensorflow/requirements.txt | 1 + test/keras/document-denoiser/README.md | 46 + test/keras/document-denoiser/cortex.yaml | 12 + test/keras/document-denoiser/predictor.py | 86 ++ test/keras/document-denoiser/requirements.txt | 5 + test/keras/document-denoiser/sample.json | 3 + test/keras/document-denoiser/trainer.ipynb | 620 ++++++++++ test/live-reloading/onnx/README.md | 7 + .../python/mpg-estimator/cortex.yaml | 8 + .../python/mpg-estimator/predictor.py | 27 + .../python/mpg-estimator/requirements.txt | 4 + .../python/mpg-estimator/sample.json | 7 + test/live-reloading/tensorflow/README.md | 11 + .../onnx/multi-model-classifier/README.md | 77 ++ .../onnx/multi-model-classifier/cortex.yaml | 22 + .../onnx/multi-model-classifier/predictor.py | 99 ++ .../multi-model-classifier/requirements.txt | 2 + .../onnx/multi-model-classifier/sample.json | 3 + .../python/mpg-estimator/README.md | 75 ++ .../python/mpg-estimator/cortex.yaml | 13 + .../python/mpg-estimator/predictor.py | 28 + .../python/mpg-estimator/requirements.txt | 4 + .../python/mpg-estimator/sample.json | 7 + .../multi-model-classifier/README.md | 77 ++ .../multi-model-classifier/cortex.yaml | 32 + .../multi-model-classifier/predictor.py | 63 ++ .../multi-model-classifier/requirements.txt | 1 + .../multi-model-classifier/sample-image.json | 3 + .../multi-model-classifier/sample-iris.json | 8 + test/onnx/iris-classifier/README.md | 3 + test/onnx/iris-classifier/cortex.yaml | 10 + test/onnx/iris-classifier/predictor.py | 20 + test/onnx/iris-classifier/sample.json | 6 + test/onnx/iris-classifier/xgboost.ipynb | 244 ++++ test/onnx/multi-model-classifier/README.md | 69 ++ test/onnx/multi-model-classifier/cortex.yaml | 20 + test/onnx/multi-model-classifier/predictor.py | 98 ++ .../multi-model-classifier/requirements.txt | 2 + test/onnx/multi-model-classifier/sample.json | 3 + test/onnx/yolov5-youtube/README.md | 61 + test/onnx/yolov5-youtube/conda-packages.txt | 3 + test/onnx/yolov5-youtube/cortex.yaml | 13 + test/onnx/yolov5-youtube/labels.json | 82 ++ test/onnx/yolov5-youtube/predictor.py | 65 ++ test/onnx/yolov5-youtube/requirements.txt | 3 + 
test/onnx/yolov5-youtube/sample.json | 3 + test/onnx/yolov5-youtube/utils.py | 130 +++ test/pytorch/answer-generator/README.md | 3 + test/pytorch/answer-generator/cortex.yaml | 11 + test/pytorch/answer-generator/generator.py | 44 + test/pytorch/answer-generator/predictor.py | 36 + .../pytorch/answer-generator/requirements.txt | 3 + test/pytorch/answer-generator/sample.json | 3 + .../image-classifier-alexnet/README.md | 3 + .../image-classifier-alexnet/cortex.yaml | 11 + .../image-classifier-alexnet/predictor.py | 39 + .../image-classifier-alexnet/requirements.txt | 2 + .../image-classifier-alexnet/sample.json | 3 + .../image-classifier-resnet50/README.md | 59 + .../image-classifier-resnet50/cortex.yaml | 15 + .../image-classifier-resnet50/cortex_gpu.yaml | 16 + .../image-classifier-resnet50/cortex_inf.yaml | 16 + .../generate_resnet50_models.ipynb | 121 ++ .../image-classifier-resnet50/predictor.py | 93 ++ .../image-classifier-resnet50/sample.json | 3 + test/pytorch/iris-classifier/README.md | 3 + test/pytorch/iris-classifier/cortex.yaml | 11 + test/pytorch/iris-classifier/model.py | 59 + test/pytorch/iris-classifier/predictor.py | 50 + test/pytorch/iris-classifier/requirements.txt | 2 + test/pytorch/iris-classifier/sample.json | 6 + test/pytorch/language-identifier/README.md | 3 + test/pytorch/language-identifier/cortex.yaml | 9 + test/pytorch/language-identifier/predictor.py | 18 + .../language-identifier/requirements.txt | 2 + test/pytorch/language-identifier/sample.json | 3 + .../multi-model-text-analyzer/README.md | 51 + .../multi-model-text-analyzer/cortex.yaml | 11 + .../multi-model-text-analyzer/predictor.py | 25 + .../requirements.txt | 2 + .../sample-sentiment.json | 3 + .../sample-summarizer.json | 3 + test/pytorch/object-detector/README.md | 3 + test/pytorch/object-detector/coco_labels.txt | 91 ++ test/pytorch/object-detector/cortex.yaml | 11 + test/pytorch/object-detector/predictor.py | 49 + test/pytorch/object-detector/requirements.txt | 2 + test/pytorch/object-detector/sample.json | 4 + test/pytorch/question-generator/cortex.yaml | 10 + .../question-generator/dependencies.sh | 4 + test/pytorch/question-generator/predictor.py | 36 + .../question-generator/requirements.txt | 4 + test/pytorch/question-generator/sample.json | 4 + test/pytorch/reading-comprehender/README.md | 3 + test/pytorch/reading-comprehender/cortex.yaml | 11 + .../pytorch/reading-comprehender/predictor.py | 25 + .../reading-comprehender/requirements.txt | 1 + test/pytorch/reading-comprehender/sample.json | 4 + test/pytorch/search-completer/README.md | 3 + test/pytorch/search-completer/cortex.yaml | 11 + test/pytorch/search-completer/predictor.py | 20 + .../pytorch/search-completer/requirements.txt | 5 + test/pytorch/search-completer/sample.json | 3 + test/pytorch/sentiment-analyzer/README.md | 3 + test/pytorch/sentiment-analyzer/cortex.yaml | 10 + test/pytorch/sentiment-analyzer/predictor.py | 15 + .../sentiment-analyzer/requirements.txt | 2 + test/pytorch/sentiment-analyzer/sample.json | 3 + test/pytorch/text-generator/README.md | 192 ++++ test/pytorch/text-generator/deploy.ipynb | 80 ++ test/pytorch/text-generator/predictor.py | 17 + test/pytorch/text-generator/requirements.txt | 2 + test/pytorch/text-summarizer/README.md | 5 + test/pytorch/text-summarizer/cortex.yaml | 11 + test/pytorch/text-summarizer/predictor.py | 18 + test/pytorch/text-summarizer/requirements.txt | 2 + test/pytorch/text-summarizer/sample.json | 3 + test/sklearn/iris-classifier/README.md | 3 + test/sklearn/iris-classifier/cortex.yaml | 
15 + test/sklearn/iris-classifier/predictor.py | 31 + test/sklearn/iris-classifier/requirements.txt | 2 + test/sklearn/iris-classifier/sample.json | 6 + test/sklearn/iris-classifier/trainer.py | 25 + test/sklearn/mpg-estimator/README.md | 3 + test/sklearn/mpg-estimator/cortex.yaml | 11 + test/sklearn/mpg-estimator/predictor.py | 41 + test/sklearn/mpg-estimator/requirements.txt | 4 + test/sklearn/mpg-estimator/sample.json | 7 + test/sklearn/mpg-estimator/trainer.py | 25 + test/spacy/entity-recognizer/README.md | 3 + test/spacy/entity-recognizer/cortex.yaml | 10 + test/spacy/entity-recognizer/predictor.py | 22 + test/spacy/entity-recognizer/requirements.txt | 1 + test/spacy/entity-recognizer/sample.json | 3 + .../image-classifier-inception/README.md | 3 + .../image-classifier-inception/cortex.yaml | 13 + .../cortex_server_side_batching.yaml | 17 + .../inception.ipynb | 211 ++++ .../image-classifier-inception/predictor.py | 21 + .../requirements.txt | 1 + .../image-classifier-inception/sample.json | 3 + .../image-classifier-resnet50/README.md | 90 ++ .../image-classifier-resnet50/cortex.yaml | 18 + .../image-classifier-resnet50/cortex_gpu.yaml | 19 + .../cortex_gpu_server_side_batching.yaml | 22 + .../image-classifier-resnet50/cortex_inf.yaml | 21 + .../cortex_inf_server_side_batching.yaml | 24 + .../generate_gpu_resnet50_model.ipynb | 131 +++ .../generate_resnet50_models.ipynb | 178 +++ .../image-classifier-resnet50/predictor.py | 63 ++ .../requirements.txt | 1 + .../image-classifier-resnet50/sample.bin | Bin 0 -> 8680 bytes .../image-classifier-resnet50/sample.json | 3 + test/tensorflow/iris-classifier/README.md | 3 + test/tensorflow/iris-classifier/cortex.yaml | 10 + test/tensorflow/iris-classifier/predictor.py | 13 + test/tensorflow/iris-classifier/sample.json | 6 + .../iris-classifier/tensorflow.ipynb | 296 +++++ .../tensorflow/license-plate-reader/README.md | 175 +++ .../license-plate-reader/config.json | 8 + .../license-plate-reader/cortex_full.yaml | 35 + .../license-plate-reader/cortex_lite.yaml | 14 + .../license-plate-reader/predictor_crnn.py | 44 + .../license-plate-reader/predictor_lite.py | 120 ++ .../license-plate-reader/predictor_yolo.py | 46 + .../license-plate-reader/requirements.txt | 5 + .../license-plate-reader/sample_inference.py | 100 ++ .../license-plate-reader/utils/__init__.py | 1 + .../license-plate-reader/utils/bbox.py | 111 ++ .../license-plate-reader/utils/colors.py | 100 ++ .../license-plate-reader/utils/preprocess.py | 59 + .../license-plate-reader/utils/utils.py | 160 +++ .../multi-model-classifier/README.md | 69 ++ .../multi-model-classifier/cortex.yaml | 30 + .../multi-model-classifier/predictor.py | 62 + .../multi-model-classifier/requirements.txt | 1 + .../multi-model-classifier/sample-image.json | 3 + .../multi-model-classifier/sample-iris.json | 8 + test/tensorflow/sentiment-analyzer/README.md | 3 + test/tensorflow/sentiment-analyzer/bert.ipynb | 1007 +++++++++++++++++ .../tensorflow/sentiment-analyzer/cortex.yaml | 13 + .../sentiment-analyzer/predictor.py | 29 + .../sentiment-analyzer/requirements.txt | 5 + .../tensorflow/sentiment-analyzer/sample.json | 3 + test/tensorflow/text-generator/README.md | 3 + test/tensorflow/text-generator/cortex.yaml | 11 + test/tensorflow/text-generator/encoder.py | 118 ++ test/tensorflow/text-generator/gpt-2.ipynb | 383 +++++++ test/tensorflow/text-generator/predictor.py | 24 + .../text-generator/requirements.txt | 2 + test/tensorflow/text-generator/sample.json | 3 + test/traffic-splitter/README.md | 111 ++ 
test/traffic-splitter/cortex.yaml | 28 + test/traffic-splitter/model.py | 59 + test/traffic-splitter/onnx_predictor.py | 20 + test/traffic-splitter/pytorch_predictor.py | 50 + test/traffic-splitter/sample.json | 6 + test/utils/README.md | 36 + test/utils/throughput_test.py | 179 +++ 212 files changed, 9098 insertions(+) create mode 100644 test/README.md create mode 100644 test/batch/image-classifier/README.md create mode 100644 test/batch/image-classifier/cortex.yaml create mode 100644 test/batch/image-classifier/predictor.py create mode 100644 test/batch/image-classifier/requirements.txt create mode 100644 test/batch/image-classifier/sample.json create mode 100644 test/batch/onnx/README.md create mode 100644 test/batch/onnx/cortex.yaml create mode 100644 test/batch/onnx/predictor.py create mode 100644 test/batch/onnx/requirements.txt create mode 100644 test/batch/tensorflow/README.md create mode 100644 test/batch/tensorflow/cortex.yaml create mode 100644 test/batch/tensorflow/predictor.py create mode 100644 test/batch/tensorflow/requirements.txt create mode 100644 test/keras/document-denoiser/README.md create mode 100644 test/keras/document-denoiser/cortex.yaml create mode 100644 test/keras/document-denoiser/predictor.py create mode 100644 test/keras/document-denoiser/requirements.txt create mode 100644 test/keras/document-denoiser/sample.json create mode 100644 test/keras/document-denoiser/trainer.ipynb create mode 100644 test/live-reloading/onnx/README.md create mode 100644 test/live-reloading/python/mpg-estimator/cortex.yaml create mode 100644 test/live-reloading/python/mpg-estimator/predictor.py create mode 100644 test/live-reloading/python/mpg-estimator/requirements.txt create mode 100644 test/live-reloading/python/mpg-estimator/sample.json create mode 100644 test/live-reloading/tensorflow/README.md create mode 100644 test/model-caching/onnx/multi-model-classifier/README.md create mode 100644 test/model-caching/onnx/multi-model-classifier/cortex.yaml create mode 100644 test/model-caching/onnx/multi-model-classifier/predictor.py create mode 100644 test/model-caching/onnx/multi-model-classifier/requirements.txt create mode 100644 test/model-caching/onnx/multi-model-classifier/sample.json create mode 100644 test/model-caching/python/mpg-estimator/README.md create mode 100644 test/model-caching/python/mpg-estimator/cortex.yaml create mode 100644 test/model-caching/python/mpg-estimator/predictor.py create mode 100644 test/model-caching/python/mpg-estimator/requirements.txt create mode 100644 test/model-caching/python/mpg-estimator/sample.json create mode 100644 test/model-caching/tensorflow/multi-model-classifier/README.md create mode 100644 test/model-caching/tensorflow/multi-model-classifier/cortex.yaml create mode 100644 test/model-caching/tensorflow/multi-model-classifier/predictor.py create mode 100644 test/model-caching/tensorflow/multi-model-classifier/requirements.txt create mode 100644 test/model-caching/tensorflow/multi-model-classifier/sample-image.json create mode 100644 test/model-caching/tensorflow/multi-model-classifier/sample-iris.json create mode 100644 test/onnx/iris-classifier/README.md create mode 100644 test/onnx/iris-classifier/cortex.yaml create mode 100644 test/onnx/iris-classifier/predictor.py create mode 100644 test/onnx/iris-classifier/sample.json create mode 100644 test/onnx/iris-classifier/xgboost.ipynb create mode 100644 test/onnx/multi-model-classifier/README.md create mode 100644 test/onnx/multi-model-classifier/cortex.yaml create mode 100644 
test/onnx/multi-model-classifier/predictor.py create mode 100644 test/onnx/multi-model-classifier/requirements.txt create mode 100644 test/onnx/multi-model-classifier/sample.json create mode 100644 test/onnx/yolov5-youtube/README.md create mode 100644 test/onnx/yolov5-youtube/conda-packages.txt create mode 100644 test/onnx/yolov5-youtube/cortex.yaml create mode 100644 test/onnx/yolov5-youtube/labels.json create mode 100644 test/onnx/yolov5-youtube/predictor.py create mode 100644 test/onnx/yolov5-youtube/requirements.txt create mode 100644 test/onnx/yolov5-youtube/sample.json create mode 100644 test/onnx/yolov5-youtube/utils.py create mode 100644 test/pytorch/answer-generator/README.md create mode 100644 test/pytorch/answer-generator/cortex.yaml create mode 100644 test/pytorch/answer-generator/generator.py create mode 100644 test/pytorch/answer-generator/predictor.py create mode 100644 test/pytorch/answer-generator/requirements.txt create mode 100644 test/pytorch/answer-generator/sample.json create mode 100644 test/pytorch/image-classifier-alexnet/README.md create mode 100644 test/pytorch/image-classifier-alexnet/cortex.yaml create mode 100644 test/pytorch/image-classifier-alexnet/predictor.py create mode 100644 test/pytorch/image-classifier-alexnet/requirements.txt create mode 100644 test/pytorch/image-classifier-alexnet/sample.json create mode 100644 test/pytorch/image-classifier-resnet50/README.md create mode 100644 test/pytorch/image-classifier-resnet50/cortex.yaml create mode 100644 test/pytorch/image-classifier-resnet50/cortex_gpu.yaml create mode 100644 test/pytorch/image-classifier-resnet50/cortex_inf.yaml create mode 100644 test/pytorch/image-classifier-resnet50/generate_resnet50_models.ipynb create mode 100644 test/pytorch/image-classifier-resnet50/predictor.py create mode 100644 test/pytorch/image-classifier-resnet50/sample.json create mode 100644 test/pytorch/iris-classifier/README.md create mode 100644 test/pytorch/iris-classifier/cortex.yaml create mode 100644 test/pytorch/iris-classifier/model.py create mode 100644 test/pytorch/iris-classifier/predictor.py create mode 100644 test/pytorch/iris-classifier/requirements.txt create mode 100644 test/pytorch/iris-classifier/sample.json create mode 100644 test/pytorch/language-identifier/README.md create mode 100644 test/pytorch/language-identifier/cortex.yaml create mode 100644 test/pytorch/language-identifier/predictor.py create mode 100644 test/pytorch/language-identifier/requirements.txt create mode 100644 test/pytorch/language-identifier/sample.json create mode 100644 test/pytorch/multi-model-text-analyzer/README.md create mode 100644 test/pytorch/multi-model-text-analyzer/cortex.yaml create mode 100644 test/pytorch/multi-model-text-analyzer/predictor.py create mode 100644 test/pytorch/multi-model-text-analyzer/requirements.txt create mode 100644 test/pytorch/multi-model-text-analyzer/sample-sentiment.json create mode 100644 test/pytorch/multi-model-text-analyzer/sample-summarizer.json create mode 100644 test/pytorch/object-detector/README.md create mode 100644 test/pytorch/object-detector/coco_labels.txt create mode 100644 test/pytorch/object-detector/cortex.yaml create mode 100644 test/pytorch/object-detector/predictor.py create mode 100644 test/pytorch/object-detector/requirements.txt create mode 100644 test/pytorch/object-detector/sample.json create mode 100644 test/pytorch/question-generator/cortex.yaml create mode 100644 test/pytorch/question-generator/dependencies.sh create mode 100644 
test/pytorch/question-generator/predictor.py create mode 100644 test/pytorch/question-generator/requirements.txt create mode 100644 test/pytorch/question-generator/sample.json create mode 100644 test/pytorch/reading-comprehender/README.md create mode 100644 test/pytorch/reading-comprehender/cortex.yaml create mode 100644 test/pytorch/reading-comprehender/predictor.py create mode 100644 test/pytorch/reading-comprehender/requirements.txt create mode 100644 test/pytorch/reading-comprehender/sample.json create mode 100644 test/pytorch/search-completer/README.md create mode 100644 test/pytorch/search-completer/cortex.yaml create mode 100644 test/pytorch/search-completer/predictor.py create mode 100644 test/pytorch/search-completer/requirements.txt create mode 100644 test/pytorch/search-completer/sample.json create mode 100644 test/pytorch/sentiment-analyzer/README.md create mode 100644 test/pytorch/sentiment-analyzer/cortex.yaml create mode 100644 test/pytorch/sentiment-analyzer/predictor.py create mode 100644 test/pytorch/sentiment-analyzer/requirements.txt create mode 100644 test/pytorch/sentiment-analyzer/sample.json create mode 100644 test/pytorch/text-generator/README.md create mode 100644 test/pytorch/text-generator/deploy.ipynb create mode 100644 test/pytorch/text-generator/predictor.py create mode 100644 test/pytorch/text-generator/requirements.txt create mode 100644 test/pytorch/text-summarizer/README.md create mode 100644 test/pytorch/text-summarizer/cortex.yaml create mode 100644 test/pytorch/text-summarizer/predictor.py create mode 100644 test/pytorch/text-summarizer/requirements.txt create mode 100644 test/pytorch/text-summarizer/sample.json create mode 100644 test/sklearn/iris-classifier/README.md create mode 100644 test/sklearn/iris-classifier/cortex.yaml create mode 100644 test/sklearn/iris-classifier/predictor.py create mode 100644 test/sklearn/iris-classifier/requirements.txt create mode 100644 test/sklearn/iris-classifier/sample.json create mode 100644 test/sklearn/iris-classifier/trainer.py create mode 100644 test/sklearn/mpg-estimator/README.md create mode 100644 test/sklearn/mpg-estimator/cortex.yaml create mode 100644 test/sklearn/mpg-estimator/predictor.py create mode 100644 test/sklearn/mpg-estimator/requirements.txt create mode 100644 test/sklearn/mpg-estimator/sample.json create mode 100644 test/sklearn/mpg-estimator/trainer.py create mode 100644 test/spacy/entity-recognizer/README.md create mode 100644 test/spacy/entity-recognizer/cortex.yaml create mode 100644 test/spacy/entity-recognizer/predictor.py create mode 100644 test/spacy/entity-recognizer/requirements.txt create mode 100644 test/spacy/entity-recognizer/sample.json create mode 100644 test/tensorflow/image-classifier-inception/README.md create mode 100644 test/tensorflow/image-classifier-inception/cortex.yaml create mode 100644 test/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml create mode 100644 test/tensorflow/image-classifier-inception/inception.ipynb create mode 100644 test/tensorflow/image-classifier-inception/predictor.py create mode 100644 test/tensorflow/image-classifier-inception/requirements.txt create mode 100644 test/tensorflow/image-classifier-inception/sample.json create mode 100644 test/tensorflow/image-classifier-resnet50/README.md create mode 100644 test/tensorflow/image-classifier-resnet50/cortex.yaml create mode 100644 test/tensorflow/image-classifier-resnet50/cortex_gpu.yaml create mode 100644 
test/tensorflow/image-classifier-resnet50/cortex_gpu_server_side_batching.yaml create mode 100644 test/tensorflow/image-classifier-resnet50/cortex_inf.yaml create mode 100644 test/tensorflow/image-classifier-resnet50/cortex_inf_server_side_batching.yaml create mode 100644 test/tensorflow/image-classifier-resnet50/generate_gpu_resnet50_model.ipynb create mode 100644 test/tensorflow/image-classifier-resnet50/generate_resnet50_models.ipynb create mode 100644 test/tensorflow/image-classifier-resnet50/predictor.py create mode 100644 test/tensorflow/image-classifier-resnet50/requirements.txt create mode 100644 test/tensorflow/image-classifier-resnet50/sample.bin create mode 100644 test/tensorflow/image-classifier-resnet50/sample.json create mode 100644 test/tensorflow/iris-classifier/README.md create mode 100644 test/tensorflow/iris-classifier/cortex.yaml create mode 100644 test/tensorflow/iris-classifier/predictor.py create mode 100644 test/tensorflow/iris-classifier/sample.json create mode 100644 test/tensorflow/iris-classifier/tensorflow.ipynb create mode 100644 test/tensorflow/license-plate-reader/README.md create mode 100644 test/tensorflow/license-plate-reader/config.json create mode 100644 test/tensorflow/license-plate-reader/cortex_full.yaml create mode 100644 test/tensorflow/license-plate-reader/cortex_lite.yaml create mode 100644 test/tensorflow/license-plate-reader/predictor_crnn.py create mode 100644 test/tensorflow/license-plate-reader/predictor_lite.py create mode 100644 test/tensorflow/license-plate-reader/predictor_yolo.py create mode 100644 test/tensorflow/license-plate-reader/requirements.txt create mode 100644 test/tensorflow/license-plate-reader/sample_inference.py create mode 100644 test/tensorflow/license-plate-reader/utils/__init__.py create mode 100644 test/tensorflow/license-plate-reader/utils/bbox.py create mode 100644 test/tensorflow/license-plate-reader/utils/colors.py create mode 100644 test/tensorflow/license-plate-reader/utils/preprocess.py create mode 100644 test/tensorflow/license-plate-reader/utils/utils.py create mode 100644 test/tensorflow/multi-model-classifier/README.md create mode 100644 test/tensorflow/multi-model-classifier/cortex.yaml create mode 100644 test/tensorflow/multi-model-classifier/predictor.py create mode 100644 test/tensorflow/multi-model-classifier/requirements.txt create mode 100644 test/tensorflow/multi-model-classifier/sample-image.json create mode 100644 test/tensorflow/multi-model-classifier/sample-iris.json create mode 100644 test/tensorflow/sentiment-analyzer/README.md create mode 100644 test/tensorflow/sentiment-analyzer/bert.ipynb create mode 100644 test/tensorflow/sentiment-analyzer/cortex.yaml create mode 100644 test/tensorflow/sentiment-analyzer/predictor.py create mode 100644 test/tensorflow/sentiment-analyzer/requirements.txt create mode 100644 test/tensorflow/sentiment-analyzer/sample.json create mode 100644 test/tensorflow/text-generator/README.md create mode 100644 test/tensorflow/text-generator/cortex.yaml create mode 100644 test/tensorflow/text-generator/encoder.py create mode 100644 test/tensorflow/text-generator/gpt-2.ipynb create mode 100644 test/tensorflow/text-generator/predictor.py create mode 100644 test/tensorflow/text-generator/requirements.txt create mode 100644 test/tensorflow/text-generator/sample.json create mode 100644 test/traffic-splitter/README.md create mode 100644 test/traffic-splitter/cortex.yaml create mode 100644 test/traffic-splitter/model.py create mode 100644 test/traffic-splitter/onnx_predictor.py 
create mode 100644 test/traffic-splitter/pytorch_predictor.py create mode 100644 test/traffic-splitter/sample.json create mode 100644 test/utils/README.md create mode 100644 test/utils/throughput_test.py diff --git a/test/README.md b/test/README.md new file mode 100644 index 0000000000..1eb711f57d --- /dev/null +++ b/test/README.md @@ -0,0 +1,67 @@ +# Examples + +## TensorFlow + +- [Iris classification](tensorflow/iris-classifier): deploy a model to classify iris flowers. + +- [Text generation](tensorflow/text-generator): deploy OpenAI's GPT-2 to generate text. + +- [Sentiment analysis](tensorflow/sentiment-analyzer): deploy a BERT model for sentiment analysis. + +- [Image classification](tensorflow/image-classifier-inception): deploy an Inception model to classify images. + +- [Image classification](tensorflow/image-classifier-resnet50): deploy a ResNet50 model to classify images. + +- [License plate reader](tensorflow/license-plate-reader): deploy a YOLOv3 model (and others) to identify license plates in real time. + +- [Multi-model classification](tensorflow/multi-model-classifier): deploy 3 models (ResNet50, Iris, Inception) in a single API. + +## Keras + +- [Denoisify text documents](keras/document-denoiser): deploy an Autoencoder model to clean text document images of noise. + +## PyTorch + +- [Iris classification](pytorch/iris-classifier): deploy a model to classify iris flowers. + +- [Text generation](pytorch/text-generator): deploy Hugging Face's GPT-2 model to generate text. + +- [Sentiment analysis](pytorch/sentiment-analyzer): deploy a Hugging Face transformers model for sentiment analysis. + +- [Search completion](pytorch/search-completer): deploy a Facebook's RoBERTa model to complete search terms. + +- [Answer generation](pytorch/answer-generator): deploy Microsoft's DialoGPT model to answer questions. + +- [Text summarization](pytorch/text-summarizer): deploy a BART model (from Hugging Face's transformers library) to summarize text. + +- [Reading comprehension](pytorch/reading-comprehender): deploy an AllenNLP model for reading comprehension. + +- [Language identification](pytorch/language-identifier): deploy a fastText model to identify languages. + +- [Multi-model text analysis](pytorch/multi-model-text-analyzer): deploy 2 models (Sentiment and Summarization analyzers) in a single API. + +- [Image classification](pytorch/image-classifier-alexnet): deploy an AlexNet model from TorchVision to classify images. + +- [Image classification](pytorch/image-classifier-resnet50): deploy a ResNet50 model from TorchVision to classify images. + +- [Object detection](pytorch/object-detector): deploy a Faster R-CNN model from TorchVision to detect objects in images. + +- [Question generator](pytorch/question-generator): deploy a transformers model to generate questions given text and the correct answer. + +## ONNX + +- [Iris classification](onnx/iris-classifier): deploy an XGBoost model (exported in ONNX) to classify iris flowers. + +- [YOLOv5 YouTube detection](onnx/yolov5-youtube): deploy a YOLOv5 model trained on COCO val2017 dataset. + +- [Multi-model classification](onnx/multi-model-classifier): deploy 3 models (ResNet50, MobileNet, ShuffleNet) in a single API. + +## scikit-learn + +- [Iris classification](sklearn/iris-classifier): deploy a model to classify iris flowers. + +- [MPG estimation](sklearn/mpg-estimator): deploy a linear regression model to estimate MPG. + +## spacy + +- [Entity recognizer](spacy/entity-recognizer): deploy a spacy model for named entity recognition. 
diff --git a/test/batch/image-classifier/README.md b/test/batch/image-classifier/README.md new file mode 100644 index 0000000000..03cc827d35 --- /dev/null +++ b/test/batch/image-classifier/README.md @@ -0,0 +1,580 @@ +# Deploy models as Batch APIs + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example shows how to deploy a batch image classification api that accepts a list of image urls as input, downloads the images, classifies them, and writes the results to S3. + +**Batch APIs are only supported on a Cortex cluster (in AWS).** You can find cluster installation documentation [here](../../../docs/aws/install.md). + +## Pre-requisites + +* [Install](../../../docs/aws/install.md) Cortex and create a cluster +* Create an S3 bucket/directory to store the results of the batch job +* AWS CLI (optional) + +
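Before writing any code, it helps to keep the input and output shapes in mind: a job submission carries a list of image URLs, and the predictor defined below writes one small JSON record per image to S3. Roughly (the class label here is purely illustrative):

```python
# illustrative only -- the exact shapes are determined by predictor.py below
payload = [  # one batch of image urls, as delivered to predict()
    "https://i.imgur.com/PzXprwl.jpg",
    "https://i.imgur.com/E4cOSLw.jpg",
]

record = {  # one entry in the JSON written to S3 for each image
    "url": "https://i.imgur.com/PzXprwl.jpg",
    "class": "hotdog",  # hypothetical label; real labels come from ImageNetLabels.txt
}
```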
+ +## Implement your predictor + +1. Create a Python file named `predictor.py`. +1. Define a Predictor class with a constructor that loads and initializes an image-classifier from `torchvision`. +1. Add a `predict()` function that will accept a list of images urls (http:// or s3://), downloads them, performs inference, and writes the predictions to S3. +1. Specify an `on_job_complete()` function that aggregates the results and writes them to a single file named `aggregated_results.json` in S3. + +```python +# predictor.py + +import os +import requests +import torch +import torchvision +from torchvision import transforms +from PIL import Image +from io import BytesIO +import boto3 +import json +import re + + +class PythonPredictor: + def __init__(self, config, job_spec): + self.model = torchvision.models.alexnet(pretrained=True).eval() + + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.preprocess = transforms.Compose( + [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] + ) + + self.labels = requests.get( + "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" + ).text.split("\n")[1:] + + if len(config.get("dest_s3_dir", "")) == 0: + raise Exception("'dest_s3_dir' field was not provided in job submission") + + self.s3 = boto3.client("s3") + + self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() + self.key = os.path.join(self.key, job_spec["job_id"]) + + def predict(self, payload, batch_id): + tensor_list = [] + + # download and preprocess each image + for image_url in payload: + if image_url.startswith("s3://"): + bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() + image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() + else: + image_bytes = requests.get(image_url).content + + img_pil = Image.open(BytesIO(image_bytes)) + tensor_list.append(self.preprocess(img_pil)) + + # classify the batch of images + img_tensor = torch.stack(tensor_list) + with torch.no_grad(): + prediction = self.model(img_tensor) + _, indices = prediction.max(1) + + # extract predicted classes + results = [ + {"url": payload[i], "class": self.labels[class_idx]} + for i, class_idx in enumerate(indices) + ] + json_output = json.dumps(results) + + # save results + self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) + + def on_job_complete(self): + all_results = [] + + # aggregate all classifications + paginator = self.s3.get_paginator("list_objects_v2") + for page in paginator.paginate(Bucket=self.bucket, Prefix=self.key): + for obj in page["Contents"]: + body = self.s3.get_object(Bucket=self.bucket, Key=obj["Key"])["Body"] + all_results += json.loads(body.read().decode("utf8")) + + # save single file containing aggregated classifications + self.s3.put_object( + Bucket=self.bucket, + Key=os.path.join(self.key, "aggregated_results.json"), + Body=json.dumps(all_results), + ) +``` + +Here are the complete [Predictor docs](../../../docs/deployments/batch-api/predictors.md). + +
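Before deploying, you can exercise the class locally with a short smoke test. This is a sketch under a few assumptions: `predictor.py` and its dependencies are importable locally, your local AWS credentials can write to the bucket in `dest_s3_dir` (a placeholder below), and the `job_id` is a made-up value for the test (the real one is injected by Cortex at runtime). Note that constructing the predictor downloads the AlexNet weights and the labels file, and `predict()` writes a results file to the bucket.

```python
# smoke_test.py -- optional local check, not required for deployment
from predictor import PythonPredictor

config = {"dest_s3_dir": "s3://my-bucket/dir"}  # placeholder: a bucket/prefix you can write to
job_spec = {"job_id": "local-test"}             # hypothetical job id, used only for this test

predictor = PythonPredictor(config, job_spec)

# run one small batch; results land at s3://my-bucket/dir/local-test/test-batch.json
predictor.predict(
    payload=["https://i.imgur.com/PzXprwl.jpg", "https://i.imgur.com/E4cOSLw.jpg"],
    batch_id="test-batch",
)
```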
+
+## Specify your Python dependencies
+
+Create a `requirements.txt` file to specify the dependencies needed by `predictor.py`. Cortex will automatically install them into your runtime once you deploy:
+
+```text
+# requirements.txt
+
+boto3
+torch
+torchvision
+pillow
+```
+
+ +## Configure your API + +Create a `cortex.yaml` file and add the configuration below. An `api` with `kind: BatchAPI` will expose your model as an endpoint that will orchestrate offline batch inference across multiple workers upon receiving job requests. The configuration below defines how much `compute` each worker requires and your `predictor.py` determines how each batch should be processed. + +```yaml +# cortex.yaml + +- name: image-classifier + kind: BatchAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 +``` + +Here are the complete [API configuration docs](../../../docs/deployments/batch-api/api-configuration.md). + +
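+The fields shown above are the minimum needed for this tutorial. Each worker's resources are controlled by the `compute` block; for example, to run each worker on a GPU instance you could request a GPU as well (illustrative, and it assumes your cluster has GPU instances available):
+
+```yaml
+# cortex.yaml (illustrative variant)
+
+- name: image-classifier
+  kind: BatchAPI
+  predictor:
+    type: python
+    path: predictor.py
+  compute:
+    cpu: 1
+    gpu: 1  # optional: schedule each worker on a GPU
+```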
+ +## Deploy your Batch API + +`cortex deploy` takes your model, your `predictor.py` implementation, and your configuration from `cortex.yaml` and creates an endpoint that can receive job submissions and manage running jobs. + +```bash +$ cortex deploy --env aws + +created image-classifier (BatchAPI) +``` + +Get the endpoint for your Batch API with `cortex get image-classifier`: + +```bash +$ cortex get image-classifier --env aws + +no submitted jobs + +endpoint: https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier +``` + +
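+The endpoint printed by `cortex get` is what the job submissions below are sent to. One way to capture it into the environment variable used in the next steps (assuming the output format shown above):
+
+```bash
+$ export BATCH_API_ENDPOINT=$(cortex get image-classifier --env aws | grep "endpoint:" | awk '{print $2}')
+
+$ echo $BATCH_API_ENDPOINT
+https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+```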
+
+## Set up the destination S3 directory
+
+Our `predictor.py` implementation writes results to an S3 directory. Before submitting a job, create an S3 directory to store the output of the batch job; it must be accessible by the credentials used to create your Cortex cluster.
+
+Export the S3 directory to an environment variable:
+
+```bash
+$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir
+```
+
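+If the bucket doesn't exist yet, you can create it and confirm that your credentials can write to it with the AWS CLI (bucket name illustrative):
+
+```bash
+$ aws s3 mb s3://my-bucket
+
+$ echo test | aws s3 cp - $CORTEX_DEST_S3_DIR/.write-test
+$ aws s3 rm $CORTEX_DEST_S3_DIR/.write-test
+```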
+
+## Submit a job
+
+Now that you've deployed a Batch API, you are ready to submit jobs. You can provide image URLs directly in the request by specifying them in `item_list`. The curl command below shows how to submit image URLs in the request (replace the placeholder entries with your own image URLs):
+
+```bash
+$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir
+$ curl $BATCH_API_ENDPOINT \
+    -X POST -H "Content-Type: application/json" \
+    -d @- <<EOF
+{
+    "workers": 1,
+    "item_list": {
+        "items": [
+            "https://i.imgur.com/PzXprwl.jpg",
+            "<image-url-2>",
+            "<image-url-3>",
+            "<image-url-4>",
+            "<image-url-5>"
+        ],
+        "batch_size": 2
+    },
+    "config": {
+        "dest_s3_dir": "$CORTEX_DEST_S3_DIR"
+    }
+}
+EOF
+```
+
+Note: if you are prompted with `>` then type `EOF`.
+
+After submitting the job, you should get a response like this:
+
+```json
+{"job_id":"69d6faf82e4660d3","api_name":"image-classifier", "config":{"dest_s3_dir": "YOUR_S3_BUCKET_HERE"}}
+```
+
+Take note of the job id in the response.
+
+### List the jobs for your Batch API
+
+```bash
+$ cortex get image-classifier --env aws
+
+job id             status    progress   start time                 duration
+69d6faf82e4660d3   running   0/3        20 Jul 2020 01:07:44 UTC   3m26s
+
+endpoint: https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+```
+
+### Get the job status with an HTTP request
+
+You can make a GET request to your Batch API endpoint with the `jobID` query parameter to get the status of your job.
+
+```bash
+$ curl https://abcdefg.execute-api.us-west-2.amazonaws.com?jobID=69d6faf82e4660d3
+
+{
+    "job_status":{
+        "job_id":"69d6faf82e4660d3",
+        "api_name":"image-classifier",
+        ...
+    },
+    "endpoint":"https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier"
+}
+```
+
+### Get job status using Cortex CLI
+
+You can also use the Cortex CLI to get the status of your job using `cortex get <api_name> <job_id>`.
+
+```bash
+$ cortex get image-classifier 69d6faf82e4660d3 --env aws
+
+job id: 69d6faf82e4660d3
+status: running
+
+start time: 27 Jul 2020 15:02:25 UTC
+end time:   -
+duration:   42s
+
+batch stats
+total   succeeded   failed   avg time per batch
+3       0           0        -
+
+worker stats
+requested   initializing   running   failed   succeeded
+1           1              0         0        0
+
+job endpoint: https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier/69d6faf82e4660d3
+```
+
+### Stream logs
+
+You can stream logs in real time for debugging and monitoring purposes with `cortex logs <api_name> <job_id>`:
+
+```bash
+$ cortex logs image-classifier 69d6fdeb2d8e6647 --env aws
+
+started enqueuing batches to queue
+partitioning 5 items found in job submission into 3 batches of size 2
+completed enqueuing a total of 3 batches
+spinning up workers...
+...
+2020-08-07 14:44:05.557598:cortex:pid-25:INFO:processing batch c9136381-6dcc-45bd-bd97-cc9c66ccc6d6
+2020-08-07 14:44:26.037276:cortex:pid-25:INFO:executing on_job_complete
+2020-08-07 14:44:26.208972:cortex:pid-25:INFO:no batches left in queue, job has been completed
+```
+
+### Find your results
+
+Wait for the job to complete by streaming the logs with `cortex logs <api_name> <job_id>` or watching for the job status to change with `cortex get <api_name> <job_id> --watch`.
+
+The status of your job, which you can get from `cortex get <api_name> <job_id>`, should change from `running` to `succeeded` once the job has completed. If it changes to a different status, you may be able to find the stacktrace using `cortex logs <api_name> <job_id>`. If your job has completed successfully, you can view the results of the image classification in the S3 directory you specified in the job submission.
+
+Using the AWS CLI:
+
+```bash
+$ aws s3 ls $CORTEX_DEST_S3_DIR/<job_id>/
+    161f9fda-fd08-44f3-b983-4529f950e40b.json
+    40100ffb-6824-4560-8ca4-7c0d14273e05.json
+    c9136381-6dcc-45bd-bd97-cc9c66ccc6d6.json
+    aggregated_results.json
+```
+
+You can download the aggregated results file with `aws s3 cp $CORTEX_DEST_S3_DIR/<job_id>/aggregated_results.json .` and confirm that it contains one classification per image URL in your job submission.
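+
+If you prefer to submit jobs from code rather than with curl, the same request can be made with Python's `requests` package. This is a sketch that assumes the `BATCH_API_ENDPOINT` and `CORTEX_DEST_S3_DIR` environment variables from above and the `item_list` schema used in the curl example:
+
+```python
+# submit_job.py (illustrative)
+import os
+import requests
+
+job = {
+    "workers": 1,
+    "item_list": {
+        "items": ["https://i.imgur.com/PzXprwl.jpg"],  # image URLs (http:// or s3://)
+        "batch_size": 2,
+    },
+    "config": {"dest_s3_dir": os.environ["CORTEX_DEST_S3_DIR"]},
+}
+
+response = requests.post(os.environ["BATCH_API_ENDPOINT"], json=job)
+print(response.json())  # contains the job_id used by `cortex get` and `cortex logs`
+```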
+
+## Alternative job submission: image URLs in files
+
+In addition to providing the image URLs directly in the job submission request, it is possible to use image URLs stored in newline delimited JSON files in S3. A newline delimited JSON file has one complete JSON object per line.
+
+Two newline delimited JSON files containing image URLs for this tutorial have already been created for you and can be found at `s3://cortex-examples/image-classifier/`. If you have the AWS CLI installed, you can list the directory and you should be able to find the files (`urls_0.json` and `urls_1.json`).
+
+```text
+$ aws s3 ls s3://cortex-examples/image-classifier/
+                           PRE inception/
+...
+2020-07-27 14:19:30        506 urls_0.json
+2020-07-27 14:19:30        473 urls_1.json
+```
+
+To use JSON files as input data for the job, we will specify `delimited_files` in the job request. The Batch API will break up the JSON files into batches of the desired size and push them onto a queue that is consumed by the pool of workers.
+
+### Dry run
+
+Before we submit the job, let's perform a dry run to ensure that only the desired files will be read. You can perform a dry run by appending the `dryRun=true` query parameter to your job request.
+
+Get the endpoint from `cortex get image-classifier` if you haven't done so already.
+
+```bash
+$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir
+$ curl $BATCH_API_ENDPOINT?dryRun=true \
+-X POST -H "Content-Type: application/json" \
+-d @- <<EOF
+{
+    "workers": 1,
+    "delimited_files": {
+        "s3_paths": ["s3://cortex-examples/image-classifier/"],
+        "includes": ["**.json"],
+        "batch_size": 2
+    },
+    "config": {
+        "dest_s3_dir": "$CORTEX_DEST_S3_DIR"
+    }
+}
+EOF
+```
+
+Note: if you are prompted with `>` then type `EOF`.
+
+You should expect a response like this:
+
+```text
+s3://cortex-examples/image-classifier/urls_0.json
+s3://cortex-examples/image-classifier/urls_1.json
+validations passed
+```
+
+This shows that the correct files will be used as input for the job.
+
+### Classify image URLs stored in S3 files
+
+When you submit a job specifying `delimited_files`, your Batch API will get all of the input S3 files based on `s3_paths` and will apply the filters specified in `includes` and `excludes`. Then your Batch API will read each file, split on the newline characters, and parse each item as a JSON object. Each item in the file is treated as a single sample and will be grouped together into batches and then placed onto a queue that is consumed by the pool of workers.
+
+In this example, `urls_0.json` and `urls_1.json` each contain 8 URLs. Let's classify the images from the URLs listed in those 2 files.
+
+```bash
+$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir
+$ curl $BATCH_API_ENDPOINT \
+-X POST -H "Content-Type: application/json" \
+-d @- <<EOF
+{
+    "workers": 1,
+    "delimited_files": {
+        "s3_paths": ["s3://cortex-examples/image-classifier/"],
+        "includes": ["**.json"],
+        "batch_size": 2
+    },
+    "config": {
+        "dest_s3_dir": "$CORTEX_DEST_S3_DIR"
+    }
+}
+EOF
+```
+
+Note: if you are prompted with `>` then type `EOF`.
+
+After submitting this job, you should get a response like this:
+
+```json
+{"job_id":"69d6faf82e4660d3","api_name":"image-classifier", "config":{"dest_s3_dir": "YOUR_S3_BUCKET_HERE"}}
+```
+
+### Find results
+
+Wait for the job to complete by streaming the logs with `cortex logs <api_name> <job_id>` or watching for the job status to change with `cortex get <api_name> <job_id> --watch`.
+
+```bash
+$ cortex logs image-classifier 69d6faf82e4660d3 --env aws
+
+started enqueuing batches to queue
+enqueuing contents from file s3://cortex-examples/image-classifier/urls_0.json
+enqueuing contents from file s3://cortex-examples/image-classifier/urls_1.json
+completed enqueuing a total of 8 batches
+spinning up workers...
+2020-08-07 15:11:21.364179:cortex:pid-25:INFO:processing batch 1de0bc65-04ea-4b9e-9e96-5a0bb52fcc37
+...
+2020-08-07 15:11:45.461032:cortex:pid-25:INFO:no batches left in queue, job has been completed
+```
+
+The status of your job, which you can get from `cortex get <api_name> <job_id>`, should change from `running` to `succeeded` once the job has completed. If it changes to a different status, you may be able to find the stacktrace using `cortex logs <api_name> <job_id>`. If your job has completed successfully, you can view the results of the image classification in the S3 directory you specified in the job submission.
+
+Using the AWS CLI:
+
+```bash
+$ aws s3 ls $CORTEX_DEST_S3_DIR/<job_id>/
+    161f9fda-fd08-44f3-b983-4529f950e40b.json
+    40100ffb-6824-4560-8ca4-7c0d14273e05.json
+    6d1c933c-0ddf-4316-9956-046cd731c5ab.json
+    ...
+    aggregated_results.json
+```
+
+You can download the aggregated results file with `aws s3 cp $CORTEX_DEST_S3_DIR/<job_id>/aggregated_results.json .` and confirm that there are 16 classifications.
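+
+As described above, each input file is newline delimited JSON with one item per line. For this example each item is simply an image URL string, so a file in the same format as `urls_0.json` would look like this (contents illustrative):
+
+```text
+"https://example.com/image-0.jpg"
+"https://example.com/image-1.jpg"
+"https://example.com/image-2.jpg"
+```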
+
+## Alternative job submission: images in S3
+
+Let's assume that rather than downloading images from URLs on the internet, you have an S3 directory containing the images. We can specify `file_path_lister` in the job request to get the list of S3 URLs for the images, partition the list into batches, and place them on a queue that will be consumed by the workers.
+
+We'll classify the 16 images found at `s3://cortex-examples/image-classifier/samples`. You can use the AWS CLI to verify that there are 16 images: `aws s3 ls s3://cortex-examples/image-classifier/samples/`.
+
+### Dry run
+
+Let's do a dry run to make sure the correct list of images will be submitted to the job.
+
+```bash
+$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir
+$ curl $BATCH_API_ENDPOINT?dryRun=true \
+-X POST -H "Content-Type: application/json" \
+-d @- <<EOF
+{
+    "workers": 1,
+    "file_path_lister": {
+        "s3_paths": ["s3://cortex-examples/image-classifier/samples"],
+        "includes": ["**.jpg"],
+        "batch_size": 2
+    },
+    "config": {
+        "dest_s3_dir": "$CORTEX_DEST_S3_DIR"
+    }
+}
+EOF
+```
+
+Note: if you are prompted with `>` then type `EOF`.
+
+You should expect a response like this:
+
+```text
+s3://cortex-examples/image-classifier/samples/img_0.jpg
+s3://cortex-examples/image-classifier/samples/img_1.jpg
+...
+s3://cortex-examples/image-classifier/samples/img_8.jpg
+s3://cortex-examples/image-classifier/samples/img_9.jpg
+validations passed
+```
+
+### Classify images in S3
+
+Let's actually submit the job now. Your Batch API will get all of the input S3 files based on `s3_paths` and will apply the filters specified in `includes` and `excludes`.
+
+```bash
+$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+$ export CORTEX_DEST_S3_DIR= # e.g. export CORTEX_DEST_S3_DIR=s3://my-bucket/dir
+$ curl $BATCH_API_ENDPOINT \
+-X POST -H "Content-Type: application/json" \
+-d @- <<EOF
+{
+    "workers": 1,
+    "file_path_lister": {
+        "s3_paths": ["s3://cortex-examples/image-classifier/samples"],
+        "includes": ["**.jpg"],
+        "batch_size": 2
+    },
+    "config": {
+        "dest_s3_dir": "$CORTEX_DEST_S3_DIR"
+    }
+}
+EOF
+```
+
+Note: if you are prompted with `>` then type `EOF`.
+
+You should get a response like this:
+
+```json
+{"job_id":"69d6f8a472f0e1e5","api_name":"image-classifier", "config":{"dest_s3_dir": "YOUR_S3_BUCKET_HERE"}}
+```
+
+### Verify results
+
+Wait for the job to complete by streaming the logs with `cortex logs <api_name> <job_id>` or watching for the job status to change with `cortex get <api_name> <job_id> --watch`.
+
+```bash
+$ cortex logs image-classifier 69d6f8a472f0e1e5 --env aws
+
+started enqueuing batches to queue
+completed enqueuing a total of 8 batches
+spinning up workers...
+2020-07-18 21:35:34.186348:cortex:pid-1:INFO:downloading the project code
+...
+2020-08-07 15:49:10.889839:cortex:pid-25:INFO:processing batch d0e695bc-a975-4115-a60f-0a55c743fc57
+2020-08-07 15:49:31.188943:cortex:pid-25:INFO:executing on_job_complete
+2020-08-07 15:49:31.362053:cortex:pid-25:INFO:no batches left in queue, job has been completed
+```
+
+The status of your job, which you can get from `cortex get <api_name> <job_id>`, should change from `running` to `succeeded` once the job has completed. If it changes to a different status, you may be able to find the stacktrace using `cortex logs <api_name> <job_id>`. If your job has completed successfully, you can view the results of the image classification in the S3 directory you specified in the job submission.
+
+Using the AWS CLI:
+
+```bash
+$ aws s3 ls $CORTEX_DEST_S3_DIR/<job_id>/
+    6bee7412-4c16-4d9f-ab3e-e88669cf7a89.json
+    3c45b4b3-953e-4226-865b-75f3961dcf95.json
+    d0e695bc-a975-4115-a60f-0a55c743fc57.json
+    ...
+    aggregated_results.json
+```
+
+You can download the aggregated results file with `aws s3 cp $CORTEX_DEST_S3_DIR/<job_id>/aggregated_results.json .` and confirm that there are 16 classifications.
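+
+One way to confirm the count after downloading the aggregated results file (assumes a local Python interpreter):
+
+```bash
+$ aws s3 cp $CORTEX_DEST_S3_DIR/<job_id>/aggregated_results.json .
+$ python -c "import json; print(len(json.load(open('aggregated_results.json'))))"
+16
+```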
+
+## Stopping a Job
+
+You can stop a running job by sending a DELETE request to your Batch API endpoint with the `jobID` query parameter.
+
+```bash
+$ export BATCH_API_ENDPOINT= # e.g. export BATCH_API_ENDPOINT=https://abcdefg.execute-api.us-west-2.amazonaws.com/image-classifier
+$ curl -X DELETE $BATCH_API_ENDPOINT?jobID=69d96a01ea55da8c
+
+stopped job 69d96a01ea55da8c
+```
+
+You can also use the Cortex CLI: `cortex delete <api_name> <job_id>`.
+
+```bash
+$ cortex delete image-classifier 69d96a01ea55da8c --env aws
+
+stopped job 69d96a01ea55da8c
+```
+
+ +## Cleanup + +Run `cortex delete` to delete the API: + +```bash +$ cortex delete image-classifier --env aws + +deleting image-classifier +``` + +Running `cortex delete` will stop all in progress jobs for the API and will delete job history for that API. It will not spin down your cluster. + +## Next steps + + +* Deploy another one of our [batch examples](https://github.com/cortexlabs/cortex/tree/master/examples/batch). +* See our [exporting guide](../../../docs/guides/exporting.md) for how to export your model to use in an API. +* Try the [realtime API tutorial](../../pytorch/text-generator/README.md) to learn how to deploy realtime APIs in Cortex. +* See [uninstall](../../../docs/aws/uninstall.md) if you'd like to spin down your cluster. diff --git a/test/batch/image-classifier/cortex.yaml b/test/batch/image-classifier/cortex.yaml new file mode 100644 index 0000000000..35ea4e991a --- /dev/null +++ b/test/batch/image-classifier/cortex.yaml @@ -0,0 +1,9 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier + kind: BatchAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 diff --git a/test/batch/image-classifier/predictor.py b/test/batch/image-classifier/predictor.py new file mode 100644 index 0000000000..92a8cc26b9 --- /dev/null +++ b/test/batch/image-classifier/predictor.py @@ -0,0 +1,58 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import os +import requests +from PIL import Image +from io import BytesIO +import json +import re + +# labels "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" +# bucket, key + + +class PythonPredictor: + def __init__(self, config, job_spec): + import re + import boto3 + from torchvision import transforms + import torchvision + + self.model = torchvision.models.alexnet(pretrained=True).eval() + + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.preprocess = transforms.Compose( + [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] + ) + + self.labels = requests.get(config["labels"]).text.split("\n")[1:] + + self.s3 = boto3.client("s3") # initialize S3 client to save results + + self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() + self.key = os.path.join(self.key, job_spec["job_id"]) + + def predict(self, payload, batch_id): + import json + from PIL import Image + import torch + + tensor_list = [] + for image_url in payload: # download and preprocess each image + img_pil = Image.open(BytesIO(requests.get(image_url).content)) + tensor_list.append(self.preprocess(img_pil)) + + img_tensor = torch.stack(tensor_list) + with torch.no_grad(): # classify the batch of images + prediction = self.model(img_tensor) + _, indices = prediction.max(1) + + results = [ # extract predicted classes + {"url": payload[i], "class": self.labels[class_idx]} + for i, class_idx in enumerate(indices) + ] + + # save results + self.s3.put_object( + Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json.dumps(results) + ) diff --git a/test/batch/image-classifier/requirements.txt b/test/batch/image-classifier/requirements.txt new file mode 100644 index 
0000000000..2c0ef31b51 --- /dev/null +++ b/test/batch/image-classifier/requirements.txt @@ -0,0 +1,4 @@ +torch +torchvision +boto3 +pillow diff --git a/test/batch/image-classifier/sample.json b/test/batch/image-classifier/sample.json new file mode 100644 index 0000000000..eb45c463fd --- /dev/null +++ b/test/batch/image-classifier/sample.json @@ -0,0 +1,3 @@ +[ + "https://i.imgur.com/PzXprwl.jpg" +] diff --git a/test/batch/onnx/README.md b/test/batch/onnx/README.md new file mode 100644 index 0000000000..b3091bb133 --- /dev/null +++ b/test/batch/onnx/README.md @@ -0,0 +1,6 @@ +# Batch Image Classifier in ONNX + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + + +Please refer to the [tutorial](https://docs.cortex.dev/v/master/batch-api/image-classifier#deploy-your-batch-api) to see how to deploy a Batch API with Cortex. diff --git a/test/batch/onnx/cortex.yaml b/test/batch/onnx/cortex.yaml new file mode 100644 index 0000000000..4bdf7080e1 --- /dev/null +++ b/test/batch/onnx/cortex.yaml @@ -0,0 +1,10 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier + kind: BatchAPI + predictor: + type: onnx + path: predictor.py + model_path: s3://cortex-examples/image-classifier/alexnet_batch/ + compute: + cpu: 1 diff --git a/test/batch/onnx/predictor.py b/test/batch/onnx/predictor.py new file mode 100644 index 0000000000..7f005a0b72 --- /dev/null +++ b/test/batch/onnx/predictor.py @@ -0,0 +1,64 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import requests +import numpy as np +import base64 +from PIL import Image +from io import BytesIO +from torchvision import transforms +import boto3 +import json +import re +import os + + +class ONNXPredictor: + def __init__(self, onnx_client, config, job_spec): + self.client = onnx_client + + self.labels = requests.get( + "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" + ).text.split("\n")[1:] + + # https://github.com/pytorch/examples/blob/447974f6337543d4de6b888e244a964d3c9b71f6/imagenet/main.py#L198-L199 + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.preprocess = transforms.Compose( + [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] + ) + + if len(config.get("dest_s3_dir", "")) == 0: + raise Exception("'dest_s3_dir' field was not provided in job submission") + + self.s3 = boto3.client("s3") + + self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() + self.key = os.path.join(self.key, job_spec["job_id"]) + + def predict(self, payload, batch_id): + arr_list = [] + + # download and preprocess each image + for image_url in payload: + if image_url.startswith("s3://"): + bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() + image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() + else: + image_bytes = requests.get(image_url).content + + img_pil = Image.open(BytesIO(image_bytes)) + arr_list.append(self.preprocess(img_pil).numpy()) + + # classify the batch of images + imgs_arr = np.stack(arr_list, axis=0) + result = self.client.predict(imgs_arr) + + # extract predicted classes + predicted_classes = np.argmax(result[0], axis=1) + results = [ + {"url": payload[i], "class": self.labels[class_idx]} + for i, class_idx in enumerate(predicted_classes) + ] + + # save results + json_output = json.dumps(results) + self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) diff --git a/test/batch/onnx/requirements.txt b/test/batch/onnx/requirements.txt new file mode 100644 index 0000000000..5a2cde2a12 --- /dev/null +++ b/test/batch/onnx/requirements.txt @@ -0,0 +1,3 @@ +torchvision +boto3 +pillow diff --git a/test/batch/tensorflow/README.md b/test/batch/tensorflow/README.md new file mode 100644 index 0000000000..163fe34968 --- /dev/null +++ b/test/batch/tensorflow/README.md @@ -0,0 +1,6 @@ +# Batch Image Classifier in TensorFlow + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + + +Please refer to the [tutorial](https://docs.cortex.dev/v/master/batch-api/image-classifier#deploy-your-batch-api) to see how to deploy a Batch API with Cortex. diff --git a/test/batch/tensorflow/cortex.yaml b/test/batch/tensorflow/cortex.yaml new file mode 100644 index 0000000000..189e1a9b0e --- /dev/null +++ b/test/batch/tensorflow/cortex.yaml @@ -0,0 +1,10 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier + kind: BatchAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ + compute: + cpu: 1 diff --git a/test/batch/tensorflow/predictor.py b/test/batch/tensorflow/predictor.py new file mode 100644 index 0000000000..da4bb39ec3 --- /dev/null +++ b/test/batch/tensorflow/predictor.py @@ -0,0 +1,60 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import requests +import numpy as np +from PIL import Image +from io import BytesIO +import json +import os +import re +import boto3 +import tensorflow as tf + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config, job_spec): + self.client = tensorflow_client + self.labels = requests.get( + "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" + ).text.split("\n")[1:] + + if len(config.get("dest_s3_dir", "")) == 0: + raise Exception("'dest_s3_dir' field was not provided in job submission") + + self.s3 = boto3.client("s3") + + self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() + self.key = os.path.join(self.key, job_spec["job_id"]) + + def predict(self, payload, batch_id): + arr_list = [] + + # download and preprocess each image + for image_url in payload: + if image_url.startswith("s3://"): + bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() + image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() + else: + image_bytes = requests.get(image_url).content + + decoded_image = np.asarray(Image.open(BytesIO(image_bytes)), dtype=np.float32) / 255 + resized_image = tf.image.resize( + decoded_image, [224, 224], method=tf.image.ResizeMethod.BILINEAR + ) + arr_list.append(resized_image) + + # classify the batch of images + model_input = {"images": np.stack(arr_list, axis=0)} + predictions = self.client.predict(model_input) + + # extract predicted classes + reshaped_predictions = np.reshape(np.array(predictions["classes"]), [-1, len(self.labels)]) + predicted_classes = np.argmax(reshaped_predictions, axis=1) + results = [ + {"url": payload[i], "class": self.labels[class_idx]} + for i, class_idx in enumerate(predicted_classes) + ] + + # save results + json_output = json.dumps(results) + self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) diff --git a/test/batch/tensorflow/requirements.txt b/test/batch/tensorflow/requirements.txt new file mode 100644 index 0000000000..7e2fba5e6c --- /dev/null +++ b/test/batch/tensorflow/requirements.txt @@ -0,0 +1 @@ +Pillow diff --git a/test/keras/document-denoiser/README.md b/test/keras/document-denoiser/README.md new file mode 100644 index 0000000000..05f90b9bef --- /dev/null +++ b/test/keras/document-denoiser/README.md @@ -0,0 +1,46 @@ +# Clean Dirty Documents w/ Autoencoders + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example model cleans text documents of anything that isn't text (aka noise): coffee stains, old wear artifacts, etc. You can inspect the notebook that has been used to train the model [here](trainer.ipynb). 
+ +Here's a collage of input texts and predictions. + +![Imgur](https://i.imgur.com/M4Mjz2l.jpg) + +*Figure 1 - The dirty documents are on the left side and the cleaned ones are on the right* + +## Sample Prediction + +Once this model is deployed, get the API endpoint by running `cortex get document-denoiser`. + +Now let's take a sample image like this one. + +![Imgur](https://i.imgur.com/JJLfFxB.png) + +Export the endpoint & the image's URL by running +```bash +export ENDPOINT= +export IMAGE_URL=https://i.imgur.com/JJLfFxB.png +``` + +Then run the following piped commands +```bash +curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d '{"url":"'${IMAGE_URL}'"}' | +sed 's/"//g' | +base64 -d > prediction.png +``` + +Once this has run, we'll see a `prediction.png` file saved to the disk. This is the result. + +![Imgur](https://i.imgur.com/PRB2oS8.png) + +As it can be seen, the text document has been cleaned of any noise. Success! + +--- + +Here's a short list of URLs of other text documents in image format that can be cleaned using this model. Export these links to `IMAGE_URL` variable: + +* https://i.imgur.com/6COQ46f.png +* https://i.imgur.com/alLI83b.png +* https://i.imgur.com/QVoSTuu.png diff --git a/test/keras/document-denoiser/cortex.yaml b/test/keras/document-denoiser/cortex.yaml new file mode 100644 index 0000000000..b616a0ff0f --- /dev/null +++ b/test/keras/document-denoiser/cortex.yaml @@ -0,0 +1,12 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: document-denoiser + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + config: + model: s3://cortex-examples/keras/document-denoiser/model.h5 + resize_shape: [540, 260] + compute: + cpu: 1 diff --git a/test/keras/document-denoiser/predictor.py b/test/keras/document-denoiser/predictor.py new file mode 100644 index 0000000000..2554560388 --- /dev/null +++ b/test/keras/document-denoiser/predictor.py @@ -0,0 +1,86 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import boto3, base64, cv2, re, os, requests +from botocore import UNSIGNED +from botocore.client import Config +import numpy as np +from tensorflow.keras.models import load_model + + +def get_url_image(url_image): + """ + Get numpy image from URL image. + """ + resp = requests.get(url_image, stream=True).raw + image = np.asarray(bytearray(resp.read()), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE) + return image + + +def image_to_png_nparray(image): + """ + Convert numpy image to jpeg numpy vector. + """ + is_success, im_buf_arr = cv2.imencode(".png", image) + return im_buf_arr + + +def image_to_png_bytes(image): + """ + Convert numpy image to bytes-encoded png image. 
+ """ + buf = image_to_png_nparray(image) + byte_im = buf.tobytes() + return byte_im + + +class PythonPredictor: + def __init__(self, config): + # download the model + bucket, key = re.match("s3://(.+?)/(.+)", config["model"]).groups() + + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + model_path = os.path.join("/tmp/model.h5") + s3.download_file(bucket, key, model_path) + + # load the model + self.model = load_model(model_path) + + # resize shape (width, height) + self.resize_shape = tuple(config["resize_shape"]) + + def predict(self, payload): + # download image + img_url = payload["url"] + image = get_url_image(img_url) + resized = cv2.resize(image, self.resize_shape) + + # prediction + pred = self.make_prediction(resized) + + # image represented in bytes + byte_im = image_to_png_bytes(pred) + + # encode image + image_enc = base64.b64encode(byte_im).decode("utf-8") + + return image_enc + + def make_prediction(self, img): + """ + Make prediction on image. + """ + processed = img / 255.0 + processed = np.expand_dims(processed, 0) + processed = np.expand_dims(processed, 3) + pred = self.model.predict(processed) + pred = np.squeeze(pred, 3) + pred = np.squeeze(pred, 0) + out_img = pred * 255 + out_img[out_img > 255.0] = 255.0 + out_img = out_img.astype(np.uint8) + return out_img diff --git a/test/keras/document-denoiser/requirements.txt b/test/keras/document-denoiser/requirements.txt new file mode 100644 index 0000000000..77eb59dc52 --- /dev/null +++ b/test/keras/document-denoiser/requirements.txt @@ -0,0 +1,5 @@ +numpy==1.18.0 +requests==2.22.0 +opencv-python==4.1.2.30 +keras==2.3.1 +h5py==2.10.0 diff --git a/test/keras/document-denoiser/sample.json b/test/keras/document-denoiser/sample.json new file mode 100644 index 0000000000..651595f4fb --- /dev/null +++ b/test/keras/document-denoiser/sample.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/JJLfFxB.png" +} diff --git a/test/keras/document-denoiser/trainer.ipynb b/test/keras/document-denoiser/trainer.ipynb new file mode 100644 index 0000000000..c8b0799b1b --- /dev/null +++ b/test/keras/document-denoiser/trainer.ipynb @@ -0,0 +1,620 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Training a Document Denoiser Model with AutoEncoders" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "metadata": {}, + "outputs": [], + "source": [ + "# _WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", + "\n", + "\n", + "import keras\n", + "import cv2\n", + "import numpy as np\n", + "import pandas as pd\n", + "import seaborn as sns\n", + "import os\n", + "import ntpath\n", + "from glob import glob\n", + "from matplotlib.pyplot import imshow\n", + "from sklearn.model_selection import train_test_split\n", + "from keras.preprocessing.image import ImageDataGenerator\n", + "from keras.models import Sequential, Model, load_model\n", + "from keras.layers import Activation, Flatten, Dropout, SpatialDropout2D, Conv2D, UpSampling2D, MaxPooling2D, add, concatenate, Input, BatchNormalization\n", + "from keras.backend import set_image_data_format\n", + "from keras.utils import plot_model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Download Dataset\n", + "\n", + "Download the dataset from [kaggle (denoising dirty documents)](https://www.kaggle.com/c/denoising-dirty-documents/data). You will need to be logged in to be able to download the data.\n", + "\n", + "Once downloaded run the following commands" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!unzip denoising-dirty-documents.zip && rm denoising-dirty-documents.zip\n", + "!mv denoising-dirty-documents/*.zip . && rm -rf denoising-dirty-documents\n", + "!unzip '*.zip' > /dev/null && rm *.zip" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define the Data Generator\n", + "\n", + "Include data augmentation because the dataset is rather small." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "x_dirty = sorted(glob(\"train/*.png\"))\n", + "x_cleaned = sorted(glob(\"train_cleaned/*.png\"))\n", + "x_test = sorted(glob(\"test/*.png\"))\n", + "input_shape = (260, 540)\n", + "height = input_shape[0]\n", + "width = input_shape[1]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "x_train, x_valid, y_train, y_valid = train_test_split(x_dirty, x_cleaned, test_size=0.20)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "set_image_data_format(\"channels_last\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def model_train_generator(x_train, y_train, epochs, batch_size, resize_shape):\n", + " white_fill = 1.0\n", + " datagen = ImageDataGenerator(\n", + " rotation_range=180,\n", + " width_shift_range=0.2,\n", + " height_shift_range=0.2,\n", + " zoom_range=0.3,\n", + " fill_mode=\"constant\",\n", + " cval=white_fill,\n", + " horizontal_flip=True,\n", + " vertical_flip=True,\n", + " )\n", + " \n", + " for _ in range(epochs):\n", + " for x_file, y_file in zip(x_train, y_train):\n", + " x_img = cv2.imread(x_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", + " y_img = cv2.imread(y_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", + " \n", + " xs = []\n", + " ys = []\n", + " for i in range(batch_size):\n", + " if i == 0:\n", + " x = x_img\n", + " y = y_img\n", + " else:\n", + " params = datagen.get_random_transform(img_shape=x_img.shape)\n", + " x = datagen.apply_transform(np.expand_dims(x_img, 2), params)\n", + " y = datagen.apply_transform(np.expand_dims(y_img, 2), params)\n", + " x = cv2.resize(x, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", + " y = cv2.resize(y, resize_shape[::-1], 
interpolation=cv2.INTER_AREA)\n", + " x = np.expand_dims(x, 2)\n", + " y = np.expand_dims(y, 2)\n", + " xs.append(x)\n", + " ys.append(y)\n", + " xs_imgs = np.array(xs)\n", + " ys_imgs = np.array(ys)\n", + " yield (xs_imgs, ys_imgs)\n", + "\n", + "def model_valid_generator(x_valid, y_valid, epochs, resize_shape):\n", + " xs = []\n", + " ys = []\n", + " for x_file, y_file in zip(x_valid, y_valid):\n", + " x_img = cv2.imread(x_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", + " y_img = cv2.imread(y_file, cv2.IMREAD_GRAYSCALE) / 255.0\n", + " x = cv2.resize(x_img, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", + " y = cv2.resize(y_img, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", + " x = np.expand_dims(x, 2)\n", + " x = np.expand_dims(x, 0)\n", + " y = np.expand_dims(y, 2)\n", + " y = np.expand_dims(y, 0)\n", + " xs.append(x)\n", + " ys.append(y)\n", + " \n", + " for _ in range(epochs):\n", + " for xs_img, ys_img in zip(xs, ys):\n", + " yield (xs_img, ys_img)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create the Model" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def create_encoder(input_shape):\n", + " inp = Input(shape=input_shape)\n", + " x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), \n", + " input_shape=input_shape, activation=\"relu\", padding=\"same\")(inp)\n", + " x = BatchNormalization()(x)\n", + " x = MaxPooling2D(pool_size=(2,2))(x)\n", + " \n", + " x = Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), \n", + " activation=\"relu\", padding=\"same\")(x)\n", + " x = BatchNormalization()(x)\n", + "\n", + " return inp, x" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def create_decoder(inp):\n", + " x = Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), activation=\"relu\",\n", + " padding=\"same\")(inp)\n", + " x = BatchNormalization()(x)\n", + " x = UpSampling2D(size=(2,2))(x)\n", + " \n", + " x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), \n", + " activation=\"relu\", padding=\"same\")(x)\n", + " x = BatchNormalization()(x)\n", + " \n", + " x = Conv2D(filters=1, kernel_size=(1,1), strides=(1,1), \n", + " activation=\"sigmoid\", padding=\"same\")(x)\n", + " x = BatchNormalization()(x)\n", + " \n", + " return inp, x" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def create_autoencoder(input_shape):\n", + " enc_inp, encoder = create_encoder(input_shape)\n", + " dec_inp, autoencoder = create_decoder(encoder)\n", + " model = Model(inputs=[enc_inp], outputs=[autoencoder], name='AutoEncoder')\n", + " \n", + " return model" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From C:\\Users\\OboTh\\Anaconda3\\envs\\lightweight-gpu-python\\lib\\site-packages\\tensorflow_core\\python\\ops\\resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "If using Keras pass *_constraint arguments to layers.\n", + "WARNING:tensorflow:From C:\\Users\\OboTh\\Anaconda3\\envs\\lightweight-gpu-python\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:4070: The name tf.nn.max_pool is deprecated. 
Please use tf.nn.max_pool2d instead.\n", + "\n", + "Model: \"AutoEncoder\"\n", + "_________________________________________________________________\n", + "Layer (type) Output Shape Param # \n", + "=================================================================\n", + "input_1 (InputLayer) (None, 260, 540, 1) 0 \n", + "_________________________________________________________________\n", + "conv2d_1 (Conv2D) (None, 260, 540, 64) 640 \n", + "_________________________________________________________________\n", + "batch_normalization_1 (Batch (None, 260, 540, 64) 256 \n", + "_________________________________________________________________\n", + "max_pooling2d_1 (MaxPooling2 (None, 130, 270, 64) 0 \n", + "_________________________________________________________________\n", + "conv2d_2 (Conv2D) (None, 130, 270, 32) 18464 \n", + "_________________________________________________________________\n", + "batch_normalization_2 (Batch (None, 130, 270, 32) 128 \n", + "_________________________________________________________________\n", + "conv2d_3 (Conv2D) (None, 130, 270, 32) 9248 \n", + "_________________________________________________________________\n", + "batch_normalization_3 (Batch (None, 130, 270, 32) 128 \n", + "_________________________________________________________________\n", + "up_sampling2d_1 (UpSampling2 (None, 260, 540, 32) 0 \n", + "_________________________________________________________________\n", + "conv2d_4 (Conv2D) (None, 260, 540, 64) 18496 \n", + "_________________________________________________________________\n", + "batch_normalization_4 (Batch (None, 260, 540, 64) 256 \n", + "_________________________________________________________________\n", + "conv2d_5 (Conv2D) (None, 260, 540, 1) 65 \n", + "_________________________________________________________________\n", + "batch_normalization_5 (Batch (None, 260, 540, 1) 4 \n", + "=================================================================\n", + "Total params: 47,685\n", + "Trainable params: 47,299\n", + "Non-trainable params: 386\n", + "_________________________________________________________________\n" + ] + } + ], + "source": [ + "model = create_autoencoder((height, width, 1))\n", + "model.summary()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "model.compile(optimizer='adam', loss='mse')\n", + "epochs = 20\n", + "batch_size = 8\n", + "samples = len(x_train)\n", + "validation_samples = len(x_valid)\n", + "train_generator = model_train_generator(x_train, y_train, epochs=epochs, batch_size=batch_size, resize_shape=(height, width))\n", + "valid_generator = model_valid_generator(x_valid, y_valid, epochs=epochs, resize_shape=(height, width))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Train the AutoEncoder Model" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From C:\\Users\\OboTh\\Anaconda3\\envs\\lightweight-gpu-python\\lib\\site-packages\\keras\\backend\\tensorflow_backend.py:422: The name tf.global_variables is deprecated. 
Please use tf.compat.v1.global_variables instead.\n", + "\n", + "Epoch 1/20\n", + "115/115 [==============================] - 49s 429ms/step - loss: 1.2062 - val_loss: 0.1817\n", + "Epoch 2/20\n", + "115/115 [==============================] - 43s 373ms/step - loss: 0.5792 - val_loss: 0.1720\n", + "Epoch 3/20\n", + "115/115 [==============================] - 43s 373ms/step - loss: 0.4297 - val_loss: 0.1399\n", + "Epoch 4/20\n", + "115/115 [==============================] - 43s 375ms/step - loss: 0.3160 - val_loss: 0.1023\n", + "Epoch 5/20\n", + "115/115 [==============================] - 44s 385ms/step - loss: 0.2276 - val_loss: 0.0609\n", + "Epoch 6/20\n", + "115/115 [==============================] - 44s 379ms/step - loss: 0.1599 - val_loss: 0.0292\n", + "Epoch 7/20\n", + "115/115 [==============================] - 43s 376ms/step - loss: 0.1091 - val_loss: 0.0112\n", + "Epoch 8/20\n", + "115/115 [==============================] - 43s 376ms/step - loss: 0.0730 - val_loss: 0.0074\n", + "Epoch 9/20\n", + "115/115 [==============================] - 44s 381ms/step - loss: 0.0473 - val_loss: 0.0055\n", + "Epoch 10/20\n", + "115/115 [==============================] - 45s 393ms/step - loss: 0.0301 - val_loss: 0.0047\n", + "Epoch 11/20\n", + "115/115 [==============================] - 45s 387ms/step - loss: 0.0189 - val_loss: 0.0041\n", + "Epoch 12/20\n", + "115/115 [==============================] - 43s 376ms/step - loss: 0.0118 - val_loss: 0.0042\n", + "Epoch 13/20\n", + "115/115 [==============================] - 44s 380ms/step - loss: 0.0075 - val_loss: 0.0061\n", + "Epoch 14/20\n", + "115/115 [==============================] - 43s 377ms/step - loss: 0.0051 - val_loss: 0.0048\n", + "Epoch 15/20\n", + "115/115 [==============================] - 43s 378ms/step - loss: 0.0037 - val_loss: 0.0045\n", + "Epoch 16/20\n", + "115/115 [==============================] - 43s 373ms/step - loss: 0.0029 - val_loss: 0.0045\n", + "Epoch 17/20\n", + "115/115 [==============================] - 44s 378ms/step - loss: 0.0025 - val_loss: 0.0048\n", + "Epoch 18/20\n", + "115/115 [==============================] - 43s 375ms/step - loss: 0.0023 - val_loss: 0.0047\n", + "Epoch 19/20\n", + "115/115 [==============================] - 43s 376ms/step - loss: 0.0022 - val_loss: 0.0043\n", + "Epoch 20/20\n", + "115/115 [==============================] - 44s 380ms/step - loss: 0.0021 - val_loss: 0.0042\n" + ] + } + ], + "source": [ + "hist_obj = model.fit_generator(train_generator, validation_data=valid_generator, validation_steps=validation_samples, steps_per_epoch=samples, epochs=epochs, shuffle=True) " + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXQAAAEGCAYAAAB1iW6ZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nO3deXxU5d338c9vJpMESMIWlrAvgrIpanCtqLRlq2u1iqJWa+WhLtW+Krf6tFXv2t59bG9tbWu11lK1dQGXupRNa1txl4DsICKyhDWsYQtJZq7njzOBELJMYCYnM/N9v17zmplzrpn5zWH45sw117mOOecQEZHkF/C7ABERiQ8FuohIilCgi4ikCAW6iEiKUKCLiKSIDL9eOD8/3/Xq1cuvlxcRSUpz587d6pzrUNs63wK9V69eFBUV+fXyIiJJyczW1LVOXS4iIilCgS4ikiIU6CIiKcK3PnQRSU8VFRUUFxdTVlbmdynNWnZ2Nt26dSMUCsX8GAW6iDSp4uJicnNz6dWrF2bmdznNknOObdu2UVxcTO/evWN+nLpcRKRJlZWV0b59e4V5PcyM9u3bN/pbTIOBbmaTzWyLmS2uY/14M1sYvXxgZic1qgIRSTsK84YdzTaKZQ/9KWB0Peu/BM51zp0IPAA80egqRETkmDUY6M652cD2etZ/4JzbEb37EdAtTrXVbuNC+PNI2PBpQl9GRCTZxLsP/UZgRl0rzWyCmRWZWVFJScnRvUKoJaz7GLYsO8oSRURil5OTU+e61atXM3jw4Caspn5xC3QzOx8v0O+qq41z7gnnXKFzrrBDh1qnImhY254QCMHWFUf3eBGRFBWXYYtmdiLwJDDGObctHs9Zp2AI2vWBrZ8n9GVEJPH++40lLN1QGtfnHNglj/suHFTn+rvuuouePXty8803A3D//fdjZsyePZsdO3ZQUVHBz372My6++OJGvW5ZWRnf+973KCoqIiMjg4cffpjzzz+fJUuWcMMNN1BeXk4kEuHll1+mS5cuXHHFFRQXFxMOh/nJT37ClVdeeUzvG+IQ6GbWA3gFuNY51zS7zfn9tIcuIkdl3Lhx3HHHHQcDferUqcycOZMf/OAH5OXlsXXrVs444wwuuuiiRo00efTRRwFYtGgRy5cvZ+TIkaxYsYLHH3+c22+/nfHjx1NeXk44HGb69Ol06dKFadOmAbBr1664vLcGA93MngfOA/LNrBi4DwgBOOceB+4F2gN/iL75SudcYVyqq0t+f1gxC8IV3h67iCSl+vakE+Xkk09my5YtbNiwgZKSEtq2bUtBQQE/+MEPmD17NoFAgPXr17N582Y6d+4c8/O+99573HbbbQCccMIJ9OzZkxUrVnDmmWfy85//nOLiYr75zW/Sr18/hgwZwp133sldd93FBRdcwDnnnBOX99ZgoDvnrmpg/XeB78almlgNuxGGjgcLNunLikhquPzyy3nppZfYtGkT48aN49lnn6WkpIS5c+cSCoXo1atXow/qcc7Vuvzqq6/m9NNPZ9q0aYwaNYonn3ySESNGMHfuXKZPn84999zDyJEjuffee4/5fSXnof+tEzsyUkRS27hx47jpppvYunUr77zzDlOnTqVjx46EQiH+/e9/s2ZNnVOO12n48OE8++yzjBgxghUrVrB27VqOP/54Vq1aRZ8+ffj+97/PqlWrWLhwISeccALt2rXjmmuuIScnh6eeeiou7ys5Az0Shhl3Qc+zYPA3/a5GRJLMoEGD2L17N127dqWgoIDx48dz4YUXUlhYyNChQznhhBMa/Zw333wzEydOZMiQIWRkZPDUU0+RlZXFlClT+Nvf/kYoFKJz587ce++9zJkzh0mTJhEIBAiFQjz22GNxeV9W19eERCssLHTHdMai/+0Px30dLnk0fkWJSMItW7aMAQMG+F1GUqhtW5nZ3Lp+p0zeybny+2uki4hINcnZ5QLQ/jhY8ndwDjTRj4gk0KJFi7j22msPW5aVlcXHH3/sU0W1S95Az+8PZTth71bIOcqjTkVEYjBkyBDmz5/vdxkNSu4uF4BtOmJURASSOdC7ngKXTz4U7CIiaS55u1xatoPBl/ldhYhIs5G8e+gAy6fDvL/6XYWIJJn6psRNZskd6ItehHcf8rsKEZFmIbkDPb8/7FwDFY2bc0FEBLz5VyZNmsTgwYMZMmQIU6ZMAWDjxo0MHz6coUOHMnjwYN59913C4TDXX3/9wba//vWvfa7+SMnbhw7eNLouAttXQaeBflcjIkfjL9+offkN3tSyzLgbNi06cv3oX0DBifDpszD/uSMfF4NXXnmF+fPns2DBArZu3cqwYcMYPnw4zz33HKNGjeJHP/oR4XCYffv2MX/+fNavX8/ixYsB2LlzZ8yv01SSfA+9n3etI0ZF5Ci89957XHXVVQSDQTp16sS5557LnDlzGDZsGH/5y1+4//77WbRoEbm5ufTp04dVq1Zx2223MXPmTPLy8vwu/wjJvYfe/jjvWmcvEkleDe1Rj/l/9a8/ebx3OQp1zWU1fPhwZs+ezbRp07j22muZNGkS1113HQsWLGDWrFk8+uijTJ06lcmTJx/V6yZKcu+hZ7aCET/2Zl0UEWmk4cOHM2XKFMLhMCUlJcyePZvTTjuNNWvW0LFjR2666SZuvPFG5s2bx9atW4lEIlx22WU88MADzJs3z+/yj5Dce+gAwyf5XYGIJKlLL72UDz/8kJNOOgkz45e//CWdO3fm6aef5le/+hWhUIicnByeeeYZ1q9fzw033EAkEgHgF7/4hc/VHyl5p8+tsnMdrPvYO8hIk3SJNHuaPjd26TN9bpXPZ8HLN8LujX5XIiLiq+QP9PYa6SIiAqkQ6FWTc2mki0jS8KurN5kczTZK/kDP7QyZudpDF0kS2dnZbNu2TaFeD+cc27ZtIzs7u1GPS/5RLmbeAUbaQxdJCt26daO4uJiSkhK/S2nWsrOz6datW6Mek/yBDjDoEqjY73cVIhKDUChE7969/S4jJTUY6GY2GbgA2OKcG1zLegMeAcYC+4DrnXNNO+L+7Nub9OVERJqjWPrQnwJG17N+DNAvepkAPHbsZTVSuNLrctm/o8lfWkSkuWgw0J1zs4Ht9TS5GHjGeT4C2phZQbwKjMm2z+H3hfD5P5v0ZUVEmpN4jHLpCqyrdr84uuwIZjbBzIrMrCiuP4i06wMW0EgXEUlr8Qj02o63r3U8knPuCedcoXOusEOHDnF46aiMLGjbS4EuImktHoFeDHSvdr8bsCEOz9s4+f1h28omf1kRkeYiHoH+OnCdec4Adjnnmn5ilfx+XqBHwk3+0iIizUEswxafB84D8s2sGLgPCAE45x4HpuMNWVyJN2zxhkQVW6+Cod6lbBe0bOdLCSIifkr+6XNFRNJIak+fW10kAuX7/K5CRMQXqRXoj5wIs+7xuwoREV+kVqDnddEkXSKStlIr0PP7aSy6iKStFAv0/rC3RHO6iEhaSr1AB9iqA4xEJP2kXqCHWsLeLX5XIiLS5FLjBBdV2vWBe9ZDILX+To
mIxCK1At3Mu4iIpKHU25V9+6fw55F+VyEi0uRSL9Cdg/VzIVzhdyUiIk0q9QI9vz9EKmHHar8rERFpUqkZ6KADjEQk7aRgoB/nXSvQRSTNpF6gZ7eGnM6a00VE0k5qDVusctPbkNPJ7ypERJpUagZ6625+VyAi0uRSr8sFYPV78MwlsKfE70pERJpMagZ6ZRms+rd+GBWRtJKagV41dHGbfhgVkfSRmoGe1w0yWmiki4ikldQM9EDAG4+uLhcRSSOpGejgdbso0EUkjaTmsEWAc+705nQREUkTMe2hm9loM/vMzFaa2d21rG9tZm+Y2QIzW2JmN8S/1EbqNBAKTvS7ChGRJtNgoJtZEHgUGAMMBK4ys4E1mt0CLHXOnQScBzxkZplxrrVx9u+Et+6DtR/5WoaISFOJZQ/9NGClc26Vc64ceAG4uEYbB+SamQE5wHbA3/6OYCa8/xv4cravZYiINJVYAr0rsK7a/eLosup+DwwANgCLgNudc5GaT2RmE8ysyMyKSkoSfBRnZkto3UNDF0UkbcQS6LWdpNPVuD8KmA90AYYCvzezvCMe5NwTzrlC51xhhw4dGl1so+X300gXEUkbsQR6MdC92v1ueHvi1d0AvOI8K4EvgRPiU+IxyO/v7aG7mn9/RERSTyyBPgfoZ2a9oz90jgNer9FmLfBVADPrBBwPrIpnoUcl/zio2AulNf/+iIikngbHoTvnKs3sVmAWEAQmO+eWmNnE6PrHgQeAp8xsEV4XzV3Oua0JrDs2fc6Hi34Hma38rkREJOHM+dQdUVhY6IqKinx5bRGRZGVmc51zhbWtS91D/6ssnwYr3vS7ChGRhEvdQ/+rzP5fyMqF/iP9rkREJKFSfw89vz9sW+l3FSIiCZcGgd4PStfDgd1+VyIiklBpEOhVZy/SXrqIpLb0CXRNASAiKS71A71dHxj2XWjb2+9KREQSKvVHuWRkwjce8rsKEZGES/09dIBd6zWNroikvPQI9I8fg79dDpGw35WIiCRMegR6fn8IH4Cda/2uREQkYdIn0EEjXUQkpaVZoOtkFyKSutIj0Fu2gxbtFOgiktJSf9hilUGXQJseflchIpIw6RPoF/za7wpERBIqPbpcwDuvaOkGqDzgdyUiIgmRPoH++Zvw8ADYuNDvSkREEiJ9Ar39cd61fhgVkRSVPoHepicEMxXoIpKy0ifQgxnQrq8OLhKRlJU+gQ6Qf5z20EUkZaVXoHc+CbJyIBLxuxIRkbhLr0A/dxJM+A8E0utti0h6iCnZzGy0mX1mZivN7O462pxnZvPNbImZvRPfMuNM0+iKSApqMNDNLAg8CowBBgJXmdnAGm3aAH8ALnLODQK+lYBaj13FfvhlX3j/Eb8rERGJu1j20E8DVjrnVjnnyoEXgItrtLkaeMU5txbAObclvmXGSagFBEOwbaXflYiIxF0sgd4VWFftfnF0WXX9gbZm9h8zm2tm19X2RGY2wcyKzKyopKTk6Co+Vu010kVEUlMsgW61LHM17mcApwLfAEYBPzGz/kc8yLknnHOFzrnCDh06NLrYuMjv7wW6q/kWRESSWyyBXgx0r3a/G7ChljYznXN7nXNbgdnASfEpMc7y+0PZLtjTPHuFRESOViyBPgfoZ2a9zSwTGAe8XqPNa8A5ZpZhZi2B04Fl8S01TnqcDhnZULLc70pEROKqwfnQnXOVZnYrMAsIApOdc0vMbGJ0/ePOuWVmNhNYCESAJ51zixNZ+FHrcjLcsQhyOvpdiYhIXJnzqS+5sLDQFRUV+fLaAFSWw9y/wKnXQ0aWf3WIiDSCmc11zhXWti59D5lc+wHM+C949yG/KxERiYv0DfQ+58GQK7xA39Q8e4dERBojfQMdYMyD0KItvHYLhCv9rkZE5Jikd6C3bAdjfwUb58OHv/e7GhGRY5LegQ4w8BI44QJYX6SDjUQkqTU4bDHlmcE3/+TN82K1HRQrIpIctIcOkNnSC/NV78DCF/2uRkTkqGgPvbr3H4F1H3tHk7bp4Xc1IiKNoj306i78jXf9xu3qTxeRpKNAr65ND/ja/fDFv2D+c35XIyLSKAr0mgpvhB5nwax7YPcmv6sREYmZAr2mQAAu+h20aAc71zXcXkSkmdCPorXJPw5umwuBoN+ViIjETHvodQkEvZNgvHYr7N3mdzUiIg1SoNdnbwkseAFm3u13JSIiDVKg16fTIDjnh7BoKqyY5Xc1IiL1UqA35JwfQseB8MYd3rlIRUSaKQV6QzIy4eLfw55N8Na9flcjIlInjXKJRddT4ezbwUW8I0g1iZeINEMK9Fh99T4FuYg0a+pyiVVVmH/yJ3j7AX9rERGphQK9sbYs885DuvKfflciInIYBXpjff2n0GkwTL0eNi/xuxoRkYNiCnQzG21mn5nZSjOr8ygbMxtmZmEzuzx+JTYzWTlw9RTv+rkrNYGXiDQbDQa6mQWBR4ExwEDgKjMbWEe7B4HUPwKndVe46gXYtx1eu8XvakREgNhGuZwGrHTOrQIwsxeAi4GlNdrdBrwMDItrhc1Vl6Ew7m/QpqfflYiIALF1uXQFqs8jWxxddpCZdQUuBR6PX2lJoO8IaN8XyvfqhBgi4rtYAr22wdc1z8/2G+Au51y43icym2BmRWZWVFJSEmuNzd+cP8Or34M5T/pdiYiksVi6XIqB7tXudwM21GhTCLxg3ljtfGCsmVU6516t3sg59wTwBEBhYWHqnLTzzFtgzfswfZLXBdPv635XJCJpKJY99DlAPzPrbWaZwDjg9eoNnHO9nXO9nHO9gJeAm2uGeUoLBOGyP3uzM754PWxa5HdFIpKGGgx051wlcCve6JVlwFTn3BIzm2hmExNdYNLIyoGrp0JWXnQ442a/KxKRNBPTXC7OuenA9BrLav0B1Dl3/bGXlaTyunhj1Oc8CS3a+F2NiKQZHSkabwUnwkW/hYws2P4lROr9nVhEJG4U6Imyqxj+OBze/LHflYhImlCgJ0rrbjB0PHz0B2+GRhGRBNN86Ik06uewYzXM+C9vOGP/kX5XJCIpTHvoiRQIwmVPerMzvnQDbFzod0UiksIU6IlWNZyxVT5sW+l3NSKSwtTl0hTyCuCWT7yRLwDhSghq04tIfGkPvalUhfnbD8CU8V6oi4jEkQK9qeUVwIqZMPVaKN/ndzUikkIU6E1t2Hdh7P/CZzPgr5fC/h1+VyQiKUKB7ofTboJvPQUb5sHkMbBrvd8ViUgKUKD7ZdAlcM3L3o+jAf1AKiLHToHup97DYcJsyO3knZ90/Ty/KxKRJKZA91sg+k8wfRI89Q34/C1/6xGRpKVAby5G/wLy+3lzqc9/3u9qRCQJKdCbi5yO8O1/QK+vwKsT4f1H/K5IRJKMAr05yc6D8S/CoEvhrXth+TS/KxKRJKLhFc1NRhZcNhn6jYL+Y/yuRkSSiPbQm6NAAIZe5V1/+S48fzUc2ON3VSLSzCnQm7tdxbBiBjx9Iezd6nc1ItKMKdCbu6FXwZXPwpalMHkU7Fjjd0Ui0kwp0JPBCWPh2ldhbwn8eSRsWux3RSLSDCnQk
0XPM+GGmZCRCXs2+V2NiDRDCvRk0mkg3FoEx30NnIMPfgdlu/yuSkSaiZgC3cxGm9lnZrbSzO6uZf14M1sYvXxgZifFv1QBDp0oY/08eOs+eOxsbySMiKS9BgPdzILAo8AYYCBwlZkNrNHsS+Bc59yJwAPAE/EuVGrodip8ZxYEQ94ImFk/gooyv6sSER/Fsod+GrDSObfKOVcOvABcXL2Bc+4D51zVmRo+ArrFt0ypVfdhMPE9KPwOfPh7+NP5mltdJI3FEuhdgXXV7hdHl9XlRmBGbSvMbIKZFZlZUUlJSexVSt0yW8EFD8P4l6BtL29OGBFJS7EEutWyzNXa0Ox8vEC/q7b1zrknnHOFzrnCDh06xF6lNKzf1+Gq570umE2LvW6Y7V/6XZWINKFYAr0Y6F7tfjdgQ81GZnYi8CRwsXNuW3zKO9KmXWXMWrKJ+et2smHnfirCkUS9VPLatQ42zIfHvwLznvFGxIhIyotlcq45QD8z6w2sB8YBV1dvYGY9gFeAa51zK+JeZTWfrN7O95//9LBl7Vpl0jE3i4552XTMzaJTXhYdc7Ojy7zbHXKzyA4FE1la83H8GPjeB/Dq9+D122D5dLjot+qOEUlx5mLYezOzscBvgCAw2Tn3czObCOCce9zMngQuA6qOS690zhXW95yFhYWuqKio0QXvLqtg9dZ9bNldxpbdB9hc6l1vKT1Aye4yNpceoGTPAcKRI99X6xYhOuVl0aVNC84/viOjB3emU152o2tIGpEIfPwY/PO/Ia+LN4Y9qAk2RZKZmc2tK19jCvREONpAj0Uk4ti+r/xg2JeUVgv+3WWs3LKHL0r2YgaFPdsyZnABowd3pkubFgmpx3dblnn96SeMhfJ94MKQlet3VSJyFNIu0GOxcstupi/axPRFG1m+aTcAJ/dow9jBBYwZ0plubVv6VltCTbsTPp8FX/8pDLj40DlNRSQpKNAbsKpkDzMWe+G+ZEMpACd1a82YIQWMHVxAj/YpFO5rP4Y3vg8ly6HziTDiJ94IGattMJOINDcK9EZYs20vMxZvYsaijSwo9uZJGdQlj7FDChg7pIDe+a18rjAOImFY9CL8+39g5xrocRZc95o38ZeINGsK9KO0bvs+Zi7exPTFG/l07U4ATuicy9ghBVx6cle6t0vyPffKcvj0r7B9FYz6ufcj6ubFUHCi35WJSB0U6HGwYed+Zi7exIzFGyla481ycHbffK4Y1p2RAzulxpDIxS/DS9+B478BI34EnQb5XZGI1KBAj7PiHft4aW4xLxYVs37nflq3CHHJ0C5cMaw7g7q09ru8o3dgN3z0mDct74HdMORyOO8eaN/X78pEJEqBniCRiOODL7YxpWgdsxZvojwcYXDXPK4s7M5FJ3WldcuQ3yUenX3b4f1H4OM/Qrgcrp/mnWBDRHynQG8CO/eV89r8DUyZs46lG0vJyggwenBnrizszhl92hMIJOEokt2boWgyDJ/kHZC0+GXodY6OOBXxkQK9iS1ev4upRet49dP1lJZV0r1dC751ancuP7Vb8h68tHcbPDwAAkE4fSIM+y60rm/STRFJBAW6T8oqwsxasompRet4f+U2zOCcfh24srA7Iwd1IhRMsoN6tq6E//yPt6eOQZ9zofBGGHiR35WJpA0FejOwbvs+Xixax4tzi9m4q4yOuVlcfXoPrj69Bx1zk2w+me2rYMEUWPA89B8FY3/lndt040LoebaOPhVJIAV6MxKOON5ZsYWnP1jDOytKCAWNsUMKuO7MXpzSow2WTEdsRiJQud87ycbcp+CN26F1DzhpnHfR6BiRuFOgN1Nfbt3LMx+u5qWiYnYfqGRI19Zcd2ZPLjypS/KNay/fB8unwYLnYNV/wEWg++kw4sfQe7jf1YmkDAV6M7f3QCWvfLqeZz5Yzedb9tC2ZYhxp/XgmjN60jUZf0Qt3QALp8D852HsL6HPed4cMgdKoc/5msJX5Bgo0JOEc44Pv9jGUx+s5p/LNgMwcmBnrjurJ2f2aZ9c3TFw6ExJZjD127D0VcjpBEO+5fW9dzsNQkn2+4GIzxToSah4xz7+9tFaXpizlp37KujfKYfrzuzFpSd3pVVWEu7hVpbD5296P6SumAWRCsjIhhvfhIKTvIOZslt7wyJFpE4K9CRWVhHm9QUbePqD1SzZUEpudgaXn9qNS4Z25cRurZNvrx2grBTWvA9fzoav3uftpb8w3lvW6xxvOGSf86FdH03rK1KDAj0FOOeYt3YHT32whhmLNlIZcXRt04LRgzszZnBnTunRNjmPRq2y7A34bKb3g2ppsbcsrxtc+wp0OB7CFRBM0qkUROJIgZ5idu4r562lm5m5eBPvfr6V8nCEjrlZjB7cmdGDO3Nar3ZkJNtBS1Wc88a5r/qPtwd/6eMQagFTrvEObOpzLvQ+F7oMhdwC7cFL2lGgp7DdZRX8a/kWZizaxH9WbKGsIkK7VpmMGtSJ0YMLOKtv++Q7IrU2c570hkWu+QAqy7xl2W3gOzOh4wDY8KnXT99xAGTn+VurSAIp0NPEvvJK3vmshOmLN/GvZZvZWx4mLzuDrw3sxNjBBXylX37yjW+vqaIMNsyDTYthyxIY+TPvhNdTrvG6bcA7uKnTQOg4EIaOh/zj/K1ZJI4U6GmorCLMe59vZfrijfxz6WZKyypplRlkxIBOjB7UmZN7tKGgdXZy/qham13rYdMiL+Q3L4UtS2HrCrjudeh1Nrz/W2+ETceBXti36Qmtu0N+P2jZzu/qRWKmQE9z5ZURPly1jRmLNvLm0s1s31sOQOsWIQYU5DKgII8BBXkMLMijX6ccsjKSfC++SmU5WMA7kGnRS97BTpuXHvrRFWD0g3DGRPjiX/CfB70ZJFt3836Qbd3N+0FWUxhIM6JAl4MqwxEWFO9i6YZdLN24m2UbS/ls0272V4QByAgYfTvkHBb0Awry6JCb5XPlcVRWCruKoXS9t4fetpcX6O8+fGh52Pujx9Br4JJHvW8Az1wEedHAb5UPLdpCmx4w+DKv7daVkJXjLc9Ioe0lzcoxB7qZjQYeAYLAk865/1djvUXXjwX2Adc75+bV95wK9OYjHHGs2baXpRtLWbaxlGXRoN+4q+xgm/ycLAYU5DKwII/jO+fSukWIFqEg2ZlBWoSil8wg2dHboaAlb3dOJAL7tsKudZCZ4+2l71gDb/3EC/bS9bBvmxf6nYbA997zHveLHnBgl3c71MrrymnRxjvjU3ZrbwKznWuhRTtvQrPMHMhs6R0xm9PBO7iqbJe3LtTSu2jmSqnhmALdzILACuDrQDEwB7jKObe0WpuxwG14gX468Ihz7vT6nleB3vzt2FvOsk2HAn7phlJWbtlDeTjS4GODAfMCPxSkRWbgYOhnRy+hYIBQ0MgIBggFjIzDbgfICBqhQPQ6GCAjujwz2i5gYGYEzAgYBMyw6HXVMqtlXfU2Zt6oR+Pw9la9PRzWtuo2zmGV+wlU7iPcIh+A3C/+QbBsB4GyHQTLdhA8sJNg2U6KR/0JAhl0fXMCuavfxFz4sG21dMRkSjoP
p+vixzhu4UOHrasMtmBJr+uZ2/v/kFv6Gecse4CKQAvCgUwiwRCRQCa7WvVh4XETCQaMwhUPgwUhmIkLhiCYCcFM1vUdTyAjRH7JR2RW7MQCGTgL4iwDAgH2tB9COKsdmfs2krV/M1gQZ0EIBHGBEJVZbQm3aIeFDxA6sNM7otcCQAAX8J4nEmoJgIUPAIazQLSN1Tq81DmHO3g7eo2Dg7drLK92vyEN7UsY0X9TDv17N3ibav/+Mbye94ja27RvlUnHvKOb9qK+QI/lGPLTgJXOuVXRJ3sBuBhYWq3NxcAzzvvr8JGZtTGzAufcxqOqWJqFtq0yOatvPmf1zT+4rCIcYe32few9UMn+8jD7K8KUVXjX+8sjh+5H1+2vCFNW7fb+8jA795VTHnZUhiNURhwV4QiVYUdlJEJFdHlFxLuO+NMjeJRaRy+9Dl+84sPojWuBa+3qQlkAAAmhSURBVMhlPy0po6UdoCVlrJ0eYTef0N86MsQm0tLKaMkBb31lGR8ua8W/lizleFtLpwyjpe0iRCWZVJJJBRtdKf+9zPvvuDDrRbKoIMsqDivhwk8GESHAc6EHOSW4lJquLv+/fBAZzG3BV/hh6KUj1v+u8hIeqryCYbacF7N+esT6TyLHc0X5fQB8kTWeoB3+DxdxRr8DzxAmyF9CD3JmYCkOw0VDzwE3Vkzio8hAJgTf4NaM17xAP7je+FPlN/hD+GJOts95IrPqD9+h55gf6cuEih8C8F7W9wkQOezxAOce+DVhgvw69Cin2oqD9VWt/0HFzcxz/bkuOIvvBGdWW+/5a3gkk8NjGGKreCT0+yMev8j15o6KWwF4M3MSAY78AI8s/yUTzu3H3WNOOGLdsYol0LsC66rdL8bbC2+oTVfgsEA3swnABIAePXo0tlZpBkLBAH075DTZ60UijorqQR8Nfucg4tzB68jB+4duRyKHt3FULY/ejhxahuPQcxy27PDHVz1/Q51JDe0hBgM1v4UYGYGzD/tGEop+U7kkcOjbTEZgAhkBw+F1lYUjjg7OMT/iqIw4yiJfsifiCIcjhCsriITLcRVlTMtsQzjiCOyezJKyUsxVQiSMRSrBhbmzdT/CWXlk7urC8tKxWCQMB9uEOad1X05tN4DQ3t6sLA5hOHBhzEXAOVq36MCzvbxYWLf4TowIuAjmnHdNhGdOPAMsQMcV32LrnjXeOgBzmIO7+p9HWV4f2mwoZ9/6LKpi1KLRfmnXcxnR/UyydxfA4hXeencosgfm9eTlIWfiHGS9P+LQ80fbAUz5ylk4C9B54Xyyd0bPjVvtm8KPB57O3jb9aLd2Jy3W7Yg+7NBXhku6FnJGt1NpWdqGFksLDz1/1ODc3vxx0Kk4B7kfnXTYuip/OP0UendIzLESsXS5fAsY5Zz7bvT+tcBpzrnbqrWZBvzCOfde9P7bwH855+bW9bzqchERabz6ulxi+cWlGOhe7X43YMNRtBERkQSKJdDnAP3MrLeZZQLjgNdrtHkduM48ZwC71H8uItK0GuxDd85VmtmtwCy8YYuTnXNLzGxidP3jwHS8ES4r8YYt3pC4kkVEpDYxnSnBOTcdL7SrL3u82m0H3BLf0kREpDF01IKISIpQoIuIpAgFuohIilCgi4ikCN9mWzSzEmCNLy/esHxgq99F1KO51wfNv0bVd2xU37E5lvp6Ouc61LbCt0BvzsysqK4jsZqD5l4fNP8aVd+xUX3HJlH1qctFRCRFKNBFRFKEAr12T/hdQAOae33Q/GtUfcdG9R2bhNSnPnQRkRShPXQRkRShQBcRSRFpG+hm1t3M/m1my8xsiZndXkub88xsl5nNj17ubeIaV5vZouhrH3E2kOh0xb81s5VmttDMTmnC2o6vtl3mm1mpmd1Ro02Tbz8zm2xmW8xscbVl7czsLTP7PHrdto7Hjjazz6Lb8+4mrO9XZrY8+m/4dzNrU8dj6/08JLC++81sfbV/x7F1PNav7TelWm2rzWx+HY9N6ParK1Oa9PPnoqftSrcLUACcEr2di3ci7IE12pwH/MPHGlcD+fWsHwvMwDsj2hnAxz7VGQQ24R3w4Ov2A4YDpwCLqy37JXB39PbdwIN1vIcvgD5AJrCg5uchgfWNBDKitx+srb5YPg8JrO9+4M4YPgO+bL8a6x8C7vVj+9WVKU35+UvbPXTn3Ebn3Lzo7d3AMrzzoCaTgyfnds59BLQxswIf6vgq8IVzzvcjf51zs4HtNRZfDDwdvf00cEktDz14MnTnXDlQdTL0hNfnnHvTOVcZvfsR3hm/fFHH9ouFb9uvipkZcAXwfLxfNxb1ZEqTff7SNtCrM7NewMnAx7WsPtPMFpjZDDMb1KSFeWeYfdPM5kZPsF1TXSfnbmrjqPs/kZ/br0onFz2DVvS6Yy1tmsu2/A7et67aNPR5SKRbo11Ck+voMmgO2+8cYLNz7vM61jfZ9quRKU32+Uv7QDezHOBl4A7nXGmN1fPwuhFOAn4HvNrE5Z3tnDsFGAPcYmbDa6yv7dzyTToO1bzTEl4EvFjLar+3X2M0h235I6ASeLaOJg19HhLlMaAvMBTYiNetUZPv2w+4ivr3zptk+zWQKXU+rJZljd5+aR3oZhbC2/DPOudeqbneOVfqnNsTvT0dCJlZflPV55zbEL3eAvwd72tZdc3h5NxjgHnOuc01V/i9/arZXNUVFb3eUksbX7elmX0buAAY76KdqjXF8HlICOfcZudc2DkXAf5Ux+v6vf0ygG8CU+pq0xTbr45MabLPX9oGerS/7c/AMufcw3W06Rxth5mdhre9tjVRfa3MLLfqNt4PZ4trNGsOJ+euc6/Iz+1Xw+vAt6O3vw28VkubWE6GnhBmNhq4C7jIObevjjaxfB4SVV/132UureN1fdt+UV8Dljvnimtb2RTbr55MabrPX6J+8W3uF+AreF9pFgLzo5exwERgYrTNrcASvF+cPwLOasL6+kRfd0G0hh9Fl1evz4BH8X4dXwQUNvE2bIkX0K2rLfN1++H9cdkIVODt9dwItAfeBj6PXreLtu0CTK/22LF4IxO+qNreTVTfSrz+06rP4eM166vr89BE9f01+vlaiBcyBc1p+0WXP1X1uavWtkm3Xz2Z0mSfPx36LyKSItK2y0VEJNUo0EVEUoQCXUQkRSjQRURShAJdRCRFKNBFjoJ5M0n+w+86RKpToIuIpAgFuqQ0M7vGzD6JzoH9RzMLmtkeM3vIzOaZ2dtm1iHadqiZfWSH5iVvG11+nJn9MzrJ2Dwz6xt9+hwze8m8ucyfrToqVsQvCnRJWWY2ALgSb1KmoUAYGA+0wpt/5hTgHeC+6EOeAe5yzp2Id2Rk1fJngUedN8nYWXhHKoI3m94deHNe9wHOTvibEqlHht8FiCTQV4FTgTnRnecWeBMjRTg0idPfgFfMrDXQxjn3TnT508CL0fk/ujrn/g7gnCsDiD7fJy46d0j0LDm9gPcS/7ZEaqdAl1RmwNPOuXsOW2j2kxrt6pv/or5ulAPVbofR/yfxmbpcJJW9DVxuZh3h4Lkde+J97i+PtrkaeM8
5twvYYWbnRJdfC7zjvPmsi83skuhzZJlZyyZ9FyIx0h6FpCzn3FIz+zHeWWoCeDP03QLsBQaZ2VxgF14/O3hTmz4eDexVwA3R5dcCfzSzn0af41tN+DZEYqbZFiXtmNke51yO33WIxJu6XEREUoT20EVEUoT20EVEUoQCXUQkRSjQRURShAJdRCRFKNBFRFLE/wfQbJrYpKBGxgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "hist_pd = pd.DataFrame(hist_obj.history, index=np.arange(1, len(hist_obj.history['loss'])+1))\n", + "hist_pd.index.name = 'epoch'\n", + "sns.lineplot(data=hist_pd)" + ] + }, + { + "cell_type": "code", + "execution_count": 30, + "metadata": {}, + "outputs": [], + "source": [ + "model_name = \"model.h5\"" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "model.save(model_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "# model = load_model(model_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Testing Accuracy" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "def test_generator(x_test, resize_shape):\n", + " for sample in x_test:\n", + " img = cv2.imread(sample, cv2.IMREAD_GRAYSCALE) / 255.0\n", + " res_img = cv2.resize(img, resize_shape[::-1], interpolation=cv2.INTER_AREA)\n", + " res_img = np.expand_dims(res_img, 0)\n", + " res_img = np.expand_dims(res_img, 3)\n", + " np_img = np.array(res_img)\n", + " yield (np_img, np_img)" + ] + }, + { + "cell_type": "code", + "execution_count": 34, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "MSE Loss: 0.07084273546934128\n" + ] + } + ], + "source": [ + "steps = len(x_test)\n", + "test_gen = test_generator(x_test, input_shape)\n", + "loss = model.evaluate_generator(test_gen, steps=steps)\n", + "print(\"MSE Loss:\", loss)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Sample Prediction" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "img = cv2.imread(x_test[0], cv2.IMREAD_GRAYSCALE)\n", + "img = cv2.resize(img, input_shape[::-1], interpolation=cv2.INTER_AREA)" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXcAAADECAYAAABk6WGRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOx9eXSUVbbvr+aqpCqpzANhSAgkkDCEQEBEBkHupZkntVEc2qG7tbu1u7Ubve29IKJt215akBlU2kYBEZlRoEEBAYFAICSBEDJVUpVUKjXP03l/5O7DqTR631vv+p6rV85aWZm++r5z9tnDbw9nfxLGGHpGz+gZPaNn/HMN6f/vCfSMntEzekbP+J8fPcq9Z/SMntEz/glHj3LvGT2jZ/SMf8LRo9x7Rs/oGT3jn3D0KPee0TN6Rs/4Jxw9yr1n9Iye0TP+Ccf3ptwlEsm/SiSSGxKJpE4ikSz5vp7TM3pGz+gZPeMfh+T7qHOXSCQyALUA7gPQAuACgB8zxqr/xx/WM3pGz+gZPeMfxveF3MsA1DHG6hljQQDbAcz+np7VM3pGz+gZPaPb+L6Uey8ABuH3lv/6W8/oGT2jZ/SM/wdD/j3dV3KHv8XEfyQSydMAngaA+Pj40gEDBkAqlUIikSAajd7+EGOQSCT8ezQa5T/TF11Pfxc/CwAymYxfQz/T58Tv/zUv/rw7hawYY5BKpfx/9DlxTuKzu3+W7i0+606D5kvPutN9u9OFrhc/Lz5PnCP9TVzLtz3nTv8T70N/F/dNpHn3ffk2GtM6xDndadAau6+Tfqf/S6VSRCKRmOvF/bvT3tP1d+KlO/Em3U98Xvc9FXm1O63pHvQzPVtcf3d63Gmt3elP13wXr9LP3e9D14rP7L4v3flYHOKe3ImG4prp793XKtL122Sx+1q779l3XSd+0XPEPerOV3Qd8QDxDf3tTrQQ6Squwe12IxwOx9xLKpUiGo1CLpcjEolAo9FArVaDMQa5XH7HvWWM4cqVKxbGWNqd1vt9KfcWAL2F33MAGMULGGMbAWwEgJKSEnbq1Cn4fD4oFArEx8fD7/cjEonwxUajUSgUCoRCIUilUiiVSvh8PkQiEchkMiiVSoRCIQSDQSgUCqjVakSjUXi9Xmg0Gv75QCDACUmMpFQq4fV6oVar+bNCoRDi4uLAGINMJoNMJgNjDOFwmOaPYDAImUzG5xQOhyGVSqHRaBAOh7mCUyqV8Pv90Gg0CIVCAIBwOMyZSKVS8WsDgQDi4uLg9/v5fGn+4XAY4XAYKpUK4XAYCoUCCoUCQBfD0HW0PtG40VxpfcRAtB66XyAQgFwu598ZY9BqtXyetMZgMMifFQqF+N+JXnQNACiVSsjlcni9XsjlcigUCgSDQahUKng8Hsjlcv55uVyOuLg4uN1uzsQi7UVhpDkrlUp4PB4AXcpBKpVCpVIBAH8u7VMkEkF8fDy/H63L7/dDJpNBpVLxdZGyBwCfz8fXp1KpYtYvCjrNIRKJxAi8XC7nfBSJRKBSqRAIBDj/0npofzQaDQAgEAhApVJxugLgvCCTyfj+KRQKyGQyKBQKOJ1OPh+aE32WPk/8pVAouOIIBoP8c0Rzmm8gEOAKinhQNI6MMYRCISiVSkilUs6XwWAQUqkUfr+fK19SWsT/3UGR+Hka4XCYy45UKkVcXByX9Wg0imAwCKVSyb+TUiQ6KRQKTt9QKASfzxdzD5J9mjPJYPf5SSQSTneJRAK/3/8PPOfz+QAA8fHxiEQi/FqZTAa73Y49e/bA4XCgb9++CAQCcDqdSE5ORjgchlwuh9vtxuDBgzFixAiEw2FOQ9oX4rtwOIzMzMwmfMv4vpT7BQADJBJJLoBWAA8CWPRtFxPh5HI5Z2RS1kqlEpFIBD6fD4wxqNVqyOVyhEIhaDQazjD0f6VSye9HhKdNJ4VHm+92u/n/RcEkwkUiEX4tCQRdQ0aCrhEFnZjQ7XZzxSEyRygU4sxLwkn/J+Yk5UjXk0IRGYWUJ93H7/dDrVZzRibGJgVCzyDDR/cgmhFtIpEI1Go1p0koFOL3IeVBCpqUIN2LDF1ycjJCoRAkEglXogqFAnK5nD+Lng10KSdaK92ju3GnQfMlY0C8Q8qGhEyhUHABBQCVSsXvSTSLi4vj641EIhx5E32IjqRQ/H4/fD4fv4dIHxrdkSbxq2iobTZbjPInOtLvpExJAdHfid/p+aQIRV5QKpV8PqS8iGcZY1xuCPzEx8dzQ0m8QvtJfED7IK6R1k3rpeeSQiOlKpFIOL/TNSQjdD3RgvZI/DzJlGgY/X5/zJxkMhk3hEqlkssvyUsgEIDH4+FGkPiOaEl8QWBMlEviYaK5yI9yuZzLPfEU/c3pdEKtVvM1yWQyOBwORKNR6PV6zndk+NRqNdRqNZRKJeLj4wF06TDaH5qLRqPh+uy7xvei3BljYYlE8gsAXwCQAXiPMVb1HddzZEkM5fF4oNFoEAgEEA6HuUKKRCIx1xJDkhImYWSMcYYiZNldKZJSjkQi0Ol0HEkBt11/v98fgzoIZRPCA8CFXyKRIC4ujjOZVquNEUS6nhQKAK50icG1Wi1n/lAohPj4eK4YJBIJEhISuCEhKx6JRBAXF8f/HggEANxmTELiNAi90M+ErERDRcJLghmJRLhAkdKkudNn1Wo1tFotIpEI7HZ7DCokoxkKhfjeiCEQMqgi4lapVPD5fFz5uVwufn8SFgIA5NKq1WoEg8EYD4IUHX1G9HpISEQh745O4+Pj4fV64Xa7uaDR/UVjQnxJKC4UCnFaktGg5xG/it4ozYf2gQw6zYsUBxlZuVzODay4b3Qvuo/oUXUPqYXDYfh8PkSjUa5YRENvt9s5AiXPjp5JaxTpRWsjQ6JQKPiz5XI55HJ5jFdOIIa8RK/Xy2UM6PJc6LMAuEdDMkNKX+Qvl8sFjUbD10tzSUxM5DJAQI2QskQigVarjZERmq9MJoPL5eL8TushIy+GwORyOTweD5ddkj3igbq6Ok4rrVYLp9OJ1NRUHmHw+XxISUnhtI2Pj48BWRqNBjabjXs/3zW+L+QOxtghAIf+d64VLbvP5+Pok/5OTCCXy+Hz+fhmd7f2tBkiwgDA7wGAKyhSFiQgTU1NyMnJ4Z+nEA+hmHA4zAUyEAhwhRAfHw+Px8OZjTZORMXA7TAMXSfmDmgQIxBioefabDZ4PB5kZ2cDAAwGA7lkXNmKFp2UGAkOKV5SyiJaF0MTJKgulwtKpRJxcXGc7iSohKJEZSoaRKIfMSVdzxiD1WqFXq/nSk6tVkMikcDr9fLPk3EmpC+VSnnoRtx3ETUSD4nhBlIO5JqTYae/0f5S6I0AhshXpIhsNhvfUworkKDRfAkwEL8QkhQVslQqhcPhQCQSQVZWFucBMQRBtCQPoXs8mBBtMBiE3++PQak0h4SEBLS2tkKj0SA5ORkA+H4R0CHa0jOIZqLRpPVEIhF4vV5Eo1HExcXFKD1an2gUiEbi3Cl0YjKZkJWVBbfbDa1Wy/kdAJcv4n8Kb4h7JoZoSQ6JB8kYkk
uXCtcMn0Vhd3NtB9eVewXAAAVKCKpKXcH7cowUcvQg1QI7tT6DF5FGer1eeM6BGwRiXFvGpXfv3o3y8nIEBgZKTUVHRwdcLpfw8vj7+6OlpUXCQEB/DcD169elmTqbobe1tUni0+Fw4L333sO0adOQnJwsbf0cDgfMZjNKS0sFnkj4JhUo559oGoZKExMTBXba3d0t8G1y1pCXpqurS5Qtcyf0Ljk36l7zevu7LdGip4FLEAafx2peQl6NRiMeffTR/xVx2P+Viy8N3OAeUcm9ePn5+eHAgQOIiYlBc3OzJCQDAgJEs7vdbvj7+0unE5XgyuPxDGjNxkOsfs8CHP6MiobkQMANXL5qaVNRkJOaFytqKcz5XhQE5H5XebKJbKCC4KGgEgFuVCCqDIhMUGnajfJ/9T7qoWRrvPDwcAAQ7O65c+cwbtw42eQUFmplJ59P74ZzR5SGWnFJYcmvHJu64YnvJ00qN7TKs8N15fypBVINDQ04cuQIVq1aJUKEh4DKkTFRjqOzs1PuwfGwUERdK1r0VMKkO1B5vUNCQiSE1tHRgfPnzyM8PFzCH3wHriP3D4Ut95xKquV2u3Hu3DlERkYKR7jqZXAPsUqU863uEZ4ftdqT+5o/V6uL+TtCcjnXnAO1aEs1QPi39GJ5jqigaczQWCouLkZlZaVUcjocDjnDNJ5IPMZ6BavVKsItNjZWmmLQG6GC8ng8OHfuHHp7eyU/FhISIh603W4XeCFpevkuNFgYJmpvb5f909raiq6uLqSnp8s8cr2Mxv7ObmrXN3qKVHIcG9DvSVFuUcGqRiyNmZ6eHtkvapEdPdAfun4UljubdajJVB4GWoacFLXEmxuB1hf5VNQNqfKb0DIiZJDWam9vr/AzAxDaUpU5kUJCrTrkAeAB4oZkQQKtMVKK8r3IC0JLRw1HcWFVAiEqDL6/SrTEv7nZuubPVa4UtdiDB4f/p9ADbtAaEIvf29sr92CmnvzkFPAUdBTs/L9er0dHR4esDalLu7u7JcnMNablybmmYuUa8DMUtHw/Vvdy7/DdTSbTAKoKtRJULavnAeReUNedBWFUwirEkIeaFAyqQuBaM4nGIiti9LkfKMjpIfDwUpHQ6lWpIG6ucOWaqZa0mkS9eV05h/SE1HsAGOAlcs55/tRqX1qZKucPgAGKgnuX73v9+nVs3boVbrdbvCh/f38kJCQICoaUAGFhYXA6nVKkyGpx7iPmxnQ6HaxWq1RMs7G0x+OBw+FAZ2cnOjs7Zb7ZTL2zsxNnzpyB2WyGy+VCaGioxM05f6Ghobh48SKKiopkz8bFxUlxWltbG0wmExoaGpCamioWNnMlvAdRPlTa9NiIzElISBDjk3uJ600DhEqO62cwGLBs2bIft+WubiAeNGbKVWwuhSI1GDeTak2oiRlOws0bWbV4KRCcTqegJijUurq6BnR+ASBUrKzG4wEwGAzSlZwVcbTSVD4SCl7ySdBSMJlMIiQoROim0QKgUOM88WAxSUMLnYefn1ddbgBSsamik26GAnJcFJwUbnxPxghpQdC15aYjTIwhFlXoEclAOB09HlpxDAHodDfoiQGIu07hrc4nQzWsxGQpOoUvk/RqKE4NU1GoU4lwjFS2FCpcT747lQvng6EMtZuY2oWJio6GAr0Xrgu9Kd6X+5e0sjcLZioqCnLVg1L5jWiMqEpNDSmq3oDT6ZRCO+4fKhWurcFgkLWg4KWSYsiOysnn66/6Li0txb59+wD0e+VxcXFS8TpkyBD09vYiLCxM9lh7e7vkFVgEWF9fD7fbLbkyhkJiYmKk6M1qtUq/geDgYAQEBMBkMuHq1asYPHgwzGazVCZHRkYiMjJS5j4sLExChKQqSEtLE0/MYrFg+vTpQlsQERGBtrY2mM1mtLS0yPP8/PzQ2dmJuLg4dHV1SbgpKSlJPAiPx4OIiAghC+Rzudc6Ojpk/QAMqGqnLPqh60ch3LmxaAkBEMuXFguLDngYaN1wA1PQqZYrIVvqxFFYq1a3Stur0hIwLMGDzL8HblAIM5RCxaBpmpAvUdEAkHvw/nwHChkmLfkcCi5aunyuim0lMRHnTH1/xvk4Rwyf0FJU76N6SjzI3FAGg0Goc2n58HkA5P+cDwo/fk+Fw/WkwAduxNepIEimRAWhJgQZTuN7UinfbM2q5f+0mKlo+Xf8qj6L+4lry7GrcXm1cpXvxMPHEnuG/KhsqCg5VnLC8L3J5Mj5UGmdmVfgPuNeVmtB1FwK9446x/wdFbVqcHDe1dAYBT0VuXp/ho84xyq0j3uWXpvqfQDAyZMncfXqVRHIYWFhEg6JiooSfn2z2QyDwYD6+noxWOglmEwmURZ2u11i3aQvYFiPXiWVYE9PD5qamgbMCYuACH5gopjngrkqem96vR7R0dFITk5GdHS0KB2n0ynnHein7khMTJRnkUaAeQHOE88hzw+fq8oHnhc1Ka6GZNTmM//d9aPAuQMDtRAXlIfvww8/xKOPPopt27YBuNFTNDQ0VEiKuKEYFuHhJLEQDzeFNSef4REKRmAg5zoLmbiRabXx9zwkDBExfs7n8zCqMWC206PFpWkayIwJ3BCoPT09Ysn99a9/FU+Dmpshh5sFGq1Ujl0V+rxURsuAgAAZExVaY2OjHGhicmnFU2C9//77cDgcwndy8+8ZJ2fMU3XdObdEGISFhWHt2rVYuHDh/3FAmWBvamqC3W4Xha8mYhkGYjhBpXqw2+1YvXq1wNb4O5XYDLhxEIEbyUcqj9DQUHH1NU1DfX09fv/73+Obb77Bo48+in//93/HwYMH8dRTT+HXv/41Tp06hRdffFF6n/JiolRFTlCpUqHxUq1xehJqUpl70OFwyLj5LO4VCtibk3gMKcybNw9HjhwRlIrKxsnzoCb61PyNavWrhpBerx9ALHbmzBkUFxfDbrdLyIOFT8nJyWK9+3w+1NTU4Ny5c+KdJyUlISwsTPhoCGFmEREL4LgHOjs74XQ6xUvhXiZCJywsDMAN44wKwGAwIDc3F83NzXC5XCguLsbp06fR0tICp9OJoUOHDjBODh8+jDNnzsBiscBsNksLRnq5DQ0NSExMlIp0GqdcM7Wwil8ZGqLRw8Q9140yg57BH/7whx+UqT8a4a4KIQpFoB/XuX79erhcLnR0dMjveMAZkgBuxAqBG+gRusT8ucfjkZgnDxI3NAU1Dx7j44x1qVY1v+eCUiEQpaIKU1rgKsmRqhgYM+fXjo4OHDp0aIDQ3bZtm8SMaSWqsX+GBvhM1StQ49mcY4YqVE+JeOje3l6sWbNmwMFR7wP0k6R98cUX0tScAhW4IcDa2trw/PPPizDiXFJIMH4PQFx4t9s9wLJmCMhoNOJPf/oTTp48KUqMio17hUlJ0jwwPBEaGor6+nph8DMab1AIc314aPh3XG/1nbxerwjrEydOYOXKlZg2bRrCwsKwfPlyTJ8+HRMmTIDb7UZOTo4geRhfZshGtUZpBHB9OMdcGypoNUENQBo5A8Dzzz+Pw4cPi3HBc8F9zzOjvpPRaER8fDw6OjqQnp4usEYiaUJDQ
0WYqNBKCiYaLRROKq6eHgyF0rlz59DV1SVQQjUkSOOMqCyvt79gkbmR7u5utLS0oKqqCp2dnWhpaZGWiTExMTIW1ftm6IvKjUqotrYWHR0d6OrqgslkknAn2SBramoQExOD8PBw+Pv7Y/DgwTJ3DN8ZjUa0tLSgo6MDXq8XsbGxAvHkenm9XunOxjg9G2YzXEUvR6fTCZ1IdHS0jJfFWmpITM0V0qP/oetHI9wZX6Vmo3D68MMPkZOTg08//RQLFy4UjQf0E+kQ8fDfCUsAA4Qm3e3AwEBx8QAIjaeakLrZhaN7rFrvPIz8yrAAK+cAiPCl0mLHF1V4mc1mcfe7urqwc+dOFBYWimusaRr2798vh5zCjvFP3lNlkKPAYjKOQuNmS0tVRrSSv/jiCxQXF8sB45pQKHKjf/nll0hOThZ0gYqk8fPzw/79+1FcXCzCi+Ph+vl8PrGk9Ho9tmzZghUrVgxAflDp+vv748KFCxgxYoSEQ6hMAUhpt8p6SKXCWLcqgPjeXFvOARUGFSkFntvtFte/sbERV69eRXR0NPr6+rBnzx7BxZtMJkyePBnbt2/H/PnzodPpYDKZxOLX62/wq/Owcq+oqDCGfbimnFsaHWyq7PP5UFxcjPz8fLknz4hqADFExT3d1dWFuro63HXXXUhLS5PzRyXAdaRFT2ppCiYaPrwfzxu9HYOhvwBo27ZtcDqdMJlMiI+PR3l5OSoqKlBRUSFFOklJSUKGl5KSgsGDByMuLg7JyclCuTx69GhERETAYDDAarWKl6Vpmuwht9uNwMBAZGRkwO12IzQ0VAwBwiFVtFN4eDh8Ph+Sk5MRHx+Pc+fOwWKxoKKiAomJiYJtZ3FRa2srqqurxVuaMGECoqOj0djYiEGDBgliymAwIC4uDq2trXA4HMJSSzlH74vrROoRvsPN+5DGJtewq6tLqrR/6PpRxNyBgdSk7CV68eJF7Nq1C/PmzUNxcTGOHTuGkpISZGVlwd+/v9vKnDlz8Prrr0vS4qWXXsI777yDpqYmsaIOHDiAu+++Gw0NDdixYwcKCwvx6KOPinBXe4q+++670Ol0cDgceOGFF2A0GnHt2jXs378f/v7+yMnJwZgxY/Daa6+hqqoKd955J8rKynDp0iUsXLgQFy5cQGlpKTZu3ChhjzfffFOQALNnz0ZycjLeffddtLa2YtKkSejp6YHFYsEzzzyDo0eP4qWXXoLdbseoUaPgcDhw5swZjBo1SoqcvvvuO2H1e+SRRyRWSIFw4cIFHDt2TBjuKisr8fOf/xzNzc04c+YMLl68iHvuuQejRo2C2+3Gpk2b4HA4MGXKFLhcLvzlL3/B6NGjceHCBfh8Pvz973/HiBEj4O/vj5KSEvz+97/Hq6++ivz8fEyfPh2BgYFoamrCjh070NTUhNtvvx1WqxWvvPIKioqKcO3aNRQWFsJut+PTTz+VMvWVK1eitbUV3377rSiQmTNnyvpTSX377be4cuUKhg4diq+++grLli2D3W7HN998I2iKmTNnYsiQIXC5XHjrrbcQGxuL3t5eCccsWLAAZ8+exeXLlxEaGorFixfjnXfekbE/++yzkn+hV8ODSOHK2GhcXBx+/etfy95Vcztz586FTqfDrFmzYDAYcPz4cTQ3N+PnP/85/Pz629J98803cLvdaGlpwaOPPorGxkZs2bIFISEhGDZsGAoLC6HX61FVVYWTJ0/i8uXLuOeeezB+/HiBz3GMb731FjIyMrBz50489NBDMBgM2Lp1K6xWK9rb2zF37lxkZ2fDZrMJsdrw4cMxcuRIHDp0CPfffz+am5vx6qumb2i/AAAgAElEQVSv4mc/+xlCQ0PxwQcfyBn46U9/KslcGi6Eir799tuoqKhAX18f1q5dC5PJhFdeeUVCcPv374fT6cTo0aPh8XjQ2NiIadOmiXKxWq2CiCFXitvtRltbG2JiYiSs0dPTg/379wsPEOG6EyZMwOnTpxESEoK0tDTU1NSgqqoKRUVFCAkJwdGjR1FXV4fc3FxUVVVJeKmurg5GoxHZ2dmS1G1paUFoaChaW1tRVFSErq4utLa2SkHR4cOHkZOTA5PJhIsXLwqjZWlpKaqqqqRTU3l5OVJTU0XIp6WlwePxoK6uDna7HdXV1Zg1a5bQOAcHB6OtrQ319fXi3Q8fPhzd3d04c+YMgoODYbFYMHfuXGiahpMnT8JoNErl+w9dPxrLndatWlQzbtw4mEwmrF69Gm1tbdA0Dbt27cIDDzyA+fPnw9/fH1u3bsWMGTPwxBNPoL6+Xiy2efPm4fDhw5g0aRJuueUWlJaWYtasWcLvwQbHKjrC7XbjypUrWLVqFSorK6Fp/RC4NWvWIDk5GY888ghOnDgBr9eLGTNmiBu2atUqtLS0IC0tDcuXL8fRo0fF4n377bexZMkSPPHEE3jvvffg9XpRWlqKO+64A7t27cKtt96Ke++9F+vWrQMA3HbbbdDr9fjJT36CwsJCXLt2DbNmzcKlS5cA9Fu4n332GaZOnQqTyQSbrZ+Qk1aq0WjEoUOHMG/ePLzzzjtYtWoVMjIycObMGaxZswb33nsvDAYDDh8+DK/Xi1dffRUPPvggKisrUVVVhRkzZkCv1+PBBx/EiBEjcPjwYRiNRgwfPhyrVq2Cx+PBxYsXMXfuXFy5cgUBAQFobGzEmjVrsGzZMhw6dAhHjx7F7bffTqgWcnJy0NXVhf/6r//CXXfdhWXLlmH//v1wOBz4wx/+gNWrV2P+/PlCysaYLq2ZqVOnYvLkyXjooYeE8Gzjxo04fvw4Vq9eLdZ6YGCgsBEuXboUx48fFwsyPz8fPp8Pc+fOFQ+mpKRECMaAG4lauryqV0YvgRYtscx2ux0jRowYEOP3eDwoLy/HlClTsGLFCllbTdOwdu1aHDx4EPfffz/27t2LtrY2rF27FqWlpZgxYwZKS0vh8/U3WXnxxReFy+Xo0aPSlo6eV1BQECZPnowVK1bgkUcekUQ5y/+JGPL5fPjzn/+MVatWYfr06SgvL4de39+Crq+vDydOnEBmZqZ4c1evXsWKFStw/fp1ORvcYwy5MKFoMplw6NAhQYfMnTsXb731lpB4aZqG7777DllZWZg4cSIuXbokEEiGwhwOB8rLy/Hll18iIyMDmZmZqKioQFtbG3bt2oWUlBQUFRXh7NmzEtq6ePEiwsLCUFBQgP3798NqtWLMmDHYvXs3vF6vnIvjx48jKioKEydOxOHDh9Hd3Y28vDzs3r1bmvTs3r0bTqcTOTk5EjayWq2IiIjAqVOnBJ1TVVWF5ORkREVFITMzUzzE5ORk4bevq6uDyWRCeHi4UDYz7xMfH4+zZ89KkRWVZE9PD2JjY1FYWIizZ8+it7cXFRUV0DQNmZmZ6OjogJ+fH2w2G/z9/TFs2DBUV1f/S5n6oxDuKtaWuFiGMHJycpCYmIhhw4bhwQcfxIQJExAWFobo6Ghs374da9eulaIClgyvWLEC69evx+9+9zuEh4dj27ZteOCBB0TwzZ07d0BohNaBXq/H9u3bkZWVhcWLF8Pf3x979uxB
XFwcrFYrNm3ahGeeeUYOzKFDhzBu3DgcP34cVVVVGDp0KL7++mux9vR6PdavX4/IyEg4HA7MmzcPBQUFKCoqwgcffIAJEyYgNDQUNpsN06ZNE7cxLy8PdrsdXV1dWLlypYwfAJ555hm89NJLyMnJwdKlSxEZGYmPP/4Yr7zyCl588UX09PTgqaeeQldXF9auXQuPx4PVq1djxYoVSEpKwsaNG7F69Wo89thjkhfIy8vDwoULcd9998HlciEvLw/jxo2Dz+fDY489Ju/Z29uL//zP/0R+fj4+++wzPPjgg/B4PJg4caIw/+3atQs/+9nP5EAUFhZKvHX79u3YsWMHPvzwQ3z++edobm7G2LFjhWvlwQcflLATY74Upq+99hrGjx8vCJRPPvkE//Zv/4aenh5cuXIFI0eOlDj6kiVLUFBQgLVr16K3txd///vfsXPnTuTl5SEuLg7Lli2DpmnYsWMHVqxYgcWLFwO4UWHJcA1DG2q+RC0yCQgIwKFDh/DAAw9IqIlu89ixYxETEwOHw4Hp06dD0zTYbDbs2rULmZmZ+Pjjj7F582YkJyfDYDBg586dWLZsGe6//34AwIIFCxAXF4cNGzZgxYoVePTRRwfUabBW4NVXX5W1cbvdePrpp/Hss8+ip6cHly9flk5CBkM/59FDDz2EZcuWoaysDLt27cInn3yCxMRE3H///TCZTDAajdixYwdyc3Nxzz33ALjRxJror7Nnz2LWrFlYtGgRCgsLJYzKfNatt96K9vZ2FBUVYdSoUQgICMDhw4dRVVWFOXPmIC4uDhEREYiJiUFPTw9CQkKwe/dupKWlCR49KysLb7zxhljBfX19Utw2ZMgQabDj7++PoUOHIi8vD263GwUFBRKzz8/Pl31ZVlaGzMxM5Ofnw2AwoKioCFFRUXC5XEj7nr7D7XYjKioKpaWlEgZZtGgRRo4ciaamJgQHB6Ojo0PoPdrb25Geni6kdT6fD+Xl5RJ9YM9Wr9eLgoICBAYGYu7cufD398eRI0cEo+92u/Hmm2/i3/7t33DbbbehuLgYf/3rX9HU1IS9e/eKkffyyy8jMzNTmrnQ+Pln149CuBOVwZgk0F+Q1N7ejnnz5iE6Ohpmsxnl5eVYvXo1gP44+bfffovbb78dAQEB2LBhAy5duoS4uDhUVVVh//79UiBx9uxZ+d7Pzw+/+c1v0NjYKHFrJkAeeeQRNDY2oq6uDs888wzcbjdKS0vx3HPPYdmyZbjrrrvQ2NiIkJAQfPLJJ8Kz8umnn2LKlCkwGvuJpNasWYMvv/wSjY2NmDlzJgICAnD+/Hk8/PDDOHDgAEJDQ3HkyBGsWLECRqMRW7Zswa9//WuUl5fj22+/xUMPPSShlx07dmDXrl24cuUK7HY7vv32W9mUQH+C+L777sNvf/tbPP/88xKr++STTzBmzBgAELf68ccfx5IlSxAdHQ2n04nHH38cOTk5qKiowHPPPQer1Yqvv/5ahOyVK1cQFBSEESNGiEXK+N9XX32FS5cuyUafMWOGeD82mw0HDx7E8uXLodPpUFlZCY/Hg3nz5gn3fltbG3bu3In58+ejubkZr7/+OhISErB582b09fVJIwKDwYDdu3cLg+SXX36Juro6zJ49WwjKvvvuO+zfvx+bN2/Gm2++iYaGBmzbtg0ffPABrl27hn379uHvf/87Fi9ejI6ODuzfvx+PPPIILBYLxo0bh6effloUKxU3E3PExjNko1bI2mw2fPjhh5gwYcKAOoqKigqsXLkSPp8PGzZswLPPPourV6+it7cXCxYswFNPPYUVK1aIOz5s2DBUVVVh7NixaGtrg7+/P4KCgvD0008L5QC7BrH+AujPJx09ehT+/v7Yu3evNKWJj4/HiRMncOLECRw5cgRnzpxBdnY2iouLMXr0aFRUVGD9+vUoLCzE8uXL8fTTT0sjiCeffBLXr19HVVWVhJ78/Pwkfm00GrFr1y7JeezcuRO33nortmzZgtbWVrzyyitISUmR3E55eTni4uKQn5+PzMxMGI1GxMbGClggLi5OajxGjx4tFj+LgDIzM2GxWFBaWoqJEyciKioKVqsV48ePR2dnJ2pqalBUVASfz4dTp05h1qxZkg9pa2vDpEmTEBQUhKCgIEyaNAnNzc347rvvMG3aNPT09KC8vBzh4eGizA8ePIiEhARER0cPoNe+fPmyQCCvX7+O6OhoQbacPHkSgYGBOHTokODok5KSxOsJCQmRMCIbBJWXl6Ourk54/1988UWsWrUKxcXFqKmpgcFgwJw5c0QZ7Nu3T2DPAQEBGDJkCBoa/tsOpnL9KIQ73WTC3+gG19bWIjs7Ww7VxYsXkZaWJpnwyZMno6WlBV1dXfj666+x5nsStPPnzyMwMFAKB6Kjo6WLTFFREex2u7jytLhcLhcsFgsAwGq14s4774Tb7caYMWNQVlYm2fampiYYDP0NqYcPHw4/Pz+UlZUhLy8Per0e7e3tGDZsmEAy6aJv2LABYWFhKC4uBtAPwUxPT0d9fT02btwIo9EIs9mMsrIyDBo0SDq//O1vf0NhYSGKi4sRGBiICRMmSKXekSNHAGAANYDP199+q6SkRDDVzNbX1NRA0zRs3rwZLS0tqKurQ3x8PJqamnDXXXchMjISV65cwZAhQ1BZWYmEhATYbDaMGDFCQhReb3/fx6KiIpSUlCA2NhYRERFoaWmBTqfDP/7xDzQ0NKCqqgqpqamora1FRESExA4BSB5k2LBhcDqdOH78uDQ9ZpEYk8ddXV3SA7a6uhr19fWIjo4Wy4Wx6uLiYly7dk3QCKWlpbj99ttRXFws0FK32y2YaIvFAo/HA4vFIh2szp49ix07doh1ziQ3D76aQGS45/LlywOKeQwGA0pKSpCeno62tjZ89tln0Ol0gsiwWCyw2Wyw2Ww4cOAA2tvbBQPd0NCAyMhI9Pb2Cj97b28vvvjiCzQ0NIjXwvARk/wWi0UgeSNGjIBO19+4JDAwEKWlpdDr9UhKSkJLS4uwPHL/sv6hqqoKfX19sFgsCAgIgNVqxV133SUoNrIhapqGgoICmEwm6PV6nDt3DllZWZL7aGhokOS60WhEfn4+oqKi4OfnJzz0gYGBCA4OlpyQz+eD2WxGZ2enKM22tjZERUWhr68PERERaG9vxx133IGoqCjB4QNAU1MTBg0aBJ/Ph7Nnzw4ouGtpaZEw37Vr1+RMXrhwQeCSiYmJ4tk0NTUhPT0dwcHBKC0thc1mg9vtxtWrV4VBlcCD8vJyOBwOSYy6XC5UV1cjJycHNpsNHR0dgvwiAig6OhrBwcEoLy9HUFAQGhoaJP7f3d2N2tpa5OTkyJwx2d/Q0IChQ4cKn7vFYkFUVBSampp+UK7+KOgHRo4cqe3bt08y7qwMfP755/HCCy+IK+pwOBARESGFKUxGWCwW4fI2GPqbcRgMBiQkJECn00kFmb+/P65evYrAwEDptsLEh16vFwFPJjjC7QjDIj0tyfSTkpIQHByMiooKsUYsFgt8Ph8GDx4Ml8slFmhISAjq6+sRFxeH8PBwtLe3IzIyEh6
PBy0t/f3Fmb2vrKxETk6O4Hbr6uqQkZEhKBEmX9LS0iQ5Rdc4MDAQLpcLjY2NwmYJ9MeIm5qaoGkaBg0aJHBIdu9JS0uTg15WVoaUlBRRgB6PR5pfc66uXbuGtLQ0qRhuaGiAx+NB2veNMTwej0DLKCTtdjscDgdCQkKEUInzyP6gUVFR/weFAiscQ0NDYTab0dfXh5aWFnR3d2PQoEFob28X7pD29nYRmGazGXa7HT6fD5GRkairq4Om9TM99vb2Sks4CgQSxDHnQ0SICitk/LmhoQHt7e2CYElPTwcAEfrx8fHo7e0VhAnnoLGxEXa7HYmJiQgLC4Pb7RZe/+zsbAA3oJnkZo+JiRGIJlEiDFfZbDaEhIQIyR0TlxkZGbBarQINZQyapfAejweRkZHw8+unOA4KCkJoaCh6enpQX1+PoKAgwaCz1oFeLguAiIJpbW1FRUUFqqur0dDQgMGDB6Ovr0/47FlIRMoLJlGJ/Glvb5dkM2GYRC81NTXJWnKszG+xIQ2rdtmMWq/XIyQkBA6HA8HBwbhy5QrcbjeysrKEH95qtWLQoEEyhw6HA4mJiYLBb2lpQXR0NBITE6HX6/HCCy+Id5qfny9jcTqdck4PHDiA5cuXo6amBk1NTcjJyRG0Dznbm5ubRTCfPn0at99+u9AksOWhz+cTQ8Rms2HYsGFSsc/3bGhoQHp6Oh577LF/Sj/woxDuBQUF2sGDByVhVVdXh+LiYmzfvh0ff/yxFP/wsAMYgEvn4rIYRi2FZwJIrVYjtpSwKFpcKib45upFYGArQBWmyLgaf86LG4UxQ45HJZLiYgI3ClA6OzsFk833oIWjJtMYKvF4POLaqnE4teCJ41YLmtS1p6Vlt9slfMU5JYTOaLzRkJjl3VwTbj7i/YnoUDHlfK5aoef1egdUhqrJVLXilBWW/HpzwpOWs0rPwHfk90w4c605V/yMWjXI9aai4lg5TxRwfD8KQMJ5ST/AdVbDaCounDhozgHXiMqaJfH8Oc8CjRKOne/BdSCcUSW142f5HjrdDfI4lZZALdpTi8L4O6KzVKjsp59+KrBkk8kkSiAwMFAscO5fUvtyLYiOoiJgM3oVS851aWpqEow65QDHy/PP+WGxGLllOCf0Mshh4/X2sy1GR0ejo6MDLS0tUtzIOTlx4gSGDh2KlJQU4Z8BgC+++AKJiYlISUmR6lvyvOt0OkRHR8Pn66cXUWs6goODxajjOKjg2IybvV157vg+5HoPDw/H008//ePnlqEA48Zva2vDyy+/LBuWVgMPqpq07OrqQkhIiLhAPFQszCAKh0KHwoh4Vx4Oeg4sViHTnMouSAHs8/mkUIHCkFWStPjZBJdEZHw2FQ0FJw+pmm9Qq/0o5FmqTHeNv6fFTXysWpCiFuEQ4UEWSFUAUjHw0FPxUfhS0FJxMoFGZUVFwq/8p1IAqNYHu9uoY/N6vVKlR2gsv6o4bJUNj0KZ78G5VqkNuLYqdl5Njvr7+0vYgVh2NUHKsTPhSqFPBUtrmjF7f39/dHZ2oqenB+3t7QPoGijMuKc4ZioN7h3SCbtcLtTW1opLT4XGAh56VKqhwr3HcQMQ747njfOlKluOk+dHNUhUbh2+o8fjEXgfACmzd7lcCAwMRFJSkiRhaQSxqIsxdxYIsvyfCq2npweJiYlyHlmTYTab5bMmk0nml0YAwRGqAlR5fohcSU9PR1dXl1SGBwQESK/b4OBglJWVScOPsLAwJCcnIzU1FQ0NDejr68P169cRGhqKsWPHoqGhAd3d3cjNzRWkDZPfzc3NMJlMUuRENsju7m6kpaWJdx8WFibt+Ww2m8ggla+H79be3o6YmJh/2SD7R2O5Hz9+XEqmaf2x5Fq1YGmlqZY7NwsVAHCjWrWnp0eQFzyAAESzk4yMwp+KALhRkKGyI/LeKpKC4+Jc3my9d3V1CaaeykYthKLQIZTOaDSKN0Dhrx5C9f48cCriiDwt/J5C2mAYSBjGSklWgbKkW/UsVE4SWtUcK6vxqEBU0jbghuXLcdKyVb0GWl0UrOq88B70OjjHqnLmuG5WmlxjHjLgRkGbKtS5FvwZ309Fb9HS5//5t1RkXKfOzk4R1BRiFIbq8yjUmbxVq20pAKk4XS6XCDauAzmWDAaDFMAxcafX93cB4n6nglUNI64fqzq5r/lMcrbU19dj3bp16Ovrw9y5c1FUVCReDM9DaWkprl27Jk2sKXTDw8PFU1b3NC1RxqnVPcR55HwQdcIYPYUgsfY+nw/Xr19HZmamxKN5r6CgIJl3FvmxapYeF+8fGRkJk8mEa9euyb5uamqSFnp6vR4xMTFob2+Xd7Hb7VJMGB4ejtbWVqkuz8jIgNPpREREBMxms1S20mil0A4JCUFlZaWEe/neLFLjGaXx4/F4pCEJ5dIvfvGLH7flDmCA66tW89FK58GmoOXfAJCDplpwAKTqTWWnCwkJEUZGWku0UijUeR8KQX42LCxMNiufy2pNPl+18JjgpHdBocdNz3sAEEuLB5JWNA+RWmlLYUiiLeAGBlmv18NkMsl4aPnSUqSQpQDle1OZUGDRM+A8q8qJSpcWEi1YNZzBi4qa1rAquCk06YGR6ZPCmxa1amlS6avVtnwGLUqGsLi2PCCq96V6AOpaqWsL3FCOvPhsde44Bs4bx841JmUBDRPOEd9NFbp6vR5Op1NCFFwbh8Mhljhj1Nx75BhyuVyIjY0FgAHsimplqhq64Xh43rjeKobf5XLB6XSisrISo0aNEo8M6Ach1NfXC6NqaGioKFiGSfhehG7SWKN31d3dLTF2FanEuDz3Gs8Nc0U833wmlTS9c84tLX232y3JXuYaSIdAZRUTEyMd1eLj4xEYGIjk5GRZv8bGRqSkpKC4uBhjx46F0+mEw+GA3W6H1+tFVlaWePQqvYfBYBC+eo/Hg87OTqEZjo2NFQgpK6wZSlKVOOeTNCxUnj90/UvhrtPpPgQwD0CLpmnDv//ZGgAPA2j9/mO/1TRt9/e/+w2AVQC8AH6qadre/8EzxO3m4bw5HkcCHgoUus+0BGhxk0KW1j0nhovNMmAeftUVVePMqtak0FWFEoW2GvvlZPNZFPQqmx/Hz/cDblTnUnBR0TEJRauRCoqxZT5PVYAsFaelSAXE+6hl+0QP0Nqm56KWsPv7+4sFSguQgo3PZXIMgFggN1vjtEbUmDoFgVrur84j15T7g54ElTcFNK0Y3ofzrgprxp+p7FWvhAKU68i5UAt41K5SHJPP55NiFIZnGMbh3mFVJxUX31lVqlQQDodjgJJjtyfir5kEJAqDc2Sz2SQ0QSROX18f4uPjkZWVBaPRKCyM9Po431wjKi16keRWIUlXVlbWAJI1TdNQXV2N9vZ2QbRwb9BIYzKeRhGJs9QQIBUJ54ANOVR+GIZ1OD6dTid0u/Hx8dJL1d/fX5RafX09AgIChHpa0zRpnuLz+VBRUSEeQW9vLzo6OsSCJtae4U4m/l0uF1pbWxEbGwudTofs7Gz4fD4JlVCoR0RESCcwm80mnEl1dXVyPhwOh3APmUwm2TcMua
jQcMoYtgmkHPhX3DL/E8v9IwBvAFh/08/Xapr2Z/UHOp0uF8BiAMMAJAL4RqfTZWma5sUPXIzbcrAMe9CaULlmeKjUrki8B90xupoUZGpjCgpAVRBTObAQxO12Izk5WTYyD7rNZhvAtqha2hwD3WAKVLWbEIW3GrcnIkW1CHn4Ge/X6XSSUOHh50V3VhUonEM+Q21qQmuYnBcBAQGShKKloSb3uLHUuDEFO4Uj34mbkd4Xf8cNTuibao3Tqqe1wkbDnDPGamkx8rkMy3FctIj5ngxn8N3Uz/Ar9x33kRri4R7iu5B5MTIyUt6bcXoKKP49Le2uri7h96bVyr0SFBQkVdcMH6hNGRg+4NrTOGAcmbkPVi2qIQmGcRoaGqDT6aTXLueQAkP1zKhQVMPDaDRi8uTJAIDk5GRZUwBobm5GcXGxdE8zGo3yPc8U35koJJ4xvf4Gvw73P40JPttgMAzwiLkHGP5RFQPpgtVuT35+/SyMKispw7MulwshISEDWkEyjMZxOxwOADe8OrvdjrFjxyIoKEhyDKrnEx0dLeFhdoBTPaWQkBDxOLh2hGhzzYODg4VPiEYhWVOp/FRFrMq1/+76lzh3TdMOA7D9q899fy0A8A9N03o0TasCcB3AmP/JHxJNoJI40friwaGw9vl80lCAE0z6VjVurQoC3oPan64qP8Nk1R//+Ed8++23sNlsAxJwvb292L59u8Td3nnnHcyePVvieKrQpgtOIRgcHCwbj4eGv2MyjpauXq/HHXfcIRuXnB78nF7f3y/02LFjQnm6bt06WCwWsQq+X7cBcVZatNwce/fuFQghN6CKHFIRGGqiWk2WsrMMC8Q4B+o4/Pz88NBDD+Fvf/ubhKu4phRILMMOCwvDp59+KnPsdDpl3Yh/p+BliEYl0GI8nAnid999VwQlv/LwMGbrcrkkJsuELt1ku90uoQk1QWa322Gz2eTZqoVPK3Pfvn0ICgoShRYaGioC2s/PDxs2bJCDe+rUKbzwwgvitbKuIy4uDnFxcTLPbBBTW1srMdvQ0FC8//772L17txT+tLW1Cdzv2LFj2L17N/bt2weLxQKr1SrGUXt7OzZu3CjUAQAk4U0vZ+jQocjOzpb+xnq9Hm1tbTh//rwkl2mB6nT9BWs8kzS2XnvtNRFsAKT+w+VyobKycgCqh8gRh8MhWHQqWJ/Ph7179+Ls2bOIiIiQ+o3g4GDExMSgsbERf/7znwUCGRgYiOjoaJELpDrwer3SCPvSpUuw2WzQNA1VVVVwu92orKzErl275LmBgYFITU0VAyU3NxcxMTGoq6tDSUkJqqqq5AwNGTJkAFiCIAqbzQaz2Yzw8HCBmTocDjlbVKrZ2dkICQlBQkIC0tPTkZiYKPuXiVubzSay5oeu/00R05M6ne6STqf7UKfTRXz/syQAFuUzdd//7AcvutzMoFMrq3Fw/p+ChTFfTgxjl9xURI+w8lBN1vj59Te15d8xllleXo7Zs2dj6dKlUkTAQxsQEIBNmzYhKSkJvb29WLFiBb744gsRGgxtUAGRpIjCS435EtpEi4ljZ8Jp06ZNADBg7Nyg/v7++Prrr5GSkiKFJCtWrJBu9pqmSUUeXUsePFrSLpcL27dvx/Dhw4Xjg2OXjfG9wmRYh3NLgUzhSsGrQk15kClsrVYrlixZItYYrQ5/f3/xQoiv3rt3r7i9DE2pDVfU0AuTmEzEc4w+nw/Hjh3DfffdJ40WentvsEACEAVD95cC3+v1SmKUFjGVLy0uktQRUqcyN/r59dMOHzt2DCaTSThH2tvbJdnKvchKzOPHj6OgoAAABqCu7HY7uru70dPTA5fLJVC62NhYEYyc36KiIlk3o9EoeHoqCovFgosXL+Ls2bOor69HV1cX4uLicP/998u541kMDAwUD5h7iPPGojPGw6OjoxEUFCSWM0MMxcXF0ks4Ly9P3qG9vV3WW9M0JCUliSHidDolKcw8gNpgxs/PT4oEzWYzTCaTePD0rh9++GE5B21tbQOS9LTGqcgiIiIwZMgQmM1mVFRU/D/UvXl0m+W1Pvp8kix5tuRJkuc5HmJncoAQEkIgDdAEKBCmNlAK/fV7wAgAACAASURBVME5paWHrp5CRzrRcy7ltJz2MCRQUtIGAiEhQEISQhwyx3ESx7HjRJ5t2bIsyZbl2bKs+4d5dl7za+m55/7WulytlZXElvR93/vud4/Pfja6urrQ3d0Nm82G4uJiFBYWSi2A0N729nZ88sknaGpqwujoKHJzc2G329HX14eYmBg5B/Hx8YiPjxeGWL1+BsJIqKzX64Wmaejv7xeQAVNc3Ee/3y8puPr6elgsFtGJ/J7Pe/1PlfuLAPIBzAfgAvDcpz//W+bkb8JxNE37X5qm1WiaVuP1emflSYHLSAduECvTat6blKEAZnlebrdbDiTDQL7oCYyOjsLtdosn63Q6ceLECWmMUvO6zGerXYL03MLhMPr6+gQW5vF4xPOlMRodHRVvi+EfUyzAZXQGu2pVSCa7/qjEurq6kJycLB5Ib28vAoGAKFg+m9PpFCWi5qHD4TA8Hg+6urowMTEzEpDQK/7d19eHrq4uacDh+gMzHYGkJaU3wehATQnw8E5MTCA7O3tWt+fQ0JBA0OgNEl/MZhtGbmywYQGOqZyuri709fUJhpmeGZ+zrq5Ovhe4nI7zeDxSx6EM0RjRg+QzBwIBWUd+x8DAACYnJ+H1emW+wPT0tBiG1tZWoXpluoxKmU05TU1NMu2ehbqioiL4fD7huOdZ4IEPhUIytIJG0mAwICEhQRQsIZbM/1NBsk5Bme/u7obL5RJyPKZzmH7q7++XexsfH5du3qGhIbS1tcmIOq/Xi+HhYfHgIyIikJ2dLTDi0dFRtLW1SfMQEVler1dQMMyXU37ogPHc8F64FuyIpg7gcwEQT7++vh4+n0+Uo9frhcfjEWeEkSzrTrxPnmGLxQKDwYCuri6YzWbZC6bXiHZhQ9jQ0JAYUeogNsUFgzPNlwQ5qMVwNszpdDNTnlgPooypAILo6Gi0t7dLao+Qy897/Y+UezgcdofD4VA4HJ4GsBGXUy9OAJnKWzMA/E0ChHA4vCEcDleGw+FK8jSrA5aNRiM2bdqEZcuW4ZVXXsHo6Ci+/e1vi2V79tlnpfK+fft2BAIB4RXp6urCgw8+KJOa6FUx3/zhhx8K+953vvMdwVYfOHAAJSUl0kAUDM5wnPDeCgoK0NLSgr/85S9YuXIl/vrXv6KhoQHNzc1Yvnw5fD6fEFzFxcUhLi4OL7/8skxnr6ioQG1tLY4dO4YVn5IrnTx5Etdddx2MRiM2b96MtrY2vPDCC9DpZrhdyH73gx/8ADExMXA6nXjkkUeQk5ODhoYGdHZ2YsWKFRLSf/TRR0J8tG7dulkoFkYNb731FjIzM/Hxxx+ju7sbjz76KF566SW0trZi3bp1giR48cUXJS3z5JNP4i9/+QuSkpJw/PhxNDQ0YPPmzejq6sKLL76IyclJrF27FnFxcfB6vdiwYQPi4+PR0NCA9evXIzo6GmfPnsXu3bsRERGB3
/72t3jxxRfR3t6O999/H+FwGE888QQWL16McDgMh8OBf/mXf0FUVBTWrl2Ln/zkJxgaGsLGjRvR09ODhoYGPPHEE7NQLUzbtLW14d1330U4HMbPf/5zfPvb34bD4cDHH38MTdPw+9//Hps2bcLOnTuxceNG3HLLLdDpdKivrxe+7hMnTsDlcsFkMuE3v/kNgsEgdu/eja1bt6KlpQVJSUn45je/iT179kjB7YUXXhBOk7KyMkxOTsqhplMSGxuLvXv3YtWqVbJno6OjSElJgd/vx/bt2yXsPnjwIAKBADo7O/HKK69IeodGNCkpCW63G7fccovQFtTX12NsbAzJyck4ePCgGKv+/n7U1NTA6/Vi165d2LZtG37wgx+gqqoKhw8fxu7du/Ef//EfOHz4MB566CFs3LgRu3fvRmtrK4xGI5588kmcPHkShw8fRiAQEJrtffv24Xe/+92swj2pbA0GAw4dOoS8vDzpkuU0rH379kGv1+PcuXPYs2ePIECee+45jI+Po729XRAjO3fulNw9h6PQyDGVQwfn+PHjyMnJgd/vR1VVFVpbW9Hc3Izf/va3MBqNonDJK9PX1wefzweHw4GysjJkZGTg2LFjMJvN0Ol0ePHFF3H48GGpn505cwYTExOoqalBTU0NrFYr8vPzAcx0oft8PrjdbvziF7/A6OgotmzZgqamJnzwwQc4ffo0kpOTceHCBaSlpWHv3r0wGAyoq6uTYvw777wjnDnV1dWwWCxob29HSkoKkpOT8fvf/14cstdf/2wZ9P+Actc0za789ysA6j/993sA7tE0zaRpWi6AQgDV/53vZK6SqQsAKC0tBQAUFBTA7/ejrq5OEAHbtm2TOYrLly/HgQMH8Nprr8FkMuH48eOYP3++5MyAy8NlGxsb8fzzz6OoqAg2m00aFYCZlnKz2TwLFUBvOxAIoKKiAk6nE3PnzoVeP8OyNzAwIF4B2+hVRMDmzZuRm5sr5GeRkZEoKytDfn4+4uLicObMGQnVSURWWlqK6elpHDp0SDilv/GNbyAuLg6NjY0oKyuTIQEAZkFFn3/+eZSVlcFsNmPt2rWzCp5MGdXW1sJsNiM/Px+ZmZl46qmnkJCQgMzMTMyZMwdWq1W8IobGDocDy5cvR1xcHEpKSnDy5EmUlpZKnpCpgMnJmfFp2dnZ0DQNp0+fRmFhITRNEwbPgYEBjI6OYt68efjTn/6EFStWwGq1AgAqKioQDofx2muvYXh4WDr7cnJycPDgQfz5z3+G0WhEQ0MDysvLBQkCXG5CIxWxzWaD0WjEkSNH8F//9V8oKiqSPHl5ebnk3PPy8qDX63H+/HmYzWY0Njbi1VdfRVpamox5IwVsV1cXsrOzZyEcIiMj8f7772NoaEiMen5+vnjRbF6hIWptbRX46NDQEBYsWIDMzEzBcMfGxsLhcGDv3r0wmUzo7u5GYWGh7DWjAcoz+VOamppQVVWFYDAIl8uFlJQUBINBZGRkSL8HURcs3g0NDcHpdGL37t1ISEjA0NAQMjMzodfr8cc//hHFxcWIi4tDZGSkRHNE9XAABqPbyclJSa0wddDR0YHIyEi43W588MEHSElJgdfrFbgqoyWmgbKzs0X5M/1XUFAgUZfNZoPJZEIgEEBvb6/MWuX4RZvNBp1OB4vFgqGhIRmAzUgsOjpaoiF2rXZ1dSEiIgIpKSnS/VlQUIDMzExJiZJUr7m5WfaRqWQWTNX5rnQmb7jhBhQUFGBgYEBoAwoLC9Hf34/CwkIYDAakpKTI/WZmZiI6OhoJCQk4fvw43G43Pv74Y6HXIGKGg7s/7/UPlbumaW8AOA5gjqZpTk3THgLwf2madl7TtDoA1wH4FwAIh8MNAN4CcAHAHgDf+kdIGQCzwnMWIIaHh7FgwQKUlZVh6dKleO+99wT5ERkZiQ0bNmDjxo246aaboNPp8OMf/xiVlZXCfviTn/xEwikai4iICHznO9+BzWaTKen79+9HbGwsTp8+LSkB4lHVosWf//xn3HnnnVi2bBmKiorw0EMP4eqrr8aqVauwefNm3HXXXYiMjERVVRUee+wxBINBuN1u4XHp7u7G1772NZSXl6O6uhrf+MY3EB8fjzfffBN33303xsbGkJeXh1//+te44ooroNPpcPfdd8PtduPxxx9HV1cXOjs7sWHDBmRkZIgX8+yzz+JrX/saxsfHBZ/LHOUjjzwi+VPWDzRtht7hvvvuw69+9St0dHSgvLwc999/P6qrq/HP//zPmJiYwBtvvIH169cDAF577TU8/PDDyMjIAAB8+OGHKCkpQWlpKZ599llce+21mJ6exrx586DX6zEwMICVK1fi4sWL2Lhxo/C3fPzxx7Db7YiNjcWzzz6L6667Dq+99hpSU1PFC7v11lsRERGBt99+W7jMo6KicMcdd+BnP/sZFi5ciIiICDz88MP47ne/K40zhHSywLxo0SIYjUbs378fy5YtQ1VVFRISEhAdHY1f/epXuPLKK3HLLbfg4MGDuPPOO+FyubBjxw5YrVY89dRTsNlsSEhIQH9/P44fP47MzEysXr0abrcbycnJiIiIQE5ODr785S/DYDDggw8+kMjl3LlzWL58OWw2G5KTk5Gfn4+MjAxYLBbs2bMHPp8PExMTGB0dxZkzZ3DPPffAZDJh7969uOaaaxAKhbBx40YUFRUhMTERK1askNkGKlbeYDBg165dwl+ydetWZGRkiJK79tprYTab0dfXJ2ykREe1trZieHhYuikPHToETZsZ0FFaWgq/3w+dTocTJ06gvr4eVVVVUuStra1FZWUlLBYLzpw5g6uuugp9fX2S/iMGm1FEIBBAU1MTzp49C7PZjIyMDNx6662YmppCXl6eDJ6ZmJjAokWLhHuH9bVFixZhenoa+/fvh9lsFu+f+oJe//Hjx0Ueent70d/fD7PZjEuXLsm0JZU3qrCwEGlpaWhoaEBFRYUUyvv7+zE1NYX6+nokJSUhLS0N3d3dOHfuHB588EGUlpaipKQEGRkZcDgccDgcklfPycnBJ598gsWLFws54JkzZ7B48WKYTCaBl27cuBFz585FfHw80tLSMDg4iLfeegulpaUip3RGkpOTkZycDL1+hqo4Ly8Pvb29/++HdYTD4XvD4bA9HA5HhMPhjHA4/Go4HF4fDofLw+FwRTgcviUcDruU9/86HA7nh8PhOeFw+MN/9P18sXBJRIbRODPKqqKiAhaLBa+//jpuu+02bN++HXfddRdcLhd+9KMf4YorrkBcXBwWLlwITZuhHKivr8fWrVtnwfh4IHJycmCxWDAxMYEf//jHeOSRRxAfH48dO3Zg/vz54v2pxbTu7m5s2bJFrPKjjz6Ke++9F5s2bUIoFILL5cJXv/pV+P1+LFmyBDfeeCP8fj9iY2ORkZGB/v5+/PSnP8Xtt98u6ZaioiIMDg5iYmICN910E1wuF/7pn/4JZrMZmzdvxh/+8Af84Ac/wGOPPYZHHnkEdrsdb7/9Nnw+Hzo6OrB582YhUnvggQfQ3d0t8C9iz99++23JEbIgODAwgJtuugmr
Vq3C2bNnceTIEbz99tvweDz405/+hKysLOzYsQPbt29HQ0MDdu/ejaysLOHzeOGFF3Du3DksW7YMo6Ojghc+f/48bDYbvF4v9u7di6ioKOzatQuDg4PYtGkTzp8/L/lShtoNDQ3isRw8eBBr165Fd3c3pqenhbTpo48+wrp165CQkCBNNGlpaWhqaoJKNsdCFpFQiYmJaG9vxz333INnnnkGKSkpSEhIQFRUlFybab2ioiJhb9TpdFIjGB8fx/PPP49169ZJnnPlypVS1P7hD38Ip9MpXZmFhYW4dOkS3nnnHblPi8Uiw7U5g+BLX/oSHA4H4uLisH//fsyZMweJiYn45JNP4HK50NbWhrKyMmn8cTqdaG9vh8/nkyIwi4M0PgMDAygvL0coFBKWxKamJhw8eFDSGADEk66qqsLNN98Mr9eLkZERwaKPjo6iu7tbjIjH48GZM2ewdOlSyW37fD5kZ2ejv78fycnJmDdvnqBo7Ha7FGg/+eQTlJeXo66uDoWFhaKgRkZGUF9fLyAJFg27u7uRn58v1ArqbFODYYZ35dChQ+LgEXI5NDSEnp4e6PV6dHV1YWxsTIrU4XAYVVVVWLVqFRwOh0SiRHSR+XTRokW4dOkS/H4/srKypKC9ZMkSKTaTJ95kMmFgYAAJCQmIiIiA1WpFQ0ODsEFqmoYrr7xSZr5mZGQgOTlZisculwtWq1Vqgiy6JycnCzKOE+iSkpIQExODoaEhfPDBB1I4P3PmjEAu/65OffpTmtz/L18vv/zy0w8//LCEacyrG41G7Ny5E5OTM0xuwWAQN954o3yusbERRqMRS5cuhdVqRWdnpxQhOVVF7UCdnJyE1WpFR0cHxsbGUFRUhNWrV8NoNGLDhg1YuHAhli1bJjAmFmODwSDa29sRCoVQUVGBEydOQK/XIyUlBUVFRTh37pzQo3700UcwmUyYN28eTCYTDhw4gNHRUezduxcPPvigeBxELfT09CAUCmHp0qU4duwYTCYTli5dCrvdLrA2h8OBW265BcFgUMZ4zZ07F4mJiTh8+DAiIyNRWVkJk8mEY8eOAQC6uroEwsYCDtMQkZGRKC4uxoULF+DxeLB27VqEQiF8+OGHWLduHbxeL+rq6lBSUoKKigrk5+dj586dwsT4la98RVI309PTcLvd8Pl8aGhogMFgQF5eHqxWK4LBIBwOB0pLS3HVVVchGAyiv78fLpcLZrMZRUVFElYHAgG0tLRgZGQElZWVcLvd8Hq9aG5uxpVXXgmbzSZpkXA4jKGhIcydO1eY9Iiw0LQZ8qiOjg4MDw/j5ptvluk6AwMDcLvdSExMRGFhIaKionDixAkYjUa8++67KCsrw+rVq2G329HT04PJyUnk5uZi5cqVMJlM0hWckZEBt9sNl8slxoFFbzoTU1NTWLBggRTxgJkItaOjA+Pj41iwYAFSUlKwZ88eqXMcOHAARUVFKCkpEaXDRi5GEuzqJA3BpUuXkJubi4yMDOTn5wsAgfj2S5cuzeqUjI6ORnZ2ttRyiAhJSkrC4OCgtLjHxsbC7/fLjF6mEXW6Gb76srIyxMXF4cKFC0hMTJRuaavVKkVv0nuUlJSgsLBQDACRWETTtLW1ISEhAXq9HomJiYiMjERHRwcASAoiPj4ezc3NyM3NRWpqqiBMEhISpIDf2dmJuXPnIioqCqdPn8Y111wDo9GIuro6Mb5MX2VlZQl+vKGhQQwJHUt2mHo8HhiNRqSkpCApKUkKpXq9HklJSVJbYIqGEE4+LxsCOzs74fV6MTExgcTERCQnJ8PpdELTZoZ15OTkICYmBi6XC8PDw7Db7YIGIsz5woULuOaaa6DX63HixAl86UtfwtatW11PP/30hr+lV78w3DIff/yxCODIyIhAsOh9s42agqPCJ9klptPpZDI6BZo/Zysvc+LMCxJd88Mf/hDPP/880tPTJWfOdJFaYWeBkfUBwsWYdyTcLTIyUmBVhw8fxlNPPYWTJ08iFAoJjp95x/HxcSFm4vWIfyVLI71Mrgc7H4kRVwnRCAtTce48fERecK1JKEXFzwIVlZIKf5yamhLvhfdK2B7XhzQJaj8BoyDmitXmL51ONwsdQiQS6WqfffZZ/PznP0dERIQ0bbBJhWvF52KXJqMUUsCq/CLMWQeDQVRXVyMtLQ35+fl45JFH8NOf/hRWq1Xul5h/nU43i8ud1+DvyTao8tADl7tw2WRD+C4A2QPmdFkkBS7PZOX3RUdHy8xRAAgEArNmbvI72bDHKG5sbAwjIyMYGRlBf38/SkpKBHvOvDGLuoTdjo+Pw+12Q6fTwel0IjExcVZ3LPdncnISNptNGoIAyBQr9owQPsi5AmqXLwDJ3QcCAVitVpGZiIgIQYexkYmfJ9yU9SNi1r1er6RzAKCurg7j4+MoKCiQulkwGMTJkydRUVEhg90pyzwfQ0NDaG9vh043wyfDgRhMO3k8HhQUFGBqakrmObAr3mq1IhAIYGBgQM4r1yIQCMjZ4RkbHBwUyO/o6CjGxsZgs9kkhcrIPikpCe3t7Xj77bfx5JNPyvOEQiF8/etf/7vcMl+IYR1UFCoWm/lhNvEQr0psNKln+XMqXCJkqCz4PcQoq116wAx8at++fbjtttuEnpPKkxtPpUqFAmAWNI+pAXqQPAD//u//Dq/XixMnTuCuu+6ahVwhtJAFQbU2wGImC3FqxyBRCQCk+UrlMWG08VmjTaNCHLXa/EXIFTHDKk0sDY1ef5mYinvEdeF6sztUbZjiOrN3gY07REaFQiGBudJIHzp0CA0NDcIRPzg4KF6xCmtVG6b4bwCiJGgg+fyxsbGzmskcDgdGRkZw5MgRrF+/Xoq6VKo8RHypz8nWdV6bh1SlHVDXkFhs7iELkcTh01Hhc0VGRsJsNiMhIUFQKFarFXa7fVahl4aFskLHhntrMpmkmM+WeKPRiOTkZMFNq0My+IwkxGLBkPBDQjJTUlIkSqLcMfXD9n21H4GKH7hMG0FFT2ZL9osQoUbZ5t7yO3k9Gkg6F3QAx8fHcfbsWdTV1c1CJJlMJmRkZODChQsSXXm9XrS3t2NsbAy9vb0IBoMwm82IiYmRRid2eHNdvF6vpJNU/n/m+S0WizBe8lzz3KnnhI4N751RCL35wcFBHDhwABMTE+jo6MAVV1wxqznzH9EPfCE89/nz54cPHDgwq4BJr4geH5U3KV254Z/9zPj4uBxgbjwwe5QfC47EhA8NDcFqtcr30rtUuV6mpqaQkJAgConKS238IUKAiz84OCiHTi1qqp+jIlKfgdEJr81DQRQPC868H7Wxh7hlVYDoXapt/yp3BT1h4tJVegOuiUrToBpOlWaAWGYaB3q3qtIFLjNlqnQMmqbB4XBgYGAAfr8fPp8PiYmJMJlMctgyMjKE+oFrqHbU8hCqUQTzuVxj/s00HZkJ1U5jRgFGo3EWXbOaYuH7iDtmlESDR1mgHNK54LN+tveCe8dnYXTAtWX0wGdnQ8zIyAja2toAQLx0l8slfQVDQ0MYHh6G0WiUcXJDQ0PIzs6WYRv0Pru7u5GXlyf
FeSLAONGMOH2SajFKnZ6elhRDfn6+KDaeI07qYpTtdrsFox8TEyPdslNTU8IFD2BWtMo6Etde9eopb+ytGBsbk45QOg40UmrvAAdV09GoqKiAXq+Hy+WSKJUduxzzabfbceLECXR3d2P58uUoKiqadY4ISQUgiDkaVTYtApejFpX+OC4uDj09PUhLS0MgEBC6EyKdEhISZO/ZGPnggw9+sVkhuej0xllo0TRNDiaVC4sbwGXrTY+T3haLsiqBlgq1pEdHi0naUV6P30dFS8vMqe1s+AAuswfSyKjPYLPZJA3CMJKCQAoC8uao6R9CAEmoRIXFZwiHw7M4aVRlC1zmWqenzOvSO2bKi0pdTXMxjULFzfSJSqlM40hlwzVS0y2MKniPND7cEzUK8fv9MsKQ7d42m008sUAgIHtts9mkFZ5GkN/NtRoaGhJoXVRUlHSmqt269Gw5/YpywX9TBqgUuN80ggCEeI4GhDLIBjbuBZX330pbqUaSBlJNL6qpQCJKSDxFp6GoqEhCfzaiqc1eXG++hoeHxfN0Op2Ynp5GRkYGcnJy4PP5xHCNj4/j/Pnz6O7uljF4TI0NDAxAr9cLrQJhherAGCKd6O0zPcS1Vam6uV+USVIIqGvKpiF6wDSQbIgiSZhOp0NJScmsSHlsbAwWi0XkkogbUhSMjY3B5XKJjLOhUKfTicGJioqC1+tFTk4O5syZg9TUVHR2dsp9FxcXw2AwwOv1Ij4+HrGxsejq6pJnY5cxZYqeP50Gko6p8sUUMmccUD8wRfW5evV/pI3/D79UxaQKIj0wNVWiphwYplPBAJdTGvSkgMser4rJVT1/HtzPhvwqyuaz4SDvhfentg2reVUqH4ZbDD9VegIeNIauPNxqrpswPzUU47NTsdFwqHlNrgWbJLhG/J0aOdBLZOqLv+P6UmnwGmQ7VBU8150KS41QaPS4L3xvT0+P4JRJNgVg1ntp3EZHR6V7kp2kdAb4edYQmA5g2oGKmevD97BGAVxmoqRM8t98BjUKIH8OHREqa5XMjPtNY8bojUqEa6I+Lw0c15YGgntKY6Hup6ZpiI+PFz4aRhT8HjWNODw8jN7eXskxk1BrYGAAXq9XFBBrATTYJDBjBKgONeE0KTUl9VnqCjUVyGvSS+d7udZsIuJ60CGhY8YzrdPpRNlR1ik/bEakHNC583q9sodZWVkIh8OIj4+XYij3nZBJn88Hg8Eg/Srs7SBahj0VNFoxMTHicXPUIIBZVB90RBhZhMNhMbp8hujoaHE+1BoXZYzP+fdeXwjPXX1R8aoHnIJNj5kCxA3/LPe4ahn5e4apVLLqIWO+zG63z8p3c2IK74VGgIpFDZfZXEVvQ60TfNZghcNhKeDQG+e9BAIBSRHQMyS3Bb1P3iMwY3TUAiw3Xk1XcU1VRckDonqGXBM1T82/VYMKQOBkVGhUbmwxpyfH3gT+nzlilTvE6XRicnISFosFIyMjcDgc4qkwbcDIjfdBildGMFQmanRE2BqjJN6nypUyPT3D8a+m79QIRq3bUAYYbamFXKZguB7qunHvVSeGB57rTwVPmaKnx+cCLtNS8xpcC16XjktZWRnGx8fh9/thtVol+qEizMrKQmpqKiYnJwWlwjQVZZOKmLKsUjSQS8bv9wu7oqqA1M+pvCykLmb9Q80dq4M1WJSljDGy5Pg75uXj4uIkPTk8PAyr1SpKkpEMi5cARF/MnTsXg4OD4kDRay8uLhbjTjoJAEhMTJRpb3q9HuXl5XJvRUVFMkh7ZGREoo6hoSHYbDb4fD6RP9WB5XWo2+gsGQwGeDwe6HQ6qQGGw2FBKtHJ4yzhz3t9ITz3z+Yg1Tw2BYAbTIsHYFbKgl4tBUn9DiosHmwqdB4On8+HEydOSD6dRRlen9dSC2G8XxqLiYkJvPTSS7OKieqzUKFSAU9MTMzy9jVNw549e3D8+HHs2rULW7duFVSCqjypVOjV836ZG+QzqflbChUFhX/Ue1M/p+btuTdqGAxc9rTo9fIzRKWoh3JqagqdnZ3Yv3+/rCe/m0RS9IojIiKE6U9NIdHLIyvi+++/PyviYF1ErYlQ2alFTzoJ9AhVI6zWIrgmVD7q2nFP1EIp/63WsBhRce1oJHp7e4UFVZUx9bNqyovPp6YpqcxHR0fx3HPPwefzzYpGY2JikJKSgtzcXBQVFcFut0sxlh2bXCfuJVlIuc+9vb3o7e2VSV1xcXECL6bnTu719vb2WWumpkfZCavmn1XngzJMFFlDQwPOnj07K5XE9aOnHAqF0NPTg08++UTOa29vr3jo/BmNAQ1wKDTD/0OWVdZ+MjIy4HK5hLedUWJ8fPz/5hyw+cvv90s9g8g88tazE5byp57FAM43QQAAIABJREFU6OhoQcepPQvcc8oio1+mtviHUYuq4/7W6wuj3Omd8AHoTaoQPypX1QMCLg93Zs6Si8I0ATATAZDjm9fkgXz55ZdRX18vHgxweZQcP89UAO/PYDCgp6cHR44ckfvcsWMH4uLi5IAQBsXPswBEr4iKBAAee+wxLF++HKtWrcLNN9+M1tZWQUqoxTSGtXFxcVKppyeqGhyVMK2zsxMff/yxFAVVNkQAwmbHSIHPqSIUGDGpxT71WfgZALOUKQBZD3ZS0tgODw/PGmDg9Xrxy1/+EgUFBdDr9bDZbLBarVIo42FkAZQGhMpGRSXwbypwNR/MCJAesEorzMiCB5IKmsbzsy/KIhUxybvUNaBc8hlYKFPz8FQC09PT6OrqwvHjx8VbVmGSdDKIHEtMTERnZyeSkpJmOT98ZqYI8vLykJ2djbKyMlxxxRWIjY1FUlLSrLkIweDMmEWv1yv7wtTQ0NCQeKdEs5BYToXTMqohyRYdmejoaGkMoxdPJcg6l8lkwjvvvIPx8XFB8lC+SDVCIxAZGYmmpia0t7cjHA4LIILnn8aTEakamfE88nkPHz4sUY7FYoHb7ZbxeEzpkaqXQ7T9fr+8x2g0SvQwPDwszWvkxhkYGEBnZ6dEfYyQIyIipOmQUQSjKdanVOeVRorZCNXZ+luvL0RahqE7w3jVylJx0hozzKWCUPOWLKLS0vGlpiDUoqLBYMCFCxdw33334Te/+Y2EePwMDyqVND9HJMyrr76K+++/H8CMECUnJ0sBhYVXGhh6ErTYanGNhuEPf/iDKKX169fDaDTC6/XOGnFH0v+IiAh4PB7JyVEReTweqabHx8ejr68Pr7/+Ou6++24hMCOviJrqoGEj3zmhb9wXek4qyoZ1A6KTqChYtGKIrNfrsXXrVjz99NOSU3Q6nQId4/N9+OGHMBgMcLvdSEpKEv4Qi8UihdeUlBS88847uPfee2fh49k1SKNKpa7T6dDX1ycsfmp+3ePxCDSThXrKXWxsrDgDKiKGVL9se+fMWUYCiYmJszxttVhKGY2PjxfmUuZ0gRnFHQgE8N577+HGG29Ec3OzpOi47qSXUJXrnXfeKe/h+jLVwWHoExMTolg0TUNlZaVwl3NwS3JyMoxGo7Bs8jkvXrwIs9kMm80m59VoNCI7Oxt+vx/V1dX40p
e+JAqeTIiBQEAcAxppvV4vMwTYIMSCfSg00+19yy23yOxSDpY2m83Cq09DcPbsWVx//fVCcWAwzPC00Ei63W6YzWZ0d3eLzMbExCA+Pl5G5/n9fqSmpmJ6ehqtra0SPTidTilkulwuDA4OorKyUhxC4tRTU1MRFxeH7u5uIXUjvTFTyrW1tZgzZ44wlEZFRclgmqSkJOnSDoVmGFnZn6FGlVT+LLgyAv281xdGuVNRU/GpaQLmwD6LLmC+lUVN4HJOWFXk/H4uCL9nbGwMZ86cwR133IFAICAHmE0Uhw4dgk6nw5w5c4Qvnd7gwYMHceTIESxatAh5eXmCJKirq0NzczO+/vWvS3PHqVOn4PP5hLho7dq1s4ptjFxef/11lJSUYPHixcjMzMTIyIgw5pWXl8PhcGDlypXIzs6Gy+XCyZMnMT09jcTERCxbtgwtLS3Yu3cv8vPz5aBYLBYcPHgQ5eXlKCwsRGFhIU6cOCHIFE3TcNttt4lyOnTokGB9y8vLUVtbi56eHqxfvx5jY2Po6OhAUVER9Ho99u3bJ2HytddeKykK7mFNTQ1cLhfGx8dRU1MjkUEwGMTx48cxPDyMQCCApUuXorW1FXV1dbDZbAJji4mJwYULF8SwVlZWIjY2FufOncP8+fMxMTGBvLw85OTkSJv89PQ04uPjsXjxYuj1ehw4cEDyqiRS83g8uHjxotAR5+fni5dNZ2FwcBCnT5+G3+/HvHnz5L6qq6vR29uLlJQUZGRkoL6+Hn19fcjLy8Pg4KDkZAOBgODzw+EwcnNzAQBNTU1oaWnBLbfcIrBF8rgsXLgQJ06cQE1NDTIyMpCRkQG73Q6Xy4XR0VH4fD6sWLECwEx3djAYREdHB1auXCkDI+jUXLhwAW63G0uXLhW2yGXLlsHn86GmpgYPP/ywDIXw+Xzo7u4WVAvx1qdPn8aCBQvkPJBgj9DLm266CZqmyUi7cHhm4IXBYIDVakVXVxf0ej1KSkok0mNuv6+vT5oVy8rKMDY2Jp/t6emRDlK1qKsS0wEQdExHRwf6+vowf/58uY7P58PY2BjOnTuHzMxM9Pf3y6AVs9ks09ZaWlpgNBrh8XhEyRKllZCQgM7OTqESUeHEbrcb4XAYc+bMQU9PDy5cuICCggJMTk5i//79KC0txdjYGBobG9HY2AiLxSJNZv39/UJ7nZqaCo/Hg9raWtjtdtm7iooKxMbGigNDWgXSOLS2toqx+HuvL0RahggN4HJahoqAP2OxQS04qSOyeDDV6rrqITPPOjIyItC1V199FYWFhfD7/ejo6BDIXDAYxG233YaSkhLMnz9/VthOnpZweIab+dZbb4Ver8fRo0fxwAMPYM2aNejs7MTw8DAMBgOqqqpw/vx5LFy4UDgvmAogUmNkZATHjh3D8PAw3njjDdx2222Ij4/HqVOnYDabsWHDBixZsgQOhwPr1q3DH/7wBzz11FO4/fbb8eyzz6K6uhqBQACHDx/GwYMHsWDBAqxduxalpaXQtJmBALfffjtKSkrQ0dEBh8OBG2+8EW63G8888ww0TUNLSwsefvhhLF26FHFxcTh58iT27t2L8vJybN++HZGRkdi2bZukgrZs2QIAQiugpnlIO3Dp0iXccMMNGBgYwPDwMKKionDhwgWsX78e1113Ha699lps27YNoVAIeXl5GBkZwX333QebzYbOzk48++yzqKyshMvlwpYtW6TxxuFw4Nprr8Xq1avx4IMPCnfPDTfcgIULF+Ls2bOIiopCX18f3nzzTWmVB2YikZ/85CdISkqC1WqF2+2WFAIV/BtvvIHvf//7WLJkCY4dO4Zjx47hueeew3e/+11EREQgISEBv/jFL3Dx4kXo9Xps374dSUlJKCsrw0svvSRTvJ5++mlJJwBAe3s7LBYL9u3bB7/fj2eeeQY7d+5EZmamyBSjT7vdjlAohFOnTuHkyZMyK6Cvrw9vvfWWeJ779u1DQ0MDqqursWfPHnz00UfYu3cvLly4gO7ubuzYsQMOhwO9vb04d+6cKCx2XL799tt48803UVpail27duH8+fPQ6XTwer0ScRQVFSEtLQ3nz58HMDNyb8+ePcJKyQHRbOCh53/06FHxvPkyGo3YunUr4uPjMW/ePKSkpODUqVOIj4/HwoULcdVVV2Hp0qUS8ZLf/oUXXkBGRgbC4TD8fj/C4TBycnKQnZ2N9PR0nDx5UuoBJ06cwPT0NLKzs6HT6YRSNzIyEmfOnBFlbbFYcPHiRdxwww1ChJadnY2cnBykpKSgubkZHo8H6enpiIyMhN/vh8PhQGdnp+yZw+FAT08P2traYLPZYLfb8e6776K6ulrGAQ4PD6OyshLZ2dnSNMVu1UAggK6uLly6dAnHjh1DWVkZ3nvvPdEHmqZhy5YtmDNnDsrLy+H3+9Hc3Ayn0ylUw3/v9YVQ7gxfPwvJY9jK8EQt9gGXc7tqwZGeteoVA5fhYmrePhAIwOVy4fTp0xgYGJC8HHN4P//5z/HSSy/JVHPm/6anp3HkyBGkp18eMnXu3DlcffXVGBoaQnNzs+SXt2/fDqPRiNraWkRFReHee++d1ekaFRWFwcFB2Gw2fP3rX8d3v/td1NXVIRwOIzs7G3V1dcjKypI0zOTkJLZu3So0usFgEAsWLIDJZMI111yDrq4upKenQ6fTYdGiRTh8+LB4OtPT09i5cyeWLFkiVKfFxcXQ6XTYuXMnLl26hKioKCxatAj3338/lixZglAohEWLFmFsbAxnz55FUlKSNHW88sor+OUvf4nKykqJUmhsd+3aJdfp6OjAnDlzAAB79uxBW1ubhKN5eXmCArJarVLAOn36NJxOJwyGmek7eXl5sFgs0Ol0gopgMayzsxP5+fnQ6XRCoMVUTUNDA371q1/JXNbGxkZJi3R3d2P+/PmzeiDC4Rl2xdTUVPT19eGKK65ASkoKPvroI6SkpIicMqQuLCyE3W6X8XC5ublCqWs0GrF37140NTXBYDDAbDajpaVFeHlSU1PR3t6Ojz76SNAjDocDFosFg4ODGBwcRE1NjfCbR0dHw+Px4MiRI8KbTjgkvVuen5SUFPT29srs0+bmZkRERKCjowNNTU2iVN5//33J109OTiIrK0vSk0zjpaenQ9M0lJaWwmw2o6enR1rm29vbBdURExODvr4+WafJyUkUFBQIYonYcXbY0lGjBz48PCwRjlo4np6ehtlsxltvvQWj0Sh0CBUVFYLcYa1hZGQEZ8+eFYQPYcXkRY+LixO6b9J3jI6Owu/3IzExEVarFbGxsdDr9bBYLKipqcGePXukmYtdr/39/cKpk5mZib6+PmiaJnlys9kMo9Eo3P+ssdXW1iIcDqO1tVXqGSkpKRgYGMDcuXMl5ZqXlycc8pQXk8mE9PR0HD16FACEf+fvvb4Qyh2AKHA2mqgIDHqEfA/zpvzDAoV6QFUMOnC56MqC4bZt2/C9730Pa9aswZo1azAxMSHzJf1+Px5++GH88Ic/RHt7u3TU0YBER0dj9+7deOCBB3Du3Dn09PRg586dSE5OFuV16dIldHR04
KOPPsIDDzyAm2++GXfddRcyMzMFNhcOz0x7/9nPfgaj0Sg0n8Te5uXl4f3338cDDzwAg8GA6upq/OhHP0JXVxduv/12TE5Oori4GBUVFYiIiMCcOXOkcYrIIX7+7Nmz6OnpwcaNG5Geno5gMIi9e/fioYceQkdHBzZt2oQVK1YgFAqJ0JaUlOC9997Drbfeivr6ehw8eFBC4NbWVvzxj3/EsmXLsH37dgCQ/LTJZMKGDRsk3XHgwAHcf//9cDgceOWVV7By5UpRVN/85jdF6a5Zs0YY8LZt24YFCxbAarXizJkzeOihh2RMG1kIydlz5MgR3HTTTQiHw9iyZQuKiopw/PhxbN68Ge+//z6+973v4fHHH8fk5CSOHz+Oxx9/HKtXrxYSOhr+kZERdHV1obm5GXl5eWhra5N6gMvlwsqVK0XJVFRUSE2Egzfq6+vx1a9+VVAmv/vd7/C9730PmzZtkk7b3bt346677sLAwABWrVqFp556Ct3d3Xj11Veh1+tx/PhxXH311UJQVVtbi0WLFmHhwoUoLi7G0aNH4XQ6hbPFZDKJcmMnLx2GCxcuiNw2NDRgenpaphtVV1ejvr4eXV1dYryzs7ORlZUFAIK2CYfDwleTkpKCQCCA2tpazJ07F16vFw0NDViyZImgpN577z2kpqYiMjISmZmZSE5OFqdsYmICx44dEwrfwcFB7N27F0uXLgUAnD59WhqyiJKy2Wy4dOkS1qxZg9WrV+PNN9+EpmloamqSjtITJ07gqquuwsjICKqrq+F2uxEXF4ehoSHU19djyZIliI6ORktLC66++mpYLBZR0DabTZw+m80mUUZPTw88Hg/Wr1+PZcuW4dy5c5iensa5c+dgs9mQk5OD4uJiGYAeHx8Pj8cDAFiyZAnKy8uRkpKCmpoaXHvttejs7ERUVBROnTol6VHSM9MY5OTkoLe3F0uWLEF2djaMxpmhMQsWLJB5yb29vTh27BiWLVuGuXPnfq5O/UKwQm7YsOHphx9+eBaZFENC5tBVDDZ/zoVRUR2sXtNzVw3B5OTMRPj9+/fj4MGDuPnmmxEKhXDmzBkJrRcuXIjR0VE0NjbKiK/Vq1dL6ojRxY4dO3DttdciIiIChw4dwtDQEG699VY899xzEtZVVFQgNzcXbW1t8Hq9QnPK3Jqmafjwww9RXV0tOPlTp07h8ccfl+LySy+9hIULF+LixYt44okncOWVV8LhcMBkMqG5uRnj4+Pw+XxYvHgxurq6YLPZBG0CANu3b8c111yD6OhoYcWbnp5Gc3MzWltbkZmZKURK3d3dwvQXCoWQmZkp0c2ZM2dkEMOiRYtQVVUl3tC6detmoT+4VhMTE3A4HGhpaUFOTg4qKiqkEKnT6WRQydVXXw2XyyW46LKyMiQlJWFsbEyoe/Py8lBRUYGYmBhUVVWJIv7a176G1NRU1NXVoaurS9bYbDYLra7T6cSaNWuQnZ2NgoIC7Nu3DxMTE2hra5vlMXs8HjidTkGPhMNhdHd3S76URcuCggLceOONiImJwalTp1BZWYnJyUns2LEDubm5s7pqh4eHRRFwPfLz85GQkIC+vj4ppK1evRpJSUk4cuQIUlNTJf2Tnp4Or9cr2O6CggKMj4/DYDDg0qVLMnSZSjQpKQnT09M4ffo0pqenhVfc4/GgtLQUExMTcLvdyM/PF2505tQHBweFJqC+vh5WqxWJiYmYmJjA0aNHsWzZMqSkpODkyZNYsGCB8MKnpKQIyog01PX19SgrK5MOVqZY7Ha7pC1bWlpQUFAg0duePXtw3XXXST2N9Qru4+TkJMrLy5GZmYn3338fc+fOhdFoRFVVFa688koZlMIemKmpKdjtdmRmZsJsNqO9vR2rV68WNsrh4WEZoqHT6YSyd3R0VGpSJpNJhmjEx8cjOTlZ9ElHR4c0ReXl5cFsNqOjo0NQQQkJCairq0POp5OhMjMzkZaWNovojUg1DlN3u92w2+1y1tLS0tDe3i4DQxISEpCbmwu320398XdZIb8Qyv2ll156+sEHHxQ4HvPe9KjogdPbZXGUaBoiBJgzpXdPoSLmNhQKiQCnp6fLRCWXy4WsrCzk5uaisLBQ0CQGgwHz5s2TsIgGg5Y2Li4OixcvhqZpKC8vR3Z2NpKSkmC327FkyRIkJSUhPT0dbrcbERERyM3NRc6n01PUZpmKigpo2gw5lM1mQ2FhocD+WlpacP311yMmJgbl5eXQNA1paWnSQMHJ7KQIJlUo1y8hIQEpKSlYvHgxjEYjMjIy4PV6YTabUVJSgpiYGJnIRCVWUFAg4TE798rLy5GXl4f58+fDarUKMobGhAqRxpdFLLPZjLy8PAmH7XY7LBYLkpKSUFFRgfHxcaEmDgaDMsUpOzsbCQkJsFgsMg2ooKBA0llJSUlYsGCB5E7dbjeKi4tleHJxcTGKiooQCARgMpmwYMECwWZz6LLFYpHOzO7ubqmD5OTkSFqA8LjMzExpwiGdKyO5rKwshEIz1Ah6/QyPu81mE5wz942yazabYbfbZbh2Tk6OdDiS6TE7O1uQHSzEpaamwmq1Ijo6WlgGExMThaiKKBKiR7KyspCVlQW9Xi/rRMRMSUkJAEjxNDU1VepSvA+DwSCY+OjoaBmgQWQR0TUkt9M0TdgWe3p6kJ+fPytVOjw8LE1MAJCZmYnU1FSJNI4cOYJly5YJTJEwYIvFImc9OTkZ0dHRYujoSMTExCApKQlDQ0NIS0sTZBkNJeGy6enpQm1MVFZ6erqkUMbGxiSNSTy6Xj9D7xsZGYnExESMjIzIQA0i4ZKSkmQoNteTBG2JiYmyfuQE0uv1sn6EN0dFRcnw88TEROmAJYzaZDKJTPT09HD4yxeb8nf+/Pnhw4cPC6SMXrja0aVCEal4qcgJo1Jx6fTmWW1XjYKKkQ8GgwIdpLKl5VXz9ioKh9/P7+F7WFRUibcAiNfHtVa7VdkAQ+w4MEOXMDIygg8//BDJycnilai8GnxeFUvNa6mdqvRi+Dfzsvw9P8PGMHbescDNyEeNlvh5tYCq1jRIysT3EnPP9WTOmkKtdiBzbbnfPDxqlyevzWIV15voKLVDkgV64txDoRniLA54Jp6YKAleh3BKdb+ZZ1ebbgg17OvrE9wzcJmagN/PPWZqxGQySeFVTfkRD01uEq6J2ojDXCxz2Ix4yIjK7spQaGYAxPDwsDg5lGPKEOGC5DMiFUR8fDzsdrukKtkQFB8fL3NIed9cs/HxcbS2tsJsNqO3txfLly8XueM1CZGljE1OTuKvf/0rcnNzEQwG8eUvf3lWU5hKacy0B1EtgUAAoVAIKSkpIhdEwxDS6Pf7Jf1BGfX5fNIDQGAGIYxerxfFxcVwOp2IiIhAcnIyRkZG4PP5YLFYZvWKsHkpGAwKfJGyz/PIISgDAwMSJaqOF/d3cHBQGjApN+p+8VwR7srO2/vvv///H8RhPLCqMlR/zg0kllZts6d3wHCM36u+KIgUHl6D76ci4zWpjJjXZyGWSpWoHrW1GLjM5wLMLvryc1QYaseqiiE3mUxoaWlBZ2cnAAjZ
EotnvFcqbT4L00YUIHrRqtFTm4h4f+rrs0r/sy9+jt/L9eIaqURpKpyVa0kDo6bbVENFI8DfszjOe+Wh5+9VpU8DqSp4vo/e7MTEhKTbqCg1TZPmM76Xa6b2XJCE7LNGRH1+7i2f57MySU+TTUZETlHOuDdqByedHt4Ta1M0GJQ9ngsWoakYjUYjBgYG5LyQf4cItMnJScH1qw1TTLMQTUTjTodKpVBgkxL3Yd68eRJBq/0rKoUH5dNiscBsNsugD71eLw1nlA8AYrzo/LCXhJE2nYvh4WG5LzI7jo2NSVMgodU8a5zpyj90cBITE0WhUk5ICcEicFpamuwzDRmNL1M6Ho9HjDiNP+VSlW0WgLmHJDzki3qGOpIsmX/v9YVQ7lQ49H7VLkm1pZsLoTIpElVDoafAEh9PAaTl48FRvVOVFlbdKAofFYzaBcrvUA8a74UVcypt1XuksKhQL+L11caf0tJSFBUVyXVVxU4vDphNlDYyMjKr41BVCPy32t1G70/1xicnJwX5QsNENAnJoaampkTJUSnxHtR9VENrGmO+l140MGO8enp6MG/ePHlG3guxzlxHGiNePzY2FsDlQRGMMKanZ4aP9Pb2CoUtgFleFpuvLBaLdFlSPthVyRF6xFhTTtThMeFwWMYQ0gkYGhqSepAqB9x7FrypiNQ1UuUQuIznZuGOskDK2qmpy0NU6FEODAxI4xDRH/SU6aXzfHEdBgYGJK3Bbk+mg6gAR0dHRYlPTEwIKoz9Dtddd90s3nYahZiYGPF4qYS51mvXrgVw2WFhdMMURjgclhSfTjfDGUNZYpMRh3szF11RUSF0x5qmSYTFwi+bojRNk27YiYkJpKamIiEhAampqZiamkJ7ezv0+pnZqPz+3NxcoRcYHx9HSkqKQG/ZcUolznWn1860GZ0j6hoafDocNKg8C3ypMqUq/r/1+kIodyoA1VNkaA5A2sXVVAgFBZjtvbEdXa/Xo7+/Xzo+qeRUnDz/zz/01KgM2eo7MjKCyclJaZlWvWF2ELJjk92cVOgUbnojasqB16CwqwRYajcu10eNDngQaMy4TlSan61X0OtUh3qo6Qx6MioElZ4gPRGVvoHrxYiBxSE+L4t0wAzyggZZ9aZphBMTE8XQUh64Pvwcm9XoAXFP1evRs+ru7kYgEBClTmVLRUTDRqWocuCoESHXjvtO71Dl6edzcL/9fr8ofnqrIyMjQoRGR4UdoeRFJ8kdX9xrhvmMNomq4hAJQkc5aIMeZm5urhgssjeqcN9QKCT7wzUxmUwCgQyFQrBarVKnUqMVonNodJkDHx0dlQ5vyhL3nilFne7ycBU1nUhZUg0gjXdfX58o53A4LHTASUlJQj3MTufIyEhhcwRmajPEtQ8ODiIUCkkum+c0MTEROt3MbOOenh709PQgMTFR7nloaEjy/oRlkg6A9Q+fzyfDU1gPodPIuiBpFRgZkLYZmHEwSa6nZiHU8wBA0sXquf97ry8MFFK1TsyRU3lRYao5ZBWzzv+reWV6MPRs6dnxEPI9akel6n3Su1SjAt6nisAJh8MStlNBsDBHD5BKg8pXTS8Bsxu3VIGnIeJ1aLXVWgD/VlMhNJQMQdkYw+9QU0VUtFxf/s1nVfOlvI6azlLrIWrdQx1aoq4tv5fUvVQCDN3V9acsMPWhRin8Tu5PKBSCx+OBy+WSoc9U4PS4+R3kPFG9IOByf0VERISw8NHAqKkiXp8duXyph5FKVv0c/03FT/kjtwqNgZrqIpKD8s6/AYhCJ6qDMs38MKcl8SzweSkDLOLRcSBnEfeZSpR8MKTV4J5S5rkH4+PjkrZRMfeqMlLTLmp6EMCsWQ2MrvgdlDmj0SiRAx0KNaK32+1Sq2AUy9+zUzkUCgkqhqmTvr4+9PT04OTJkwIb5efsdrsAMlj4p3PEebM0vJStUOhy4+VnnQVGSmraknJP3cFzxb2nzlE5kVRn4G+9vjDKXRU+HmZ6NPRgAcjv6DWonhyAWYqEgkTFTz7l6elpoTVVU0K8NhcXgNASqCkgek0TExMC66Ol/dd//Ve88MILIsAqWRg9KDV6CAaDkjKiceLGOZ1OuY/29na8/PLLorB5SAHMUrhcGzLHsVikKlYaEtUwqfzYNEKvvvqqNMoMDg6ivb191qg9pmkAiHdNAWVKhnUKtXYxMTGB7u5udHZ2Cs8814R5c+bluZc0tNz/sbExkY1gMIjW1lY0Nzejs7MTTqdT9onXJ848Pj5e9k/TNHi9XrS1tcm1uL7j4+P4/e9/L4pcrQUwbKb3S/4cnU6H2tpavPTSS+JRAxD4HMNoeqRqrYFRFH9GznM6Ccw3c/CD2WxGZmamKIzh4WEZLh0TE4O4uDhJSzL1wo5JRgtsjmpra4PP50NfXx+am5vFA6bsMxqmx8uUDJUsydx0Op10p5LXiB2m9EZVB0eN4igfk5OTcLlcOHTokIya4/lWQQeJiYkYGhoSyCD1h9/vlzF4ACQ6YVdrfHw8qqurZ+XbSQGRmZmJkpISpKenC2otLy9PBlVz8pTf7xeZIM2EyibKdeeQd2DGeUpOTpb8/+DgIILBoMCOaegYlXF2MteFhphEaz09PWhpaflcnfoPlbumaZmaplVpmtaoaVqDpmmPf/rzRE07QPHTAAAgAElEQVTTPtI0renTvy3KZ57SNK1Z07RLmqat/kfXACBejFpc5IFRW8PVQhvzZ/S8OIJNLVLQQ6EAAZfTOFwofl4tCtLiGwwGGaTB/1NhqLlDehWffPIJCgoKxJLzHqh46aGqBVF6sPxbp5uZdH7vvfeKog6FQpgzZ44cLlp+/pset5r/56EgZSuVrGr8KEBcF/U7582bJ2v6b//2b3jmmWfEK+P7RJB0lwenkKSNB0A9wDTITqcTfX19knvmGoVCIeERomIBIN4O14upkM7OTly8eBHd3d2zFAsVOTHgHEk4NTUlk+wJPcvNzRWjRqQDr0evVTXKqufO++S+nT9/XtJqXFve144dO2AwGEQZ0dtmhMF7IBKH5GIs4BF1QWXCa4+MjKCvrw8TExPo7e2F0WicNSqO1+BoNnU/srKykJGRgfHxmYHiZWVlAGaUJzH2NARnz56Fy+USxkXuMesQPFeU+02bNgnahs+RkJAgMgpcbi7kegaDQWzZsgXz58+Xe+S5o8EmZLe/v1/kkNS+9Ny5PpQri8WC0dFRdHd34+jRo9Dr9ZLTJ4ePTqdDcXEx5s6dC5/PB4/Hg5GREXHSSBLGnD+RMHq9XlIyer1e9ocvOmEejwcejweHDh0SSu+pqalZNQR1SE8gEBCdR/55RoXvvPMOMjMzP1en/nc89ykA3wuHwyUArgLwLU3TSgE8CeDjcDhcCODjT/+PT393D4AyADcCeEHTtM+fB8WbUdAtVHzA5bQFAMmxApiVxlAPGZUcPQF6XfQy+H1qSMhFVAtmvA9W0dXCJA+6WlCk1ea4LYaTanjFv5l64e9U6Fw4HMb58+fFm5uYmEB9fb0QgtHQfRYpw+9gyM8CpJpWYkhHZcWX2gzGP4sXLxaPqbGxEWVlZXK
Y+QxMIfD/NJZ8Tn5eVfKhUAgFBQWCyadxUfddTZeoz8zX+Pi4TA7y+/2iJCIiZvjgGamxhsFIj2vA71XfxzUlNDI3N1f2jLLCvWLUw5/xHru7u5Gfny9KmpSxbNVX033qulGeKcdU+AMDA7P2ioMhWNSm4SHnz2fXkQaeThDXkWk1pm1Y8COdLYeTM7JjmoNKjcqMKVNOaVJTUxERM9OMmB7lmaJR4M+5FtPT03J9Foi5X/Ra6XHTI+f9cwQmf8Z1MRqNgpLiXsXGxsLv9yMQCMjMWJXdMzo6Gl1dXcKQyuhHBSdQ3hgVq3qBSCI+L+WDe+l0OqUBkUgfyml8fDyio6NlfixlXk1TEz//j17/sKAaDoddAFyf/ntI07RGAOkAbgWw4tO3/RnAQQA/+PTnb4bD4QkAbZqmNQO4AsDxf3QtHggWZd59910cO3YM119/PTRNw9mzZ/HEE0/A4/Hg6NGjiImJgdPpxDXXXIOBgQFpGS4oKEBjYyPuvfdeNDU1wefzweFwIDU1FbfeeitCoRBOnz6NQCAgpP1ZWVniTaanp6OzsxP33Xcfmpub0d/fLymiG264AVVVVTh9+jSWL18uYe7y5csxMDCAnJwcOJ1ObNmyBUajEY899hgmJiZw6tQpaNoMI+Ftt90mYSo3l2kMg8GAhoYG/PGPf8SCBQtw8eJF5Ofn45VXXsGTTz4Jp9OJM2fO4NFHH4VOp8OpU6fgdruRmJiIuro6fP/735fwcXx8HC6XC5cuXcLRo0fxxBNPoKurSxqEvvWtb+H73/8+jh8/Lm3r+/btw5o1a+D1epGamooVK1Zg//79cDqdSEtLE47quro6oZyNjY1FSUkJPvjgA2mAOXToEH784x9j//79UvBas2aNhPTENZ86dQp//vOfcf/996OyshKbN2/GI488gvXr1+Puu+/GypUrsWvXLlitVpw9exYLFy5EMBjEBx98gIiICNhsNvT29mL+/Pno6+uDxWJBY2MjSktLUVhYKEqOxdaGhgbExsaiv79fGCEnJydx6tQpmcN5ww034JNPPsE111yD7u5uNDY2YunSpYiKioLD4RBlEhsbi4KCArS3twtjosFgwBVXXCGHnoXl7du3w+PxoKamBsXFxdDr9WhsbBRoZn5+vnDnqEo/FArB4XBA02YGm5SVlcHj8aCjo0PSHYsXL4bH48Hu3bvFQ+3s7MSCBQsQDofR29uLiIgIWK1WtLa2ShctG2I0TYPL5cKFCxcAzAzaZtonOztbFDUww2qp081QS5eXl2Nqagrd3d3o7+8XNk8yWBYVFcHj8QgOnYoqMjISPp9PmBtDoZBMR3r33XcRGRmJvr4+KYyysHnp0iVMTU2htbUVBQUFgpRqb2+HTqfDuXPnUFBQIAY/Pz9fokM6NR6PRwavAzNIpNbWVgAzqZuysjJcvHgRTqcTkZGRcLlcaG1txcqVK/HBBx8gJSUFiYmJ4kheffXVGB0dlT0ZHh7GwoULERERgZaWFnEACgoKkJ+fLzWW6Oho7NmzB7feeiump6dx9uxZ9Pb2orKyEj09PWhqasLVV18twz9o+Pv6+nDhwgXExsaitrb2c/Xp/6Ocu6ZpOQAWADgJwPqp4qcBSP30bekAupSPOT/92ed9r3idzClysT/88EO4XC4sWrQInZ2dCAaD2LFjB+rr61FZWYmBgQGMjo7i9OnTyMjIwHvvvYd58+bBYrGgrq4O27ZtQ3l5Od555x3U1tbKdf7617+ivLwcWVlZqK2txalTp5CWlob33nsPUVFRSEhIQGNjI7Zu3Yq5c+ciJycH586dQ0tLCwYHB7Fnzx709fWhsrISmzZtwujoqHTl2e12DA4OYuvWrTAYDKitrcULL7yA0tJSREdHCy+1WjhTm0IIu1q1apWQhnV1dSElJQXz58/Hli1bpIHj3XffhcFgQHFxMZqammZZe51Oh5qaGsTHx6Oqqgrj4+M4cuSIpE9IR2q1WtHR0YGCggIEg0E4nU4htTKZTCgrK8OcOXNw/fXXC7viG2+8IXBNh8OBpqYmWCwWtLa2isJsbGzEyy+/jPnz50uzklpv0Ov1aG9vx5EjR9Da2gqfz4dt27ZJLryrqwvHjh1DVFQU5syZg6amJjgcDjkI1dXVsNvtsj56/Ux3aG9vr/AB0eOdnJzEiRMn4HQ6hQmUstfV1QWTyYSSkhI4nU4AwKVLlxAREYH4+PhZY9qOHj2K9PR0ZGdno6OjQ35WVFSE5ORkpKenIyoqSuTMZDIJooJd0JGRkaipqUFiYiKys7PR0tKCqqoqKZxRNgwGA1pbWxEfH4+8vDzU1tYK0Z3NZkNubi4OHDgAj8cDt9uN/v5+NDY2wm6349ChQ+jt7RV+cL/fD03T0NjYKOk4cuBzLJzBYBDWTbbfM61nMBjEqcrOzhbPksYvNjYWaWlp8Pl8SE5Ohtlsxvz585Gfny9eOfPgXq8XJ06cgMViQX5+Pnbt2gXgcuNYbm6ucMerReQ9e/agpKRE6g+apuH06dOIiopCWloampqaZFwmGSzT0tKEmZNY+vz8fMTGxiIuLg4XL16EwWBAdnY22traEAwGZS4Du3o5yKejo0MQeHa7HW1tbZiamsKZM2ekwcnhcIjBI9KMeXdGKHReh4eHxbEjFUQoFEJ6ejpqamoQGRkpETidhdjYWCQkJKCsrAx5eXmfq6//28pd07RYAO8A+G44HP48ImHtb/zsf2uD1TTtf2maVqNpWg07z2gNWf2/8847UVFRgbvuukuIpvx+P1577TU88sgjiI6ORnNzM4qKinDPPffAZDLh8ccfR2xsLO677z48+uijSE9Ph91ux9jYGL7yla/AZDLh+PHjKC4uRnp6OoaHh/GVr3xF+FGeeOIJrFixAnfddRceeugh4ZQYHh7G7bffjvz8fNxxxx0oKyvDbbfdhsjISDQ2NmJkZASbN2/Gk08+iby8PNTV1WHx4sVS8JuYmMB3vvMd1NXVCe6YRVfmiVVE0Ny5c4WQKhgMory8HEVFRZiYmMDChQsRGRkp05VIZXr//feLN8ewcdWqVUhPT8dXv/pVZGRkYOvWrdKB+9BDD2HJkiXYu3cvHn30UWRnZ+PXv/411qxZg4MHD+KOO+7AyMgI6uvr8eCDD0pr+d69e2E0GnH+/HnU1tZi3bp1yMrKwsGDB/HNb34T+fn5+M///E/JoX7rW99CXV2d4JTViUxr165FSkoKbrjhhv+bujcPj7I+14DvWTLZk8m+LxASSEjYl0R2lF1xqwWtKKKtbT3dzmk9PbWnxdrj0XPsqd20ilJLLQhIVVAJEMKWAIEECCH7vk6WmUwmk5kkM5N5vz/G+8kbTvWc7zrfH35zXVzGLDPv+3t/v2e5n/u5Hxw7dkye/5YtW7BhwwY8/fTTMBgMuHr1Kh566CGsWLECM2fORH19PR555BGEhITgzjvvxOuvv46AgAA0NTXhvvvuQ35+vqTsTMmPHj2KLVu2wN/fHx0dHYiPjxfHevDgQbz88svYsGEDuru7UVJSgpKSEgQGBmLp0qUICwvDzZs34f
V6UVNTg7a2NsybNw+tra0ypcfpdGLdunVTYBrAB//U1NTgzjvvRExMDLRaLY4ePSoCbU6nEzdu3IBG42umCgsLE0rcuXPnkJycjICAADz88MN4++23ERoaivj4eISFhYm+SX5+Pjo6OrBmzRqEhYVhcHAQNpsNc+bMEZEts9mMjo4OUeDkmRsaGsK7776L9PR06blYunSpsGYcDod0hHo8Hhw4cACtra0wGAx44403oCiK0E/z8/MRGBiI8+fPY/r06UIaYKQcGBiIl156CZGRkaI709zcLE6/q6sLBQUFUyi2Op1OPvt3v/sdKioqEBISgsOHD+P06dMiKbJp0ybMmDEDiqLgrrvuQmhoKNrb25H+meRHbW0tjh8/jrlz56KqqgptbW0oLS2FwWBAb28vtm3bhvDwcDgcDmzcuFEyOUJRy5cvR3NzM5YuXYrIyEjce++9ePfdd1FaWoqOjg5MTExIJO7xeFBSUoKOjg6sWrVqSvZTUFCARx55BBqNRiDEkJAQtLS0IDk5Weom8fHxUl8gZBoSEiL6RIShPu/1vzLuGo3GDz7D/ldFUf722bf7NBpNwmc/TwDQ/9n3uwCokf5kAD23v6eiKG8qirJIUZRFUVFRgjsx4vF4fFKuu3btQmRkJA4ePIjOzk4YDAasXLkSMTExuHDhAi5fvowrV64gMjIS7777LpYtWyYR2+joKO6++26MjY1h6dKlSEhIgF6vR0VFBTZs2ACr1Yo33ngDubm5iI6Oxv79+5Gfny9G1uFwSDv0nj17kJaWJmPNtm/fjtDQUBw+fBgvvvgiYmNjUVRUJBvS4XDg2WefRVdXF86ePYv9+/fjRz/6Efbu3QuDwYC6ujoZ5KvRaKbwj0+fPo3HHntMjFBjYyN27twpcr8/+tGPcPXqVbS0tODxxx/H2rVrcffddyM1NfW/sS4iIyNx5MgRbNjgq2v39PRgYmICFosFd911F0JCQnDmzJkpnXOFhYUoKipCeXk5mpub8eqrr2LJkiUyOKO9vR07d+7E8uXLsWHDBtjtdsTExODMmTPiuBRFQWFhIfbt24fvf//72Lt3r1yXGq+32+0iSLV3714sW7YMExMTmDVrFlJSUqDT6bBixQoUFBTAbrejtrYWVqsVaWlpyMzMFDhJo9Fg/vz5mDdvnhRP3W43goODRWmSWjQ3btxAdXU1uru7UV5ejrKyMrz88svYtm0bysvLcezYMWRmZqKgoADXr1+XYnp3dzdWr16NBQsWYOHChdBoNLh69apIDB8/fhwRERGor6/nuZH7nTVrFrKzs8UxzJw5U6Kyvr4+PPXUU1OKyoCvrpCRkSETr8haogCb2+3GihUrJIpOT09HVlYWhoaGUFBQgIULF8LlcuHkyZNISkrC+fPnMXv2bKHskXtPrXKXyyWFVIqTkdVhMBjQ1taGJ554Aps3b8aZM2d4jnHXXXchNzcXs2fPFnE0Qjzt7e0SdVKLxeFwIDc3V7DvlStXYnR0FGNjY4iOjsa0adOkoE0efUNDA771rW/hrrvuwtmzZ+HxeNDa2orc3FxkZ2cjNjZW6KelpaUiznfhwgXodDrU19cjICAAPT09cDqdmDNnDsLCwpD+mchfZmamNGjV1NQI/HL27FksX74cH330ERoaGqYU871eL9ra2jBt2jSkpqaK2F1jYyPq6+vx9NNPo6ioCBUVFVOokKxbzZo1C1arFcHBwbh48SJSUlIwMjKC06dPY82aNeju7obRaBT9H46lrKurg5+fnwxN+bzX/4YtowHwNoBaRVH+S/WjowAe/+zrxwF8pPr+do1G46/RaKYByARw5Ys+g1Hc7YXO6upqeDwe2Gw2VFZW4vHHH5eNbLPZcPnyZQQEBODWrVsYGRlBdXW1FEUmJiZEorS8vBxJSUloa2uDx+NBdnY2hoaGUFxcjBs3bsBgMGBoaEgkUlnZj4mJgd1ux7Vr19De3o7a2looioK6ujoAPtjoxo0bWLx4MQBIgcVms0nKRGYIdSvy8vJgMBjwq1/9CpcuXZIClJo7bzKZEBcXJ9TDmpoazJo1Cy6XC+fOncPEhG/I74IFC9DU1ASz2YyGhgZUVFTIe9GwAJCGDGpOW61W3Lp1SzSn1XNDvV4vPv74Y6SkpKC5uRl6vR4mkwmjo6NoaGiQ1vLm5mYMDQ0JnMLag5qn3dbWJjK9c+bMEeaLmuNPuMZut2NsbAxjY2MCEXm9XiQlJaG/vx/d3d0yXYg1EjpHPz8/JCUliRBXU1OT8PpZ5CWDxW63y+Fob29HZ2en6OQ7nU7k5OSgvb0dGRkZCAwMhMViQW9vr6hBkmY3ODgoMrljY2Nobm5Gf38/Ojs7YbPZhLMOQBpl2KBD1s7o6Cja2tqwbt06pKenT+Hvcz+QSjg6Ogqz2Yy4uDjBq2/evIkVK1ZIDYHMrrq6OhQUFEjmEBISIjWptLQ0kRwgOyg8PByRkZHweHySshxDB0BqQaT98nymp6fD5XIhJiZGHADXn8O61R2i6mwmJiYGIyMjGBgYQHV1NVauXCm2IDExUXoMqN2i1/sUU8n4IRstNzd3CjOMhVMyh+icXC6XYOIs2gYGBgqV1Gw2Y3x8HAMDA1LQBXxQntVqRXx8vIyvZFbE7uc5c+ZMKZ7b7XaBisfHx5GRkSGZAwvcBoMB3d3dIr1NaDAtLU3qAvyZuoDPgJU4Pgdrf97rfxQO02g0ywFcAFAFgNy3n8CHux8CkAqgA8BDiqIMfvY3zwHYBR/T5vuKohz/os9YuHChcu7cOTkM3JTr16/Hq6++isHBQRnWQCaJ3W5Hbm4u2trakJqaCoPBgJs3b4oUKQA0Nzejvb0dOTk5Io+ak5MDALhx4wYKCwtx7tw5nDp1CoODg6iursby5cvF0bS0tEj6zcOVkJCAJ598Er/85S8xNDSERYsWCV2xqqpKhlKUlJTI5zU3N8NutyM0NBQJCQkwGAw4evQopk+fLhocwCRF0+PxoKysTGhqzc3NmD17Nmw2m7AYUlJS4Ofnh6qqKhk3FxkZOaURxOl0CkWM0278/f1hs9mQkpIinaFtbW3IzMwUI8KBIwsXLhSBK8qaer1eGSc4Pj4u4+YmJibQ3NyM7OxsuZ+amhppg+f1kqmh5ttfuXIFExO+QRdtbW1CT5yY8Il8lZeXT5mNywiLTSVkuHR1dSE8PFxGooWFhclh9vf3R2trK5xOp+ybuLg4hIeHo7e3F2azWQaINDU1YcaMGQgKCsLZs2eRmJgoHGXS11jMCw0NRX19vYyFGxkZQUJCwhQD7fV6UVdXJ89/dHQU3d3dIkRmNBqFRUMuN/dgf38/nE6nKDK6XC7BzwMCAgR/vn79OlJSUoSCl5SUBH9/f/ldi8WCjz/+GDt37hQGDzn7iuLr+hwaGkJUVBQ6OzuFPkpnxP3rdDphNBqFfUSuOADRk/d6vejv70dAQIA0EkVFRQnjY2BgQDB+Ki+OjY2ht7cXISEhmD59uvRnMJgjjTEmJgahoaFSo2IwMDo6ivj4eAQHB08Zt8ieBafTiczMTHR1dcHr9Uqtxmw2Q
6PRyHNhljB79mz09/djdHQUIyMjiI+PF9kGm80mOjiBgYEYGhqC3W6XoRyhoaGyH9PT0wVPJwRIx0UpA5fLherqamRmZsJgMKCpqQkajUZqV5QscLvdKCkpwfj4OB5++GFMTEzgiSee+FzhsC+FKuTChQuVoqIi2TCkD86ZMwdVVVVwu90YGRkRutbtTUqMJsj5ZaONWmWQDsNsNqOurg6rV6/Grl27kJSUhJ/+9KdTuuVYhCOfm3gXi6NPPfUUysvLBcsFIC3i/BzSzwAI55mR2/j4OLq6upCSkiKUL4/HI3RGNu1oPmtuUad0agokI2FmDGzOIr1MLSOgjp7UDUXk5jI6o6YKZUkZGfPnVM2kQVCLLbEgTIohG3DUNMPbo3d19yZ1dlhnYEfxwMCAGEMaODXHmAaAz4H3RmkI4qbcW8SVb6dEsoGN1EgaNZ4RFjoVRREYTe1MuW6kxDkcDrkXYHK6kMPhEOVF9XtzD6qdPY0qI1P12gIQ2t2JEyewbds2YcewDlBYWIjU1FRER0ejq6sLCxYskKyD/Gyv1yvDxdWdk9Qh4vlLTk6WngtGkmoxNTprPmu1gBfv1e12Y3h4WGiUvHd2Fs+dO1cMMjtkqXSp7odQNwh6PB709/fD4/HI6MSWlhYkJibK/zOj5P5Wa9dERUVJVOz1ekWL3Ww2IygoCDabTYIJSv6SMslzSFonJUh4jQxAeBbtdju8Xq9IhjscDuj1eiFZ8LkTniarqbGxEWazGS0tLcjKysKSJUvgcrnw9NNPf7lVIXngmN7odDrU1NRg3bp1MiyBBk6NSd6O36oxezULhWmbx+NBc3MziouL4Xa78YMf/ABZWVn/7YDRkKsV/XQ6HZxOp1AzOVAXwJQ2dDV3nhAPx4xRBImMGBofRVEkwlEbdV4XNwYNKY2Gej1ooFnEZFTFa1M3FtG4c8CGw+EQI0/HQiPHz+ffMfXX6XTisIjJ3t7qTUPIZ6zu9qQxYPRFWiijehqP7u5u3Lx5U96bQw2Y4tL4qQXP/Pz8RBCN7elkLvA50tjzcKq7JRlBqznbNOy8X75I7VTz1RVFkYiXh50GkU6QcBQpsfx7OnreExkk/HvubzrS4eFhmfV569Yt0RXnZ1Fjxu12Y8mSJQAgTBn2cNBx8F4ZHPFz+Gxo7NXYOdeRv8s1YaABQGAE1gwINzECd7lc6OjoQFRUlGj48HPUglvsluY+4PPX6XSIjIyUZsfAwEDk5OTIfYaFhQHwdZtTJ/3mzZtwOp2YOXMmrFarQDU8J6wRkN3CzI1ibdHR0SL5zGKv2+2G0+kUp8W9wXoiMz6eee5z9R5hwxnhZe5HMmfuuOMOxMfHi5jcF72+FJH7ggULlDNnzsiB5GJw49BrsylJvTiMXNjlRmhDfV9qo8IX2+bVBhGAFHH4d4wUaTC50Ri182saMn4+nZTaKKizA34mDQ4NML/P++PP+Z58v9sbjrhRAEjkwGibn88Uj0bi9oiUkYe6IYlGlvfPteVm5Zozeqeh4nswleV68DmonRGzHHWzEvU+Wlpa5MDwHpOSksSZEq5hFM89wyxQ3T/A5+V0OuV6KNhFA65W5aNTAyARLNdAfYgZxXPP0tlRa53Pi79P58jITh2NMtKnmBu7i91uN6xWq2jQBAcHSzZrsVjkffhcUlNTRdaYzk5NveM+YHZCZ0mGkcvlkoCDwYXRaBQ2Fp0SgwIqkrL1Xp3t8nMURZHMWl2P4H1zfzBoUDtbrVYr926322VuK3X5qVBJ7XrCjxydxzNCR9fd3S0sGEb1wGRjJLtfY2JipEGKWDy19j0eD+rq6pCSkgK32y38d9oLisjxfAcEBCAiIkLOWHh4uEA6tAts+mK2zeAyMDBQakwUHNPr9fjWt771uZH7l0Jbhp5efcDVhVEeDv7s9iYPGjguxu3pLQ02NzAwafD5d2o4iO+nNvJqA63uTCXkQviBURY/V+1Q1NAA75vvr3ZG/D122TKD4D8aHd4XDSXxXRoNGlF+n+umLlzze2oBLB5eHkBgEtq6HRZjtsTmDH4mAKFuqT+b98174f+zmYXfGx4eRm9vr2xo/qMh535RHwQ6Gjoytegbr1ndGcqioRriUGeI6kyQ981nT3iCn0UeN7FwGngebt6nWn6Az5bOh3+vprLyetSGjwdb7cxYqFNrgtNJcI/R8Kn/0SEzewIgOjQAhGbIgIOfe/tzVmd96v4COgx1QMJASg17co+q4UxmB+rzy31IR0rnwjUi7EF4j9xxAKJjRCfNWodOpxPdekbu1JOhUBmzKWYtlCgh9KTWzlHvS55Dfs2OeT57Xqefn59kOtwPtCtq3Syuu1p25fNeXwrjzgfFaEodVQOTg7H5UFgwUzcBcUMRN2XkRS/OKI4PgOmvWjeED4ROxul0ymAD9SGmlKcau1ZDOTzANAg8gDzIxBtDQkKmRIQ8ZMCkoeEh5KGlwiFfNHSMDHmoCEWojRHxYW4WGnDCRmpjyHWgsaWTovNSO9LbMx9uaDWsxbXhhuRnAZBZmWqox2w2ixYKBcrCw8Mlde7p6cGhQ4fEQHPgg9rhqzF+XgejJq1WK3xyDrVQZ3fEUdW1De4jqpGqawlcR0bsGo1G2Ehs3fd6vbBYLBLREzYgo4QZA88D9wsZHCwQ+vn5RgMaDAZhg5BWGhsbi8jISNFDIgPJ7XZL447RaJTzFBISgtjYWPmZWiSL60eohwVY1nzUZ4Lnj9kaMDlDQVEUDA8Pi+HVan0SAOzRmJjwteYDkOI9z6haG169Z+x2u2QsgYGBwpsnjEMHx2zY6/WKDC/HK2o0vo5xo9GImTNnitHk3ggODsbExKTMMhU0NZpJhc6cnBwEBwfLrNnBwUHZB3QM/v7+CAkJESloRVGmCPpxzQnZhIWFyf1y73Z3d0tNgOv6/x6YBQIAACAASURBVAvjToxSDVvQaPb09KC3txcABNflQaVxpUEDJg0XDTwdg9rI8vfpAJqbm9HV1SUqg/wcblhuUOpcA1OnFHV0dGBwcHAK9KKO4JkdMIqhQ6EBUhtrwktqY6mmNVLDw+12o7W1dQrUAUx6dzVerIYGGMF6PL4ZkuQhq/E7Rhk02mrpY0YSNODqyJKpr7qIyuhSHQGrGTNer1d0QwBftNbY2AiTyQSv14vQ0FApgvf09Ex5tjSOfMaEVW5/7urrolNXF5bdbjdMJhNaW1vlcPO58W/V66LOCGlsg4ODRUqCk3cYJTLaZvqvLpZxH9MRM3ggVEGnTsx7aGgIbrdbmBk87DwvlITQaDTiDHg/3Cu8ZtZPeI3ErNVdqyx0q2tJvG71PqMBVT9bNTzHa2WwxCh7cHBQ/oZGj2eLRdOhoSE5m2rI0OVyiS4MnaL6LNEhBQQEwOl0SkFanbHHxsYKRZYRNNeG8BFpjizIMhgiFMYzS7tFYgQnOanPLZ+1OotVF/FZm+O549minfB4PAIPqVGBv/f6Uhj32zFeRgajo6P42te+hj/96U9yGL/5zW9KAUyv18ui8X3UqbM6quTPaJy0Wi2OHDmC5557DkajEceOHcPDDz88
[base64-encoded matplotlib PNG output omitted]",
+      "text/plain": [
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "imshow(img, cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "metadata": {}, + "outputs": [], + "source": [ + "def make_prediction(img):\n", + " processed = img / 255.0\n", + " processed = np.expand_dims(processed, 0)\n", + " processed = np.expand_dims(processed, 3)\n", + " pred = model.predict(processed)\n", + " pred = np.squeeze(pred, 3)\n", + " pred = np.squeeze(pred, 0)\n", + " out_img = pred * 255\n", + " out_img[out_img > 255.0] = 255.0\n", + " out_img = out_img.astype(np.uint8)\n", + " return out_img\n", + "\n", + "def path_leaf(path):\n", + " head, tail = ntpath.split(path)\n", + " return tail or ntpath.basename(head)" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "metadata": {}, + "outputs": [], + "source": [ + "pred = make_prediction(img)" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 66, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXcAAADECAYAAABk6WGRAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOydd3zURf7/X7vJpmx62TTSSSMECBA6AlGUEwRF9BQUhZ8eHnZOVDj14MATDhVFREFABTT0KjUQUiGkkt4TkmyyyWaTbHY3m+37/v2R2/kmJKFjudvn45FH9tNm5vOZmffnPfOZeQ2HiGDGjBkzZv674P7WCTBjxowZM/ces3E3Y8aMmf9CzMbdjBkzZv4LMRt3M2bMmPkvxGzczZgxY+a/ELNxN2PGjJn/Qu6bcedwOH/icDjlHA6nisPhrLhf8ZgxY8aMmb5w7sc4dw6HYwGgAsDDABoAZAGYT0Ql9zwyM2bMmDHTh/vluY8FUEVENUSkBbAPwOP3KS4zZsyYMXMd98u4DwIg7LHd8J99ZsyYMWPmV8DyPoXL6Wdfr/4fDoezBMASALCzsxsdERFxn5JixowZM78fdDodOBwOLC0todfrYTQaYWFhAYPBAB6PB41GAysrK3C5N/e9c3JyWolI0N+x+2XcGwD49dj2BSDqeQIRfQfgOwCIiYmh7Ozs+5QUM2bMmPn9IJFIoNfr4e3tDQDo6uoCn88HAHR2dkKr1cLV1fWWwuJwOHUDHbtf3TJZAEI5HE4Qh8OxAvAsgBP3KS4zZsyY+cNgZWUFDw8Pts3h/F9HB4/Hg5OT0z2J574YdyLSA3gdwDkApQAOEFHx/YjLjBkz/WMwGH7rJJjpB6VSCdMoRaPRCFtbW3aMx+PBwsLinsRz38a5E9FpIgojosFE9K/7Fc+9QK1WQyQS3fzE3wiJRIKKigq23dDQgNLS0vsSl16vh1gshkqluudht7W1QafT3fH1arUav7ZEtcFggFQqhUajueuwdDodGhoa0NbWdg9SdmOkUuk9Da+zsxNNTU13FYZcLkdJSfdo6K6urnuRrFvGYDBALBbfsJ4TEbq6uu5JXg+ETCaDSqViL16j0djruE6nu6s60hPzDFUA+/fvx4kTv69eI1PmP/3008jKysKlS5ewd+9evPfee/D19UVgYCAaGhrY+WVlZRg/fvxdx2tpaYlZs2YhLi7unhvSL774Ap2dnXd0rdFoxJYtWzB37ly27/Dhw2htbQXQ/bw+/vhjSCSSe5JWExYWFti0adNNvalz587hs88+G/B4V1cXeDwe5s6de9/L2qpVq7B79278/e9/v6twTNe3tbVBoVBg9+7ddxQOEWHlypXIycmBi4sLRo4ciSlTptxV2nqi1+vR2dmJp59+esD4LSws8Oyzz2L//v0DhsPhcNDZ2Ynt27ffMwN7PQqFAoMHD4a1tTX0ej0AQKPRQCaTwWAwQK1Wg8fj3ZO4/ueNu0QigYWFBXJzc28pQ2/X4N2JB6zRaJgxcXV1xcyZM/Hiiy8iMzMTdnZ2AABbW1ssWbKEXRMREYErV67cdlz9YWtri7Fjx/bqC7wXpKWlwcXF5Y6uJSK88847OHbsGPN2Dh8+DHd3d7Zta2sLgaDfgQN3THNzM86fPw9LS8s+eW+qnACwY8cOtn19d4hWq2VptLOzw4QJE+5pGk2Y4t23bx/eeust/Pvf/76r8LKysgB0v/ATExMxe/bsOwpn7dq1WLZsGWJjY+Ht7Y158+bh448/vi0DeqN6Z2FhAXt7exw8eBBqtbrPcVM51uv1mDZt2g3juXDhAubMmXPPDOz1mD6YGgwGWFpawtLSElZWVnB0dASHw7mtFo1cLr/h8f95415VVYXp06fDwsJiwAxtampCR0cHAAzYZdHe3t6rKdzW1obOzk7Y2tqySt/S0nLDZmF7ezvkcjmsra0hl8tRWlqKtrY2NDU1obm5GSkpKQCAa9euQS6XQyaTsWvr6+uhUCgAgHmzRDRg81yhUPQyTiaICG5ubvD29mbh9KSzs7OP9216HmKxuN9rKisrodFoYG1tDaD75dXS0tLnvOrqahBRr2PNzc3st1gsZmk/efIkqqur2f3GxcWBz+f3Cbe9vZ09J4lEwjx7kUjUryHoSVVVFQoLC+Hp6Qng/4yE6TlbWnYPNqutrUVeXh57nqYXc3NzM+RyOaysrGBvbw+1Wg1HR0fY2tqivb29V1wDGS+5XN7nZSGVSvvt2uns7IRMJmPp69m90PO5EBHLs7a2tj7luaGhAZWVlSwv7e3t0djYCBsbG0gkkn4NUFtbG+rq+h+4weFwenXpTJ06FWPGjIFcLmdx92yFmuj5jDgcTq
9zetYjmUyGmpoadHZ2wsbGBkC3Ib/+GUdFRYHH4/Ubl4m8vDz4+PiwbbVa3ac7yvTsBypDDQ0NrI4QUa90lJeXg8PhsLIkkUhQWVmJxsbGPnktlUr71DWNRgOJRAKFQsHK34AQ0W/+N3r0aPotSE1NpcOHDxMR0Z/+9Kd+z2lubqbs7Gx6+eWX6eLFi9TY2EjDhw9nx+Pj4+nnn38mIqLXXnuNZDIZnTt3jpYtW0axsbFERDRv3jwSi8W0ceNGmjZtWp84ampq6NChQ6RUKik+Pp5ycnJY2PX19ey82NhYMhgMpFQqSafT0SeffEJSqZS2b99Ob7zxBg0dOpRKSkro7NmzNHHiRFIoFLRmzRqKiopiYXzxxRdUXFxMRETz588nsVjcKy2FhYW0dOlSamlpobfeeotMeaPT6Wjz5s1UUlJCRESOjo6kVqvphx9+oEWLFlFkZCQREUVHR9OGDRuIiKigoIDWrl1LXV1d1NHRQYsWLaKUlBTKycmhyMhI+vTTT0ksFtOOHTuIiGjs2LH0zTff0IkTJyg8PJxWrFhB2dnZtHTpUkpJSSFfX1/au3cvyeVyWrx4MS1cuJCUSiURET3//POkUCioq6uL5e2+ffuoqamJFi5cSHK5nHJycujll1+m5ORkIiIaMWIEEREZjcZezyAnJ4fWrFlDREQxMTE0f/589gzWrl1LRESbN2+m6upqds3TTz9NCoWCba9du5Z0Oh01NzfTypUriYiooqKCPvroI1IqlWwfEdGGDRuosrKSNmzYQBUVFUREVF5eTn/9619ZWFKplJKSkmjPnj1kMBjogw8+oLi4uD5lSafT0bvvvktarZaIiJYvX047duwgg8FAsbGxVF5eTjt27KDXXnuNvL29SavV0ueff04SiaRXOImJiZSUlMTKn7u7O7u/qKgoUiqVpNfraefOnfTBBx8QEdHly5dJKBT2SZPBYKDJkyfT8OHDKSYmhoiI1Go1lZWV0fz586myspIuX75M48ePZ3mXmppKLS0tLI8kEgnl5+fT1KlTKSkpiWQyGW3bto2IiC5dukRHjx6l/fv3ExFRRkYGlZeXExGxtF27do1WrlxJMpmMVq5cSVKptE86jx8/TuHh4Wx7z549VFhYSEREwcHBpFAoaM+ePfTCCy9QYmIiERFFRETQrl27WLpff/11Ft+XX35JxcXFdOXKFdq4cSMRET3yyCNUXl5O9fX1lJqaStu3b6fk5GQaMWIEzZo1i5qamig7O5v+/e9/E1G3bXj//ffJaDTS5s2bqaysjHQ6Hb3zzjtUWlpKALJpALv6P+25FxUVwcfHBwqFAjqdjnk8PVGr1fD09ISFhQUmTJgAhUKB2tpadvzbb79FREQENBoNNBoNuFwuLC0t4eLiwjwAX19fCAQCxMbGYurUqezarq4uaLVabNu2DdOnTwefz0dUVBRMY/4VCkWvbgwbGxtwuVzw+XzI5XJERESgoaEBoaGh8PDwwLRp08DlcmFjYwOBQAB7e3tIpdJeHvrevXvh7OwMsViM0NDQPt0Y1dXVmDlzJgQCAWxsbKBUKgEAV65cwblz52CabObv7w+DwQALCwtYWVmxPtTW1lbmjX377bdoaGiAra0tHB0d8eijj0IikYDP54PP58PX1xfFxcUYNmwYAOD111/HpEmT4O3tjeDgYDz99NMYPXo0ZsyYAX9/f4SGhsLX1xcODg6orKxEWFgY89QqKythb28PW1tbSCQSbN68GWFhYSgpKYG1tTVUKhV8fX1ha2uLmJgYdq8A+nQ/7dixg3mGfD4fc+bMAQAkJSUhLCwMAODg4MDGKQNAeHg47O3tez1HS0tLODk5ISQkBABw9epVPP300+Dz+cy7LykpQWhoKEJCQmBjYwN/f38AQGZmJo4cOQIAePbZZ+Hs7IytW7ciMjKSlbXBgwfjetrb2xEcHMxaoQ0NDZgxYwa4XC5ycnJQUFAAR0dH8Hg8zJo1CzweDy+88ALc3d17hVNbW4shQ4bAz697usqwYcPY/bW2tqKrqwsGgwHvvPMOHB0dUVVVhfLyctbK6QmXy8WxY8dw/Phx2NjYoLy8HEqlEh4eHhAIBAgJCcGgQYOQk5MDANizZw+GDx8OZ2dnFkZrayu8vLwwduxYTJ06FR0dHSzNXl5e8PPzY2PFKyoqUFVVBQB47bXXAHR75C+88AIcHR0B9N9Sqq2tZa3LsrIybNmyBVFRUQC6u/wsLS3h4+MDPp+PBx54AABQV1fHwvrpp59Y2be0tERwcDBsbGzg5uaG4OBgAEBNTQ2srKxgY2OD7du3IyQkhOV5aGgovLy88NVXX0Eo7J7g7+DgAF9fX+zbtw8fffQRXFxcUFVVBTs7u5t/4B7I6v+af7+2567Vaik/P59EIhEREWk0GvLz86Pa2tp+z//++++Zx7127VqaPXs2EXV7SVZWVkREfTyW6OhoSk1NpYsXL9LFixeJqNuzSU9P75WOdevWEY/HY/u+/fZbunLlCkmlUnr++efZfqVSSQcOHGDbJo/YRGxsLEvD3LlzadOmTSwdptZJV1cXLV26lORyOfPsDAZDr3Q//vjj7HdUVBTzhgIDA2nu3LlERCQWi2n79u3svMmTJ1NdXR0RET3xxBOkUCjIYDCQnZ0dJSUlERHR3r17qaOjg13zwgsvsP8mr/HAgQMkEolILpfTl19+2Std7e3t9M9//pNtjxo1ihobG0mj0RARUUhICBF1t7RWrVrF8sWEwWCgAwcO0EMPPcT2TZ8+nfR6PV0Pn8+nhIQEIiKaMmUKicViEolEzItsbW3t9ZzWrVtHzc3NbHvLli0UHx9PRN3en0QiIZFIRA899BCp1WoiIho3bhw1NTXRI488wq574oknWL40NzdTfn4+vfDCC7Rz504i6m4tEXV7hQMRHx9PVVVVLO7U1FR2TCAQUEZGBhF1t0jEYnGf+zcYDFReXs48bI1GQxKJhLVYiouLac+ePUTU7VW+9957REQkl8v7TU9rayvl5uZSV1cXyWQyamxspO+//56IiM6fP89at5s3byZ7e3v68ccfqdss9a53SqWS9u7dy+rsp59+yloSOp2OZs2axc6Ty+V04sQJevzxxykuLo5aW1vpiSeeYGkaOnQoERF71iZCQkLYfQ4fPpzVP4lEQtu2bWPxmdJMRPToo49SU1MTERHxeDxKSUkhIqJDhw5Rc3MzyWQyWrhwITt/+PDhlJubS2VlZWRnZ0ddXV1UV1dH48ePpytXrhAREZfLZfl25MgRam9vJ3d3d1q9ejW1tLRQa2srEXW3BGH23P8PlUoFsViMpKQkeHt7o7OzE1ZWVrCwsGBvy+vZt28fZsyYAQDYuXMn3nnnHcTFxcHS0hKBgYEAuj+U9RxNUF5eDh8fH3zxxRcICAgA0N0XO2bMGHYOj8dDZGQkRowYwcKura3FuHHjIBKJUFZWBqB7eFR8fDz7ECeRSHDw4EFcvXoVAHDy5Em89dZbLFyhUIhFixahoaEBY8aMwYQJEyCVSmFrawsulwuNRoPKykpkZ2f3meLc2NgIoNtTG
j16NCZOnIjW1laEhYVh8uTJ0Gq1WLduHSZOnMg8h7lz58Lf3x+bNm3Chx9+iJaWFnC5XAwdOhQRERE4dOgQNm3axPpk1Wo1G9v7008/wc3NDZWVlZgwYQK8vb1x8OBBvPjiiyxNEokEM2fOxF/+8hdcuHABYrEYY8eOhUAgQGZmJkpKSjBp0iRcvHgR6enpeOCBBzB8+HAAQH5+Pn744QdwuVzs2LEDkydPBgB88803+OCDD3Dq1Kk++R0TE4PQ0FAcPnwYjz32GCoqKmBnZ8fybvXq1aiqqkJKSgqEQiG++uoreHp6IikpCUB3Cys8PBy7d+/G2rVrUVpaCmtra/j6+sLa2hrXrl3DM888Ay8vL9byMPXtXr16FTU1NXjqqacQFBSEOXPmYPTo0QDA7snDwwO7du1CTU1Nr3RLpVIcPnyYefRubm4s/M8++wy5ubkYO3Ys2tvb8dJLL8HDw6PPKCAul4ujR4+iqqoKzc3NICLEx8fjueeeQ01NDZYvX44//elPSEpKQlBQEDQaDYgIBoMBFy9ehFar7RVefn4+KioqYGlpyT56mvL2008/RWRkJKRSKerq6pCWlgZXV1eMGjUKALBt2zbMmDEDBw4cAJ/Px65du1hr6euvv0Z9fT0SEhIwe/ZsyGQyZGVl4eDBg1i2bBlmz56N+fPnIygoCCkpKayfXSgUIjIyEhkZGX365JVKJR555BEUFhaCz+dDIBBAo9Fg/fr1iIyMZH3tptb39u3bsWzZMvZtIigoCD4+Pjh58iQ+/vhjeHp6wmg0IjU1FcD/tS5NLZShQ4eCiHDw4EFWRwHgwQcfhI+PD3bv3o3169fDxcUFM2fOhEwmg0AggJWVFU6ePMk8/oGwWL169Q1P+DX47rvvVvcc+XE/EYlEOHToELKzszFnzhxYW1sjPT0dP/30E1xcXBAbG9vnmqNHj+Lhhx9GWFgYUlJSWLPM3d0dRASFQoHm5mYIBAIMGjSINYG1Wi28vLwgFAohEokwZMiQPrPPPDw8wOVy0dzcDA6HgyeffBJOTk44evQoMjMz8de//hVGoxEXLlzAqFGjYGNjA3t7e1RWVkKpVGL8+PEQCoWoq6tDV1cXwsLCkJ2djVmzZsHa2ho7duxAQEAA6/q4fPkyLC0tIRQKMWrUKNaUNZGeno4nnngCXC4X33zzDQICAhAZGQknJyeUl5fD2toaTU1NaG1txQMPPMAqSHBwMFpaWlBXVwej0Yjg4GC0tbWhtrYWHA4HLi4ukEqlGDNmDCwtLXH69GkQEezs7CCVSqFUKhEeHg5ra2ucP38eEydOZF0LHA6HVQw+n48hQ4YgOTkZlpaWGDt2LLy9vXH+/HlEREQgMjISwcHB4HA4UKlUaGpqgp+fH/z9/XH48GHMnDkTISEhUCgUKC8vx5QpU/rkSXt7OyoqKmBjY4Nr166hvb0dsbGx4PP5KCgoQGdnJ1paWjBx4kSEhoZCIpHAysoKBoMBwcHB8PT0xOHDh2FrawsPDw8oFArExsbC3d0dWVlZKCwsxAsvvABra2vw+XxkZ2dDoVCgoKAAo0ePxpAhQ2AwGKDX63H58mXMnTuX6ZC0t7ejtbUVAoGAGXsTnZ2d2LNnDxYsWAAAEAgEOHToEKRSKXQ6HWJjYyGVSiEWi2Fpadlvtw7Q/ZIwxREVFYXk5GSMGDECVlZWuHbtGqysrNhH9+LiYlRXV7PuCFMXlIm0tDSUlpZCIpFAKpXCzs4Ofn5+4PF4WL16NcLDwyEWizFjxgxERERAIBBAqVRCpVKxbsWJEyfC19cXp06dwlNPPcXKsb+/P2JiYlBcXAxXV1eMHDkSAQEB6OzshEgkQnV1NZ599lkkJiZCp9PhiSeegEKhwKVLlzBhwgSEhIT06pJLTU0Fl8vF5MmT4e7ujvb2dvB4POh0OjQ1NWHq1Kmor6+Hq6srAgMDIZVKUV5eDgAICwsDh8OBSCRCWloaiouL8eabb8LGxgY2NjZobm5GS0sL0tPT8cwzzyAiIgIymQwajQYcDgeNjY2wt7fHyJEjweVyIRQKkZaWhqqqKrz22msQCAQQCoXg8XiQy+Xw9/eHTCbD5s2bm1avXv1df/l4X/Tcb5c/graMVquFlZXVrxJXUVERzp49C0tLSzz22GMICQnBjBkzcO7cOXaOXq9nX8t7/r4XmPrS+2OguIxG402Fjm7lnPvNje7tRuj1enC53Du+x577e/6+F3mnUqmQkZEBmUyGffv2Ye/evXcVXn/odDr2su35G+ju2r3RsNmeI6V6smjRIvz4448sPIPBAA6HAy6Xe8f5ZEKlUvWa+Xm/qa2thZ+fHywsLPDqq6/C3d0da9as6XWO0WhEXV0dBAIBLCwsYGNjA6lUykaoAd0j81xdXWFtbY2//e1vUKlU+Pbbb/uNs7S0FJGRkTlEFNPf8f+5bpk75dcy7ACwdOlSBAQEYPz48bhw4QKkUilrCppexj0Nwr007ABuWKkGiutWjPZvbdg7Ojr6dGXcKpaWlnd1jz339/x9L/Kuuroan3zyCcRiMZ555pm7Dq8/ehrz64cM32w+xPWGnYiQl5cHW1tbaDQaFp6FhQV7Nnc7Bf/XNOxA95yLrKwsiMVieHh44LnnnutzjkKhAJ/Ph7W1NWxtbcHhcODs7NzLtmRmZuLSpUvQaDRwcHDAwoULB4zzZmXH7LmbMWPmV8f0rUupVN7xxLbfE2q1mo1K6zlq6k7DksvlvcTF+kMkEmHQoEEDeu73S/LXjBkzZgbEZAB/zRbx/cT04fpehXUvwjN3y5gxY8bMH5CbqX7+7o270Wjso5zWE71e/6srzP0eUalUveQI/sgYDIbbFhi7U0GyO0WlUt1Tdcfr5STuFRqNpt/JebfKneRFf8jl8n4lJ/7oGI3GW34+Wq32ps9AoVDcVBbDxM261H/3xr26uhqXLl0a8LhMJsMnn3zyq6XnXkup3isqKiqwadOm3zoZd4xWq2VCUkqlEh9//DGqq6tv+GLvydmzZ9m8gF8DIsLq1avxyy+/3JPw9Ho91q9ff0/C6gmXy0VFRQVOnjx5W2kxvWgUCgVOnz7NZnzeCRUVFTh16hT279+Pffv2DTif5HYxGo1M8+m3wmAwoKSkBOfPn7/heXq9Ho2NjTeVE66trb2hcmVPbvaB/3dv3Ds6OnppmffEYDDAzc0Nubm5v1p6XnnllfsmB3o3ODk5IT4+/rdOxh3zySefsBaYnZ0d6uvr4enpecvKlCUlJWw42f1Gr9eDz+ejuLj4nn0MdHJyui9rCvB4PHA4nD7zGW7Ehx9+yBRHnZycUFRU1K+swK1QVFSE+Ph4zJ8/H2+88QZOnjwJPz+/u5aTNhqNKC8vx5///Oe7CuduMQ3htLa2HrCbxPSBdMmSJX1GuOh0OhgMBhARdDod+Hz+LY/0uZmH/7s37oMGDcJLL73ECkNnZyckEgkbB1tVVYU33ngDDQ0N/aoc9sSkKtgTk5KeCYPBwCZkiEQi9luv1yMtLQ0PPvhgr2FaWq0W7e3t7EHrdDomxSmVSvttavenOX59xdbp
dH1m0N2IEydOICQkpN9nYDAY+u26am9vHzCOnh7z9eeIxWLmgZhedO3t7ewaU17J5XLWZO0ZRktLCzQaDfR6PVpaWlBWVobjx4/DyckJTU1NaGpqwrPPPgt7e3umbQN0dzFcn4emCvXLL79AIBD0aVk1NDT0UQFsaWlhHp9MJuvl/d1KE9t0nzExMZg8eTK7X1MFNaFQKNixlpaWXsfa2tp6pauurg7z5s3rFY9UKu2VDz21RHruN/2+XgLWdC/nzp3Dgw8+2Etutj9UKhW0Wi0SEhIwatQoSCQSdHR0IC0tDQ4ODujo6OgzA7Wzs/OG3aI///wzpkyZwp6xaWif0Whk1/XUajJxfUvBlK9isRhtbW3o6OjAsWPHYGlp2et+Ojs7+63nQHcZNKXDaDT2uheDwdCr9SiRSG5qPE11Oy0tDVOmTGF2oaOjg9Vxo9EIvV6PH3/8sY/KY0NDA9RqNdPp4fF4OHbsGKZOndqvEqhJ892ESSdnIH7Xo2UqKiqwceNGvPzyy0zsKSUlhckFxMbGor6+Hh0dHYiPj4dGo8HSpUv7DSs1NRVGoxHx8fGYMmUKAgIC0NTUhNLSUhARwsLC4Ofnh4SEBLS0tCA8PBzXrl1DTU0NVqxYAalUiv3792P48OHIy8tjU6QzMjKgUqlQW1uLJUuWIDk5GUKhENHR0dDpdEhJScHy5ct73ZNMJkNeXh57UeTm5kKr1aKoqAgPPPAAbG1tceHCBQgEAhQWFmLMmDE39bxSU1OhUChQVFSEiooK5tGIRCKUlJTAysoKzc3NbL9MJkNRURGICIGBgUwioWc6c3NzwePx4O3tDZFIhCeffBKZmZnQ6/VoaGjAggULwOPxUFJSgvb2dlRVVWHRokXgcDjIycmBTCaDUCjEk08+iaSkJKjVagwbNgytra2oq6vDokWLwOPxsHfvXrS3t6O6uho8Hg+ZmZmYOHEiuy+BQICYmBjk5eWhpaWll664VquFra0tysvLkZaWhqSkJHz44YcA/m/lH71eD7VajZCQEMjlcpSXl7PZpJmZmejo6EBERAQcHR2RkJCAZcuWDfic8/PzoVQq0dDQwGb9cjgcXLlyBZ2dnWhtbYVGo8Fzzz2HCxcuoKKiAu+//z5Wr16Nxx57DDNnzkR1dTUrW1OnTkV4eDhyc3OZSBXQPQPTVCZef/11AN1a4z4+PnBxcUF5eTlGjRqF8PBw5OTkQKPRoLa2FlwuFwsWLEBRUREaGxuhUChw9uxZrFy5Evb29ti6dSumTZvGBOB6otfrUVlZCYlEAkdHRzYeva6uDkVFRThz5gxmz57Nrs3IyICFhQWqq6sxb968fsddDx48GB9++CHWr18PJycnTJs2DQ0NDSgvL0dSUhKmTp2K9vZ26HQ6hIaGsrInEomQmZmJ2bNnQyKRoKCgAFqtFj4+PsjKysKkSZMQFxeH0NBQVFdXIywsDESElJQU2NnZ4eLFi5g1axYzgFVVVVAoFCguLsbzzz+PkydPIi8vDx988AEUCgUyMjKYxEh+fj5UKhXkcjkmT57cb90rLCxkq2qdOXMG7777bq+84/P5EAqFWLhwITo7O3HhwgX4+/ujtrYWAQEBrP6ZXiJ/+ctfAHRr6I8bNw6lpTp3UXEAACAASURBVKXw8fFBWFgYW3nNysoK1tbWbFGegSSWGQOJzvyafwMJh9XX11N4eDiT19y+fTudOnWKsrKymBTnc889Ry0tLVRdXU0uLi59wtDpdFRdXU2PPvooSSQSSk5OJrFYTHK5nObNm0cJCQmUmppKBQUF1NraSo2NjfTGG2+QVqulmpoasrW1ZeJajz76KAtXqVTSihUr6PTp05SQkEBLliyh5uZmqq+vp9dff51J0To4OLBr3n77bSZvu2nTJjIajTR58mRau3Yt1dfX0+eff04Gg4E2bNhAp06dIiLqJTR2IyZOnMgEyiIiIkihUJBGo6H33nuPhEIhyWQymjx5MhER1dbW0rhx46i1tZVkMhnV1NT0CqumpobOnz9PK1eupLfffpuIiPLy8mjZsmV05swZIuqWkiUi+vnnnyk/P5+IiL788ksyGAy0fv16SkxMpPPnz9OKFSuYBGtPwSUfHx8mJDZ79mwmE0xELJ2pqamUlZVFhw4dIiKiWbNmsefXk66uLiahKxKJmDCVSdwrMzOTkpKS6IsvvmB5mJqaSufPn6fi4mKKi4tjAmfLli3rI39r4vLly0xI7fvvv2fpz87Ops8++4xEIhFt3ryZ1q5dSyUlJZSRkUGPPfYYERGtWrWK9u7dS8uWLaNHH32UFAoFrVu3jk6fPk0ikYimTJnC4vnkk0+ovr6ejEYjNTc30/79+6mgoIB++eUXVu5XrlxJISEhVFFRQVu2bKGOjg7as2cPvfnmmySRSOjEiRNERLR7926aNGkSC/v8+fP93lvPZ9kzLxYtWkTLli0jom7Z32+++YaIiLKysmjDhg3U0tJC3377LRNw64/MzEx66aWXKCQkhJqamuj06dNUX19P06dPp0uXLhHR/4l5zZ07l6XRtO/UqVNUV1dHr7zyChERE0aLjo6miooKVj9ffvll0ul07JhGoyGNRkNLlixh8sDnz5+n8vJyqquro6+++ooaGxtp27ZtFB0dTUTdQmn4j3CZSTyuv/vZvXs3GQwG+vHHH9m1lZWVNH/+fJYek9AcUbdQW21tLRUWFtKhQ4do6tSpVFdXR8uXL6fw8HBmL3qWg4iICKqvr6eXXnqJiLrF5EwickTd9QF/VOEwLy8vREZGMsGsBx98EGvWrMHVq1cRHh6OqqoqdHZ2QiAQoKSkBA4ODn3CMBqN8PDwQHZ2NpYvXw4ulwsPDw9cvXoVHh4ecHd3Z14Yn8+H0WiEWCwGj8fD1atX4ePjw5q9165dA9DdhCstLUVcXBy8vLzQ3NyM2bNnw9XVFUSExsZG9rY3SaaqVCrk5uYy72DBggVsXcrQ0FAkJSXh8ccfB5fLxYwZM7Bq1Sq8+uqrTFTsZnh4eGDo0KHo7OzEtGnTYG9vj5qaGnh4eMDX1xdlZWXMswoICMDw4cPx7rvv4rPPPkNQUFCvsNzd3REREYGCggJMnToVer0eer0eP/30E6ytrXH8+HGMHz8eRUVFWLduHdM4mTdvHnJzc/Hll1+y+GfOnIlRo0bBw8MDXl5eALqbmHPmzGGaLiUlJb3SYBJiGjx4MKqrqxEZGQmgu5tj6NChfe69rKyMnSMSicDj8ZCbm4vQ0FCcPHkSRUVFGD16NDZu3Ah/f3+cOXMGpaWlmD59Otzd3VFWVoYhQ4YA6O4+uV7+1sSRI0cwcuRIAN3CcKb079+/HwKBAKWlpXB3d8czzzwDPz8/FBYWsm8GPj4+CAkJwYEDBzB48GAkJSVh/PjxGDNmDMrLy9koCqFQiP3798PPz48tzNDV1YWoqChcvXqVabfU1tZCp9Nh79698PT0RHFxMTw8PPD888/j4MGDLJ3FxcXMIwaA6dOn37Ac5eTk9NKcqaurY7OjXV1d2SIbu3btgr+/P1JTU+Hp6dnvePWWlhaoVCq
MGTMGmzdvxpo1a5Cbm4sRI0bAy8sLvr6+TIxNo9FArVZDLBYzobSHHnoIANg5pnSY+v+9vb0RGhrKPizW1dWxMh4bGwsrKyuo1Wrk5+ezehcTEwOBQIDW1lZkZmbCx8cHBQUFrGfAysoKEREReOWVV1BUVAQAfb6xmcoBl8tlgncAcOrUKZSWloLL5cJoNPZqYXp5eSEgIACWlpZISUlBW1sbXFxcIJFIEB0dDZlMxrSoelJTU8Pyo7a2FmFhYcwe9VxUpF8Gsvq/5t9AnvuhQ4eYN5qRkUGfffYZEXVLn3711Ve0ceNGtsjG888/T2vXrmWSpj357rvver0Nr127RitWrOi1UIVJPnflypX0zDPPEBHRn//8Z1q3bh1VV1eTUqmkcePGkUQioePHj9Onn35KlpaW7Pr29nYi6l4cYN68eUREVFdXR//6178oJyeHLl++TEuWLGHn6/V6qqioYN5ITzZv3kxE3R7Cv/71r17h90d1dTUdOXKEiLpbN9nZ2SQSiWjr1q1Mxnj16tX0l7/8hUQiEZ04cYJ++eUXIiJ66qmnmITo9Tz//POk1WpJpVLRsmXLiMPhEFF3a0gul9Pnn39OdnZ2RESkUCioqamJPvjgA7K1tWVhmCRuRSIRnT59moiIdu7cSfn5+WwhiIkTJxIR0ZUrV0itVtOkSZPo6tWrRERkbW3NwnrzzTf7yLRqNBp66623qLKykgoLC+mVV14hg8FAcXFxpNPpSKVSEVG3vCufzyepVMrCEAqFVFpa2msxk1GjRpFQKGTSsiaEQiGTEU5MTKQRI0ZQS0sLtbe395IX7ujoYNKwNjY29P7775NKpaLjx48TERGHwyGFQkFKpZJJND/yyCP01FNPUX5+fi/PzGg00qxZs5jE79ixY9mx6OhoSk9Pp4CAALZPoVCQXq/vtS8mJoauXLlCYrGYOjo62PPoD4VCwSSR29raiKjbO2xra6PCwkLasmULCYVCysvLI2dn5wHDMfHmm29SYWEhKwMpKSksX2tqaujYsWNERBQXF0e5ubnU2tpKb731FhERnTt3jjIyMljZ/Oc//0lqtZotrPLzzz8zWVyhUEiVlZWshXHq1CnKyMggsVhMtbW1rN6ZrlWpVPTRRx+Rk5MTEXVLP1+4cIHkcjnl5eWRWCymhIQEcnV17fe+euZ3VFQUXb58mbq6uggAvfvuu0TUXUauXLlCCoWC8vPzWUu8uLiYtFotzZs3j7q6umjEiBH0ww8/UGlpKSUlJbHFRfbv30+nTp1irTipVEofffQRlZWVUWlpKR08eJDq6+tv6Ln/rlUhP/30U0yZMgUSiQT29vZIS0uDm5sbRowYgb/+9a/YuHEj3njjDQQHB+Pzzz/HnDlzEBIS0mcEw/Hjx+Hu7o4zZ87g7bffRlhYGLy8vJCeng69Xo+rV6/C1dUV9vb22L59O5YsWYKgoCB8/fXXePbZZ+Hl5QU3NzckJCQgODgYI0eOhI+PD1xdXeHm5oasrCw0NjYiNDQU27dvx+LFixEaGorExEQEBQUhICAAQ4cOZfF1dnZCoVAgLCyM9WHLZDJIJBJ4enri2LFjcHBwQEJCAt5++23weDz4+/vjvffe6/f5/fDDD5g/fz54PB7Wr1+PiRMnoqWlBTExMcjPz2eLJYwdOxaBgYFITk6GRqNBRUUFeDweHn300T5hVldXw9fXF2FhYUx90crKCnZ2dhCLxcjPz8f8+fOhVqvh7OwMkUiEyspKLFy4EDqdDm5ubigqKkJ9fT2Cg4Oxbds2PPfcc2hsbMSOHTswbtw4NDY2Ijg4GFlZWQgNDUVgYCA4HA7y8/Ph4+OD0NBQJue7ZcsWzJkzp4/MqYWFBYKCgpCRkYGysjJMmjQJoaGhiIyMxNatW8Hn81FTUwMvLy/weDzU19dDp9OhrKwMQUFBiIuLg62tLebMmYPq6mqUlpZi2LBhbFEOE46OjnB3d4dKpWJ6P4MGDUJERARiYmLYUoyFhYVsgYby8nLY2NjAzs4ODz30ELhcLhwcHCAUCkFEKC0txZAhQ5CYmIiAgAAIBAKMHz8eq1evhp2dHdLT0zFjxgxER0cD6JbtnTJlCk6cOIEVK1Zg+PDhGDp0KFsSLzs7Gw4ODggICIBSqURTUxNycnLg4+OD0aNHw9PTE+Hh4b369nuiUChw7tw5jBo1CvX19eBwOPD19cWIESOQkpICGxsbWFlZYdSoURg/fjyamppQW1uLioqKfhUmN2zYAIPBAJ1Oh/T0dKjVauaN79y5EzY2Nujq6kJHRwcefvhh8Pl8XL58GUD34jCWlpYQiUQQCAT47rvv8Nxzz7GWUGVlJfvgGRwcDFdXVyQnJ4PP5yMxMRF8Ph8dHR0YNWoUMjMzwefzIRKJkJeXh8GDB4OI0NraisjISFy+fBl+fn4YM2YM62s/duwYFi9ezL6rmCAiODs7Q6vVQiwWIyMjg7WE3d3dwefzodfrkZSUBIPBgDFjxqCpqYkpQDo7O2PLli1swZrU1FSEhYUhPDycKbq2tLRAqVTi6aefhq2tLSoqKpCfn4+srCxER0fDwcEBZ8+eRVRU1P1TheRwOLUAFAAMAPREFMPhcFwB7AcQCKAWwJ+J6IaDwwfSlklPT4eHhwdcXFxgY2ODyspK6PV6+Pr6wtPTExcuXEBUVBS8vLyQkpICBwcH1hztSXV1NTOokyZNgkKhgJWVFYqKisDhcBAcHAwnJydwuVycP38ew4YNg5eXF65cuQJ7e3uEh4eDx+MhMTERw4YNY4W8tbUVIpEIPj4+4PF4EAgESEhIYB9nTfKqvr6+sLe3R0lJCdra2hAaGgpbW1s4OTkhPz8fWq0WgYGBsLOzA5/PR0lJCZM8DQoKgpWVFTZu3Ii//e1vfe7NaDQiNzcXw4YNg7W1Na5evcq0qF1cXJCbmwt7e3u4ubmhubkZ4eHhaGtrQ2trKyQSCQYPHtyvLnRnZyfkcjlr+hERmpqaUFNTA2dnZ7i7u8PT0xNNTU1MGtXHxwc2NjYQiUSQSCRwd3fHoEGDYDAYUFhYyJqy+fn5sLOzg6+vL6vQXl5ebLWaixcvYuzYsbC3t0dycjKCg4Px0UcfYd26db1WPzJBREhLS4NAIEBgYCCbun358mUIBAJYW1vDw8MDEokENTU1cHJygqOjIwIDA5GdnQ0Oh4MxY8agq6uLfYjrbwhmfX09mpubMXjwYJSWliI8PBwCgYB9uLWxsYGvry+Tgs7Ly0NrayuGDBnC5IoVCgVqamrg6uoKBwcHeHh4oKSkBK2trYiKioKrqyuTTnZ2dmZdKlqtFitWrGAfrCMjI2FhYYGuri4Wt729Pdzd3aFWqyEUCuHp6clGokycOBEHDx7EtGnTbriIeEZGBtzd3eHq6go7Ozt0dXXB2dmZpcnb2xu+vr7o7OxEbW0tnJyc4O3t3edjqkKhQH5+Pts2rbzl4eEBo9GIRYsWYdWqVWhtbUVwcDAr+0lJSdBqtYiJiYFIJIKjoyNsbGxQWFjIXgxA92iWhoYGeHl5wdnZGTweD9nZ2XBzc4OTkxPa29
vh7u4Od3d3lJSUQKFQICgoiBnVlpYWCIVCuLi4sHV6TVr/pg/uPj4+vVaDMtHQ0ACxWAwfHx9cu3YN9vb2GD58ONra2tDS0gJ7e3vY2dmhsrIS48aNA9DddajX6+Ht7Q2VSsWkk/Pz8+Ht7Y3JkyeDx+MhPT0dnp6eGDRoENzc3NDV1YXc3FwMHTqUjZbx8PCAUqmEVCq9oSrkvTDuMUTU2mPfBgDtRLSew+GsAOBCRO/fKByzcNjNkUql/xUCS7dDfX09fvzxR/j6+kKtVuPVV1/9rZP0m7Fz504EBQVhypQp91wF9NeEiPD9999jxYoVvYY0/69iMBhgMBjY0okDfe8ZCA6H86sKhz0OYNp/fu8CkATghsbdzM35XzPsQPcch0GDBrEPb/dat/6PgkQigU6nA4fD+cPfv2kRldmzZ/fRhf9fxMLCAnq9HhwO57YN+824W8/9GgApAAKwjYi+43A4HUTk3OMcKRHd0DKZPXczZgaGiGA0Gv+nPVwz/XM/PfdJRCTicDgeAM5zOJxbFvfgcDhLACwBcNO1AM2Y+V+Gw+GYDbuZ2+auxrkTkeg//1sAHAUwFoCYw+F4A8B//vcrg0ZE3xFRDBHF3OgDz73kblopfyRuJgX6e+H3lB+/p7T8L9JTNuNWxeLM3Jg7Nu4cDseOw+E4mH4DeARAEYATAExL178I4PjdJDA9PR1ffvnlgMeNRiNeeOGFW6qcRqMRCxYswIMPPojCwkJs3br1tpUUTfH0p4fRE7FYfEvqbrejkPf111/3muY8EHV1dXjllVduOdw75fvvv78jGdfy8nJs27YN1dXVeO+997Bv3z6Ul5fjs88+w/bt228pDJMBWLBgAbKysm47DSZ++uknbN++HWfOnBnwnJqaGiQmJiItLQ2JiYnYu3cvEhIS7jjOntTV1d1V+ntiGoJ3+fJlJCYmYt26dX20YG6GSCS66b3pdDo2sc1EWVkZjh49eltxabVafPHFF0hKSsLp06cxevRoXLlyBfX19XjrrbcAdH9Uv1H9/yPS1NSEtLS0uw7nZiqod+O5ewJI43A4+QAyAZwiorMA1gN4mMPhVAJ4+D/bd4xYLEZ9fX2/x9RqNZM0vRX1QAsLC1RUVMDJyQkRERGIjo6+oZzwjYiLi7uh/nZbWxuKi4sHPK7RaGAwGLBr165bli01regO9J011xMLCwvk5eXdUph3immI1p1cV1BQgOjoaISEhKCurg4TJkxAeHg47Ozsblla1jQrsaam5q7Wy7xw4QIcHBwGlGLNyspCYmIi/P39MXnyZIwePRp///vf74lx1+v1bFjcveDw4cPIyMhg46YvXbp0WysdyeVyKBSKm+rK83g8tLS0IDU1le1zc3NjwmO3ikkkLiwsDM7OzigoKMCoUaMQGBjIRMVsbGxuKpP7R0Kj0aClpQX79+9nImImVcjbpT8Bwp7ccZ87EdUA6DM3nojaADzU94o7Y9asWXjiiSfYSImeIyZsbGzQ3t6OBQsW3DQc04rzEokEb7/9NpRKJRPgUSgU/UoX9AeHw2ETGUzTz/vD39+/z+rnPbG2tkZXVxdsbW37HUvbH6mpqUyK9UajDM6dO3fT9RcHQq1W39ISXxYWFvj6668BdLdmtFptvyvcX09bWxuUSiUb/5uZmclEy0aOHHlb6dZoNPh//+//ISoq6pbTfT3+/v549tln+3RlGY1GVFVV4dChQ1i+fDkEAgGICI6OjoiNjcVjjz3W72gPUzm7FSwtLeHr69uviJdWq4WVldVtjRCaP38+SkpKAHTLJk+bNg1Ad/7civPD5/Ph5eWF8PDwfo/3TMulS5cQGxvLjgkEAowdOxYdHR23nIclJSWIjY2Fj48PNm/ejOHDh7OXkUkO4PDhw3e86LdKpbqtF7/pxWRlZQW5XA4+n3/Lz/5W8l0qlaKoqAg5OTlM2sD0QnRycur1fDs7O2FpaQkigrW1Nbhcbp/y1lMxtT9+1+OqysrKsGzZMrz66quYPXs25HI5Tpw4AY1GA61Wi6VLl+LAgQPg8/k4dOgQLly4gK1bt/YbFpfLBRHB1dUVM2fOBJfLxZdffokJEyYww56SksLO7ejowKxZs3D27FkYDAYIhULMnDkTbW1t+OGHH/Dwww+jvb0dnZ2d2LhxIzw9PTFy5Ejk5uZiypQp2L59O6ysrLB9+3YYDAbk5uZCJpOxGZ0eHh745JNP8OSTT6KsrAwRERFIS0uDUCiEr68vVCoVHnnkESQkJKCpqQmDBg2CnZ1dv6uqA90z9oqLi+Hm5oaNGzfi008/BRGhoqICBQUF8PLyglQqRWhoKNra2iAWiyGRSDBp0iTU1NRAqVRi0KBBqK+vR3R0NJuZZzQasWvXLvj4+CAxMRHr169Ha2srTpw4AT8/Pzz88MM4duwYWlpa4OrqCltbW2RlZeEf//hHvx8B/fz8sGjRIhZ2zxek6WW7f/9+WFlZQSgU4uGHH0ZISAjKy8tRUVHBJrVMnjwZ+/btw+TJk9Hc3IytW7fivffe61fBTywWIzMzEzqdDrW1tWwy2PHjx+Ho6Iiamho2gapneVmwYAF6juIyGcjPP/8cTk5OSEhIwPHjx7FixQp0dXVh9+7d+Pjjj5GWlobTp09j/PjxcHJyQmJiIl566SUcPnwYw4YNQ2pqKlavXo3U1FT89NNP+Prrr8Hj8XDx4kUcOnQIa9aswY8//ojLly/jyJEjSEtLQ3V1NVOENBm+65k0aRKefvppvPTSS1i4cCFTI+VwOIiLi4ObmxvS09OxevVq7Nu3DwUFBWyy2JUrVzBnzhysX78ecXFxAIBDhw6htLQUkyZNgkgkwrRp0+Dl5YWTJ0/i73//O44cOdJr+J5UKr2lF7yJnjOjjx07hmeeeYYZ5EWLFkGr1WL37t144IEHkJiYiDNnzmDDhg3QarXIycmBhYUFNBoNOBwOxo8fz3SERowYgdzcXPD5fDz66KPo6OiA0WiEra0tYmJi+rzokpOTcezYMfztb3+Dn58f1q1bh5UrV2LNmjV49dVX4e7ujtOnT4PH46Gurg4vv/wy4uPj0dHRgYaGBkyfPh0//PAD3nvvPaSnp8PFxQVlZWVYsGABK98qlQp1dXWorKzEt99+i4ceegi5ublwcXFBZmYmU8zct28fFi5ciJycHGRnZ+Pxxx9HUVERvL290draioaGBkRFRWHChAk3XYj7dy0cZmNjg7KyMvZGLS8vh0AggLe3N2vGVFRUYPTo0YiIiEBycvINw2tubkZQUBBEIhGOHTuGiIgIJu967tw5HDhwAFOmTIG/vz/a29uRmZmJTZs24bHHHoNAIICrqyuGDx8Oa2trjB07Fq6urigrK0NLSwuSkpIwbdo0yGQyVFVVwcHBgXlRxcXFOHnyJGJjYxEcHIzCwkIEBARApVJh1qxZ8PX1BdCtfc3j8TB48GCmf56YmMjijY6OHtA7MBXsBx54ABqNBkOHDgWHw8Hhw4dRUlKCBx54AFOnTsW5c+fQ0dGBgIAA7NixAwaDAZMmTUJmZib8/PwwcuRI5lUAQFJSE
uzt7TF69GjWFdHa2gonJye2fJtp1l99fT0efPBByGSyW9Ki7+rqwpAhQ3o1SWUyGZqamjB37lz2Qu7o6MD333+POXPmYMSIEexlLBQKweVyYWlp2Uey2IRarca+ffsQGBiIKVOmoKCggB2zsLDAn/70pz6G3ZSegboDXVxcIJfL4e3tDR6PBxsbG+Tl5eHMmTMQi8UoKytDVVUVhg4diqlTp8Lf3x87d+7ElStXMHLkSKarLhAIoNPpWHy+vr7Izc2FXq9ns1yBbiPL4/EQGBh4Q5nX5cuXIyYmBl9++SUOHToEnU4Ho9GIzMxMAEBISAgyMjKgUCjg6+uL6upqjBw5EtOnT0d7ezscHR3Zy5GIEBERgYyMDPj5+SEkJASlpaWwtLSEg4MDnJ2d+0zNFwqFsLKyuu1+fqC7m3HatGnM8FpYWKCmpgYtLS0ICAjApEmTcO7cOQDdHvapU6cwYsQIBAQEoLW1FUKhEN7e3ti7dy8CAgIwatQoODg4ID4+HuPHj0dgYCDKysr6fKzt6uqCnZ0dVCoV84oLCwsBAA4ODiAidHZ2MmkLa2trqNVqWFpawtbWFnK5HMOHD0dQUBCOHz+O7OxsxMbGwtPTs9dz0Gg0UKlUcHZ2hqurKyZPngxfX1/odDpYW1ujoqICOp0Op0+fRldXF3Q6HROOO3XqFAQCASIiInDo0CEm8XzTtQcGEp35Nf8GEg7r6uqi119/nW2XlJRQVFQUvf3229TR0UGFhYX0xBNPEBHRrl27yN3dvd9wTGzevJmJMBERxcbGUlNTExmNRho8eDB9+umndOLECUpISCCdTkelpaX01FNPUXR0NC1dupSIiJKSkigmJqZXuNOmTaMzZ86QXq9n+6ZPn86EkcLDw+nHH38kpVLJxJ0uXbrEJE21Wi0dOXKEtmzZQqWlpXTu3DkiIsrJyaEDBw4QEVF8fDyTe/3uu+/oiy++oH/84x9ERCSXy+nFF1+kjo4OUiqV9M9//pOIuoWSYmNjSavVMklWk5zuxYsXKTs7m4iILl26xH5v3ryZ8vLyiIiosLCQxo0bR0REZ8+epeHDhxMRkVQqpVWrVjGxNY1GQ8uXLyelUkkVFRU0derUG+aDiaNHj9KFCxfYtsFgoMWLF1NraytJpVJatGgRO7Zr1y4aPXo0kw2urKyk4OBgiouLYzLE/fH888/TjBkz2PaiRYuYsNfatWtvKKTl7+/fZ59JfEutVlN+fj5Nnz6diIjmz59PL7/8Mnsm1z+DLVu20Pjx42n69Ol0/PhxMhgMVFVV1UsGubq6mknqXr58mQ4ePEjnz5+nL774gqqrq+nMmTNM0vZ6KisriYiYKJqzszNt3bqVzpw5Q+Hh4UTU/XwlEgnJ5XLKz8+nhx9+mO03Go2UlZXVKz0FBQVMkvfChQu0e/duIiJaunQpLV68uFf8CoWC1ZHbQavVkkwmoz//+c/s2Zl4++23WZhSqZTJTCcnJ9O//vUvio+PZwJ4RN0yzibpZqJuyd5Vq1ax80xSvD1Rq9WUnp7ORNmEQiHt2bOHlEol5eTkUG1tLT3++ON06dIlOnjwICuvOp2O1q5dy8TItFothYSE0JkzZ+js2bMUHx/fKx6NRkOlpaV06dIleu2116i4uJikUilVVFTQO++8Q+fPn6eSkhKaN28eVVZWUnFxMV25coU2b95Ma9asoYqKCqqvrycnJyd69913Sa1WU3Jy8g2Fw35zw043MO4bN26knJwcKigooCNHjrCM9vLyop9//plee+01eu6554io25ju3LmTqc5dT3NzM8vA9vZ2kslkTIFOpVKRra1tL13ztrY2euWVV6irq4uSk5NZBZk6dSotXryYCgsLWUHqaYSIiCoqKmjTpk3U1dVFGo2GLC0tqb6+nhkVhUJBIplRtAAAIABJREFUsbGxNHfuXBIKhawC91RnlEql9P7771NdXR0REb344ot07do1Sk5OJr1eT21tbawySCQSeuedd4ioW0kzKyuLLl26RAqFgu2XyWS0efNmOnr0KBER/eMf/2AF8+WXXyaZTEZE3S+8+vp6unz5Mv373/9m2taLFy+mDz74gDIyMkitVlN0dDSJRCK6dOkSpaSksJfARx99RIsXL+5XnbMnGo2GHnvssV4Vuq6ujnx8fIio2/CPHz+eZDIZrVq1ij3rV155hZRKJS1fvpwWL15MHR0dFBgYOKCR5vP59NFHH5HBYKDdu3fTtGnTiP4/e2ce19SV/v9PEsIaIOyLrAKCqLjvuFUR21q3urbVTpexdTrtdK/ttD/b2nVGO77sMtZqN6tttYtrpeIOAoqoLLKEPQQSkhBCErInz+8Pes+AgKC1TjtfP68XLyXcnHvvufec85znPOf9UMfAx7HWe+swuYGws7jcAlwduru7E1EH6//ChQtUUVFBRMTIoBaLhVEGiYj++te/Mgpmeno6bdy4kcrLyxmpUKfTERHRjh07qK6ujv7+9793eS85hnxnFRYW0vLly4mogzaq1+vpzjvvpJycHHr11VeZ0WOz2RiJcvXq1bRixYou5cyYMYPWr19PEomErFYrY/kTES1btoxRVD09PSknJ4cRW4k6+PYcldFisTDGf3+0e/fuHjnzM2fOpMzMTGpra6P33nuPqqurqbKykjZt2kQ1NTWsPTU2NhLRf3IMcHr99depuLj4qkRVu91Ozz33HCOePvroo1RdXU35+fm0Y8cOqqqqopCQEGa4mc1mam9vp1OnTrGBUKFQUFlZGT355JPkdDpJpVJRU1NTl3dSKpXSpUuX6OWXX6Y9e/ZQfX09SSQSOnXqFMXHx5NWq6VTp07RihUrqKamhvLy8hg19vPPP6f6+no6dOgQvfzyy+wdKysr++Py3LOysiAQCNDW1ga9Xo+LFy8CAKZOnYrU1FRIJBJGzKuqqkJUVFSvoXnFxcVobGwE0LHYpNFo2BRUpVIhMTERxcXFsFgsKC0thdFohEQigVarRVVVFW677TYAQGNjI8RiMerq6qDX69HQ0NCNuV5UVISBAweipaUFrq6uGDNmDBoaGmC321nmKJPJBF9fXxYpMXLkSBZdU1JSAqfTiVGjRrGogbNnz6KpqQktLS0QCAQQiURssUgoFCIwMBAOhwMnTpyA2WxmJE1ukfHIkSNQKBS488470dLSgry8PDYFLi8vZ8dVVFSwtYGEhARG+8vPz4fNZoPVaoVWqwURoampCVFRUTh//jxbkCwsLITD4UBLS8tVn21ubi6ys7O7+D/9/PyQkpICk8mEc+fOwWAwoKmpCY2NjWhvb0dTUxNcXV3h6emJvLw8DB48GCaTCREREb2GEy5duhQCgQBWqxVlZWV4/PHHAXT4obk0d71tEFqyZAl754COhXeOpw50kCI5LERrays0Gg1cXFyg1WoxePBgOBwOuLi4dEmvxufzma+5uroasbGxUCgUcHd3h0AggFwuh9lsxpAhQxAREYHZs2czAFdlZWWPi8Yc7ZB7r3JycvD8889jxIgRmDhxInOflJWVseiw2tpaxknn1NDQgCFDhqCmpgZCoRCZ
mZmMWBodHc0WSr28vBAUFNSlrUkkEkyYMAEOhwNnz57FK6+80mOdXim73Y4TJ070uAjb3t6OhIQEqFQqZGRkQCgUwtfXF3PmzGHthiMuAugWXjhs2DBIJBLodDpG4rxSAoEAPj4+8PLygt1uR2trKxobG1nmrujoaNx1112QyWQwGAy4dOkSzGYzysrKurgluQVRqVSKmpoa5joBuqbIs9vt4PP57N1ub2+HSCSCQqGAQqFgnBmn04nQ0FAMHz4carUaGo0GMpkMaWlpzC3Z5yJ5b73+zfzpzXJvamrqYs0qlUoqLy9nv58+fZpZnLW1tcwq6UnFxcWMa81ZrFVVVcxy1Ov1VF9fT21tbezvGo2GGhoaqLq6uts1cNaeyWRi18BJo9FQeXl5l2lgc3MzqVQq9j29Xt8tA1JdXR1JJJIu1qxUKmUW09VcCHK5nIqLi1n2Hk5arZbVjcViYS6azvXIuWSIOjjbXFYYu93OuOMWi6VL/UqlUubq6cxmr6urY26dq6mwsJBOnjxJEomky+cOh4N9ZjKZmMWkVCrZ/RF1cLG5empvb+82pefkdDpJqVQyrj0RsevmZhvc/fb03ba2ti7uts4yGo3U0NBAGo2GdDodO4fFYun2rLjr71zWxYsXmRXG3W99fT0plcou35VKpczt0pNqamqotraWiouLSSKRsExSHEfdZDKx7EWcysrKut13bm4ulZaWst/T0tJILpdTUVFRl+MuXLhAFy5cYM+iqampi9ujpaWFuXP6kk6no9zcXCorK+s2K6mqqmLtpbW1tUu2J6lUSlKptEsb47KWOZ1OstvtZDQaqbi4mORy+VWvwWq1UlNTEykUCrJYLFRVVdXtmXfuA4g6XMTnz58ni8VCRqOR1Go1nTp1inbu3EknT56kc+fOsfrR6/XU0tLC+pgTJ07Ql19+SQ888AAdPnyYiouLKScnhyoqKuj06dPU0tJCZrOZtFotaTQaOnnyJMnlcpadi6uTviz3X8WWuVG6XraM0+lkCxLXo98SXMQt3FwtPOpawuZ+SxH1L1TuZpXX3/C/6z3PTz/9hKFDh+Kee+7Bli1bkJSU9IcGct2o9+jK+nz00UexZcuWq9azwWDAyZMnMXny5BsOt7PZbODz+Wxm1RdB8ma2J7PZDDc3N7S2tsJisUAoFLL+xOl0wsXFhWVm4/pYrh6VSiXy8vJw/Phx3HvvvYiMjIRIJIKLi0uXaC+73c7qwM3NjbULrpyioiIMHz78plIhb5q4m75e/ZZEuv68ZL+Hjh3ox/TuJpfX3472es9TWVkJh8OB7du3Iz8/HxqNBlOnTr2usn4PulHvEVefRqMR586dw8iRI/scQIkIM2fO/FUbyXrTle2zL77OzWxPnHvMaDRCo9GwVJNXisfjdau/0NBQLFiwAPPmzbvqNbu4uHRpC9z/+/ve/6E791u6pesRt7Ud6AhRu3jxIsRica8N9P+aPD09MX36dLYJ6mrq7+a//1UZDIbr3jD4awejnvZ0dNatzv2W/k9r4MCBaG1tRXl5OcvOdEu31F8FBATccA57f9XnTOYmXcct3dLvUiKRCFOnToVYLMbBgwf7jPK5pVsCOmZ8RISgoCDweDzY7fabTrPsa7PYH6pzt1gstxofOpgcXN5NmUzGfoD/Hi7VZDJdE+ES6MACtLZ2pNe12Wyora1lv99M8Xg8REZGwtvbm4HZ+iO1Wo2ampobdh297Yi92aqvr2fJp/9XdKMDR65cixAIBF3cLBaLBQqF4jd5n7kQzL70u+/cMzMz8fHHHwPoqNCvv/66xwZ44MCBfuFw/+ji4vC5ZNNeXl7w9fXFU089xba+f/XVV9fEdOfqtb8vzZV67bXXsG7dOmzevPmavmexWBhyWSgUYtWqVdi2bdt1XcOvVVJSElatWoUPP/ywX8cTEWQyGZYsWdLnsZ988km/yIYcAO+/HcG2dOlS7Nmz56acS6FQMEzHbykej4dPPvmk7y37AF566aU+j+m838BsNjP20Q8//IDc3Fy0t7ejoaEBr7zyyg3r4J1OJ+x2O3788ccuycd70+++c9+xYwd7+O7u7sjIyGAbazrrzjvvxNtvv32zL++my83NDfv27cOIESPg7+8PkUgEb29vzJo1C/7+/khOTsZ99913TZEkOp0OlZWV/Voc66nj2b17N5YsWdKvRtFZp0+fxsqVK5mVaLfbsWDBgmsq40aJi2ror+XO4/GQlZXVLbKnp6ny119/3a+oLm6B7EZEL/2aAYLH43WBev2Wys/PvylhqK2trZBKpX3CtnQ6He6++240NzdDrVazzyUSCfR6PZRKJdvMZ7PZYLPZIJPJIBKJsG3bNjz88MOIjIyERqNBbGws5s2bB19f32sytjrPvjt/j9v8dPvttyMmJqbPd+p33bm3traitLQU/v7+bPTj2OdNTU1dRuGamhr2ktjtdmg0mn7Bq4COXW6dK9TpdKKlpaULM52bMkulUsjlchARuxZudyInvV4Pk8kE4D/c9c6JDerr66/KY+9NXINtaGjAqFGj4HQ6UVFRAavViiFDhoDP5zPGOp/P72IxKJVKtttVpVKxZCMGgwESiQT+/v79On95eTlzAXW+JofD0Se6WKVSsd2vQId7KSYmhllBsbGxPQ7cPYnbMQh01H9zczOICLW1tT0+d6VS2c1q61wPSqUSWq2WxSZfTQqFAmq1Gs3NzRg9enSXvzU0NLBOgduZ6u3tzSBena+fex6c3N3doVQqu8HBtFptt0GjqakJ9fX1rExuZiCTydDa2soGCO759kcKhQJ6vR6jRo1CQEAA+9xms6GlpaXb9XaWxWJh71tJSUmP7lOj0QidTsf+r1AocOrUqT47KZvNBqVSCZ1Ox+7XarWiubm5yzPt7NZSqVTsd4PBgJqaGvj5+XXrZJuamliZHCxQKpVCpVKhqqoKp0+fRklJCSQSCaqqqtDc3Izq6mo4nU72nri7u0MkEqG1tRUqlQrV1dVoaWmBl5cXBg8eDKCjj2luboZCoejSfrjz6vV6Vr+1tbWoqKiATqdDWVkZysvLQUQwGo0oKSlBWVkZXF1d+xwwftedu8ViwbBhw/DMM8+wDRJcw6mpqcHnn38OoIMJPmPGDHz00UcAgEWLFuH06dPYs2cP3njjjV7LP3jwINavXw+RSIR58+ahpaUFX3zxBd577z3w+Xw8/fTT2L59Ow4cOID6+noMHjwYvr6+yMjIwIcffgiJRIKFCxfCz88Px48fx4ULF/DNN9+gpqYG06dPR01NDU6fPs2sk/Pnz+Ouu+5CUFAQ3njjjX4PPpy4BltcXIzk5GR89tlnICK4urpi8uTJOHHiBF577TW2zfn8+fOYMGECMjIysG3bNqxduxbbt2+Hm5sb9Ho9Dhw4AJFIhNdeew0PPfTQVRdo3njjDWzfvh1hYWHIyspi5Mjq6mrMnz8fcXFxvVqc69atw65du6DRaLB+/Xq4u7vjwIED2L17N1vxP378OB544IF+hYfJZDJIJBKsWbMG+fn58PPzw8qVK3HmzBkMGDAAQ4cOxff
ffw+ggwf+/vvvg8/n47XXXmOM/RdffBFCoRAbN25EZmYmgoODceTIETzzzDNXrYfvv/8ejY2N8PHxwXvvvcewuhy6d8CAAXjrrbdQXl4OLy8v5OTk4OGHH4bBYACfz8enn36KTZs2QSwWIygoCBUVFQA6BoWRI0fCZDIx15BCocAPP/yA+vp6hgowGo344IMP2KaWzMxMAB1b77/66isEBgbixRdfBNDRRn766SfEx8dfdVZls9nwwQcfwNfXF2+99Raee+459iw/+ugjnD17FlqtlvH7r5TRaMSxY8fwzDPPYPfu3bBarV0GvdLSUuzatQuNjY04dOgQzp49C09PT2RnZyMvLw9hYWE9lmu32yGRSPDhhx/CaDTimWeeAZ/PR11dHXbv3g0vLy/8/PPPqKqqYlv/09LScOzYMVRXVzMuvcFgwKZNm7pkbFu7di0++OADaDQaPPjgg9DpdPjhhx9QUFCADRs2QKlUorKyEk888QR27doFgUCARYsWQSKRMOOOx+NBKBQiJCQELi4uWLx4MR5//HGsWrUKL7zwAv75z38iLCwMFy9exKFDh5Ceng6TyYSPPvoIa9euRV5eHr799ltotVqkpaVh7dq1qK+vR1ZWFl5++WUcO3YMYrEY8+fPh81mQ3V1NS5duoS//OUvMJvNfbqYftedu5ubG0aNGsWmUkSERYsWAQCio6PZxonY2FiIxWLGmdm5cyeMRiMiIyMxd+7cXsvnOitvb28MHToUQqEQn332GeLi4uDn5wdvb2/s378fQ4YMQWxsLBYuXAhfX18sWLAAY8aMQVxcHGt0o0ePRm1tLUQiEQYOHIi4uDiIxWKUlpay+Om8vDzs3r0bHh4eWLx4cb+s5StlNpsxcuRIJCUlobGxkYVhtbe3w8XFBbGxsRAIBBAIBBg0aBAmTJiAOXPm4N5778Xzzz+Pbdu2wcvLCyEhIezaOXxpb1l7nE4nSkpKkJaWBrFYjHHjxrFMT35+fkhMTERveXBtNhvkcjkmT56MoKAg1uCKi4u7WGxnz57tNUlET2Vyz3/o0KEAOhZ0x44dC1dXV6jVarYBZs+ePRCJRAgMDIS7uzu2bdsGq9WK7777Dl5eXlixYgUmTpzI6jAhIeGqluSPP/6I0aNHw9XVFcnJyQgPDwfQsTYUFBQEd3d3hIeHIykpCa6urjCbzUhMTGRc7+zsbAwcOBAikQgJCQmIjY1lM7D58+cjOjoaAoEAarUamZmZmDx5MgICAuDj48Pu85tvvoGrqys8PDwwYcIEaDQa7N27FwMHDgQRISQkBECHy0MoFILP5yM9Pb3L7LGz8vLyEBkZCQ8PDwQGBmLAgAEAOgZuDw8PjBo1CnFxcThw4ECPHYrZbEZcXBz4fD7mzJkDV1dXNvtwOBz47LPPMHPmTCQkJGDSpEkoKCgA0GGoORyOXt87m82GHTt2YN68eYiJicHw4cNhMBjw+eefY/bs2RCJRJg+fTouXrwIlUqFqKgojB07FsOGDUNAQACbyfv5+cFqtSIwMBAuLi5wOByQSCQICwtDe3s7Wltbcfr0aZjNZkRGRiIsLAw+Pj6IjY2FUCiEUChEeHg4+Hw+RCIRq1+uPjmu/IABA7Bw4ULce++9CA0NxaeffgqdTgeTyQSHwwF/f38EBATAYDDg559/xqFDhzBgwADExsbCbrdDJBKBiBAQEAAej4fw8HCEhISwWTe3Jubi4sJ2x15VvXEJbuZPb2yZdevWMa4KEdH3339PSqWStFotrVu3jjFUWlpaaMuWLey4o0eP0r/+9S+Kjo5m+NwrpdfrGc2RY8M4HA5G1yMiGj58OD388MNERPTll192Y8Hs2LGDGhoaSKfT0aZNmygrK4uIOvC8GRkZREQ0depUdnxhYSEdOHCARo0aRfn5+T1eV18qKCigHTt2EFEH++VKBgZHI9TpdLRv374uTBKLxUI1NTX03HPPUVhYGPt8xIgRREQ9Mjjsdjtt3bq1C3/m3XffZfjh48ePd+HUXKlPP/2UER03bdrEuDPx8fH07LPPks1mo+rqaoqOju6VztiTvvzyS7r99tuJqIOdwhEelUol3XfffVRXV0etra10//33s++MGTOGkUUlEgmtX7+e1q5dS2azmZqammjy5MlXPadcLmckxaamJtq8eTPZ7XbSaDS0YsUKstvtVF1dTdOnTyer1UpKpbIL+lcqldKqVavI6XQyumF7ezu1t7ez59bQ0EBDhw6ljz/+mOLj44moAxfMcXCIOvg9r7zyCkVERJDdbqd///vfJBaLiaiDocQxYxQKBW3dupVSUlLYu3mllEolDRkyhEwmEzU1NVFaWhrj9HRGWzc2NrK20JN27NjBEMJr166lBQsWMBaMp6cnEXW0s7///e/0008/EVFH+1q/fn03NhNRBx/m/fffJ19fXyIi1g9s3LiRAgIC2HHvvPMOe7927NjB+oQXX3yRFi5cSEREW7ZsYWjm1tZW2rlzJz333HO0a9cu+tvf/kavv/46/fjjj5SVlUWPPfYYbd68mYqLiykzM5P++c9/0qlTp+jdd9+lxx9/nHGPLBYLqdVqstvtJJfLqby8nIqKikiv11NRURE1NDRQcHAwNTc3k8PhoLvvvps2bNjA6nX16tUUFRVFGo2GJBIJTZgwgQ4cOEByuZw2b95Mc+bModraWsrPz6fp06dTc3Mz2e12mjNnDj3xxBPU0tJCBw4c+ONSIT/55BMEBwfjhx9+gF6vxw8//ICgoCD88MMPOHToEKRSKXJycvD2229jwYIFKCkpwQcffIBRo0bhySefRHJyMvPxXSkutZVKpYLNZkN+fj4qKysREBAAi8WC/Px8PPDAA9iwYQMUCgV2796N2NhY9v2WlhZ88sknCAwMxDfffIPi4mKkpqYC6LBCOWvdy8sLubm5uHDhAh577DGkpqZi3bp1zLL529/+xjJA9aW2tja8++67bDbCWeicDh48iOXLl+PgwYPw9vbGJ5980oViWFxcjAsXLuCpp57qktFp4sSJOH78eI85X51OJ/sBOtwPFy5cYBbzkSNHek2UAXT4nblr/OGHH0BEuHjxItzd3bFy5UpcvHgRe/bsgdPpREFBAXJycvDee+/1mcdzz549uOOOOwB0uGkWLlwIoMMKfeSRR6DX6+Hi4gKhUAir1YrCwkI89NBD+Oijj5Ceno68vDw8+eSTMBqNaG9vx+7du1FYWIjm5uarussCAwNhsVjw5ptvIj09Hbm5uQA60vUJBAJs2rQJdrsdFy5cwPfff4+SkhIYDAa0trbC4XCwXKNvvvkm5syZwyiDnLtxw4YNePzxx5GcnMxcCN9++y0WLlyI8vJyzJ07F1lZWfjrX/+KBQsWQKPRICAggM1uBw4ciKNHj2LdunX485//jD//+c/YtGnTVf2zYWFhsNvtWL9+Pdra2lBYWIiqqipERkYyX/6GDRuuGrCwa9cuzJo1iz3nNWvWYN++fXA4HGzW0tzcjPb2dsyePRtAh9to0aJFOHLkSLfyeDweRCIRmxF6eHhg7969iIqKYp9t2LABZW
VlGDduHDsv5749cuQInn76aVRUVODrr79GYGAgDh48iK+++gr79+9HcnIyhg0bhpqaGgQHB6OlpQUqlQqZmZkYNGgQBAIBysvLkZiYiKioKOTl5WHx4sVsLY3H4yEgIAAOhwM+Pj44deoUzp49C5lMhsDAQEilUmzevBne3t7g8/morKzEyJEjkZWVhUWLFmHDhg2w2Wzw8/NDYWEhZsyYgQEDBsDV1RUHDx7EpEmT4Ofnh7y8PNx9992oq6tDWVkZPDw8sGLFCtjtduTk5PT6PIDf+Q7VuXPnoqKiAlFRUbDZbGz6Fh4eDl9f3y5uCKPRCFdXV8yfPx8ymQyXL1/Gn/70J9x11109lu3v788empeXF8LDw5GYmIjg4GAcO3YMMpkMDz30EEQiEerq6rr4BZ1OJ1QqFZRKJSQSCZxOJ1asWMH+PnnyZBQWFkKr1SIwMBB2ux0TJ05Eeno6FAoFampq8Je//AVAx0s4c2b/Us6eO3cOGo0GlZWVSEpK6hbd4uXlBT6fj6ioKABgjYB+8Q+GhISAz+cjPz+fDT7cdFUsFrOMUJ0lFAqxbNky7NmzBzabDe3t7XjkkUcAdCwEFRYWwt3dvVcGyYIFC3DmzBn4+/sjOjoaEokEqampGDx4MEMpl5SUYNCgQdBqtZg8eTL+3//7f7j33nvh5eXVayRFQEAAQ9kqlUrmkhMKhWhvb4dYLIZIJEJcXBwKCwtx6dIlzJ8/HwCwcOFCJCYm4uTJk3jyySfh6+uLlJQUTJs2DdXV1Zg0aVK381mtVoSGhiIgIAAXLlxAUFAQLl++DJFIBD8/P3h6euL48ePw9/dHUlISNBoNJk2ahBEjRkClUiE2NhZ+fn7w8fFBZmYmoqOjcfnyZfj7+8Pf3x+PPPIIsrOzMXXqVMydOxdGoxGLFy9GZWUloqKi4HQ64enpicWLFyM6Ohq5ubkYOXIkgoKCsGDBAsjlcmRlZbGsWIsWLUJYWBiqqqpQVlbGcu9eqaCgICxfvhzZ2dkICQlBYmIiLBYL4uPj8fDDDyMvL49l67raTsygoCCMGTMGDocDwcHB0Ol0GDVqFAQCAe6//34cPXoUra2tWLVqFRvs58yZA5lMhuTk5B7LHD9+PFtX4dw3M2fORH19PY4fPw4vLy88/PDDzD3r4+PD+ggfHx/o9XoMHz4c4eHhMBgMkMvlEIvFSE9Ph9PphEKhwLRp01hmKofDgQEDBjCXjq+vL8RiMQQCARISEtDa2spy6ZrNZgiFQhARbDYbPD09oVar2aBos9kwceJE8Hg86HQ6uLi4oKWlBU6nE3fccQe8vb1x5513Ms6Rp6cnVCoVa9fJycksSKG5uRkhISEsQ5fT6ezy/vemPzQVsjddS1Lha/leZ4rk2rVrUV5ejr179/7q6+xvcue+qHcWi+VXgdSuRdnZ2XA6ndiyZQvLuXmjxO3+62/C6+t93r83/R7v41pJi9wg31/iqtFovCojpTejob9yOp04fvw43N3dER0djcDAQFitVhatExoaygxDLt9DTEwMiAh6vR5xcXGwWq1ob2+Hu7s7vLy84HA4YDAYYDKZEBYWBo1Gg9bWVtTU1MDNzQ1jx45lA47BYEB2djZkMhnmzZsHHo+HoKAgdt/cM+9MvFSpVF3WsAwGA0QiUbe6qqqqQkJCwv8mFbI3XW8D6et7nV/WS5cu9brKf63n628n1lcju1kdO9ARRTF79ux+waWuR/2tE+D6n/fvTb/H+7hWuBXXEfeXuNoX/OpaO3auk2xra4PdbmfWulgsRlhYGAse8PX1hVAoZNayTqdjeWR5PB48PDzA5/Nhs9ng4eHB8Lt6vR4eHh6sY3Y6nTCbzQgLC2Phrnw+n11HY2Mjzpw5Ax8fH+h0Ouba5e6be+ad3atXBidwLrcr66qv96VPy53H430KYC4AJREN/eUzfwDfAogBUAdgKRG1/vK3FwE8BMAB4Aki+vmqJ8CNt9x/azkcDjQ2NkIoFP7qDv6PqubmZgQHB99wXPAt3dKvUVtbGxQKBdtfEBISwnz+3LoDF+qq0+kgFAqZ28Tb25v5yG02G5qbm+Hj4wNvb2+WMQno2MditVrZAMDj8ZhrhSuf47vX19dDLpfDzc0N8fHxcHNz63MjVX9VX1+PmJiYXi33/gzLnwOYc8VnawEcI6IEAMd++R2qaCh+AAAgAElEQVQ8Hi8ZwHIAQ375zkc8Hu/q6LI/oAQCAaKiov7PduwAEBIScqtjv6XflaRSKUpLS2GxWBAYGIjQ0FD4+vrC6XTC4XCwxB+cJW4ymWA0GtHU1MRcI5w1zs082traIJPJWMfN+ddVKhWcTifc3d0Zd50zlF1dXcHj8ZivfuzYsRg/fjwCAgJYuQ6H41djJn41FZKITgO4MnxgPoAvfvn/FwAWdPr8GyKyEFEtgCoA467lgm/plm7plq5FNpsNlZWVMJlMCAkJgb+/P4xGI4tsaW9vh06n68L34Sz10NBQtrvbbrfDarWCz+fDaDSygAVuAxq3FsTn86FWq6HT6WC1WmG1WnvsaLnPOruoONepQCD41cZRX5DA6w2FDCEiOQD88i9Hqx8AoDMaUPbLZ79aer0eCoXiRhR1zTKZTL2GVP7epdfr0dzc/N++jG4yGAx9hjv+t9W5g/hflVar7YY7+LWyWq1wOBy/WZvh3CvcoqbT6URMTAwSExPh7+8Pb29vREdHw9PTEwcOHEBbWxv8/PxgMpmg1+uZG4bzm/v6+sJqteL8+fNsx3BpaSnsdjs+++wzvPvuu6ioqIBAIGBWeXJyMoKCguDq6sqSm3Pumc7XyS0Im83mfsHjrkV9lXej49x7Gop6nHvweLzVPB7vPI/HO69Sqa5aaENDAwoKCpCdnY2jR49elXFxPaqpqbnqi9jc3IzNmzf3C6ebk5PDmDM3UwUFBSgqKur2eWZmJr788subfj1Xk91ux969e7F9+/bfpHybzYYbsYZz9OjR/3nE9LFjx/DZZ5/96nKMRiN777nIk4yMjBuKoC4uLsbp06dx7tw5qFQq2O128Hg8uLq6MutYLBbDx8cHnp6e8Pb2RmRkJGMeeXl5sQ66tbUVJpOJLdwTEcLDwxEcHAyn0wmr1QqDwYCUlBSMGDECBoOhywImF6ZrtVrZPXJlcr8LBAJYrVbI5XLGk+pcH5xbhtt0dDX19PffqnNv5vF4YQDwy7/KXz6XAYjsdFwEgKaeCiCirUQ0hojG9LZ1HQCeffZZHDt2DNOnT8fixYuxa9euG56vUaFQsEWUK2W322E0GvHzzz/3K3KgM8DsZorjil/JRTl37twN6ehupAQCAQoKClBWVvablM8RAH+tCgsLrwsR8UeRw+HAP/7xD3zxxRd9H9yHSktLu5Ak7XZ7N8b5r1FZWRm++uoruLu7M/a+WCyGm5tbF/eGXq+HXC4H0LHRcNiwYRCJRAxsxuPxwOfz4e7uDnd3dwb9czgciImJgVgshtFoxIgRI2A2mzFixAhoNBq4ubl1QS+4ubmxiBhudufn58cWWTlxETZr165FXV1dlzLa29tZh89hC
oCOhV4ObcB16hyJsrP66meut+b3A7j/l//fD2Bfp8+X83g8Nx6PFwsgAcC56zwHgI5dqtOmTWNT+IULF4LH43W50f5wyLkR02AwdElE4HQ6kZKSApFI1MU3xk2vXFxcsG/fvn6Fdmm1Wtx3331dVsP7mpV0vrbe1NsIzQ1Gra2tmDdvHlJSUrpxOiQSCV555ZU+r0Gr1bIXinsB9Xp9l2nm1er5SvdFb/AthUIBHo+HvLw8LFmypFtSCO6enE5nF7dNby65K/kaTqcT3t7ejEHU27V3jpjo7dr37t3Lpu6crrSguLpqa2vrkfVxpcFgtVq7ldG5/GuFyV2vZWwymXD48GE8/vjj3ToJrj56Y9Go1Wo0Nf3HZqutrUVGRkYXv/OPP/6I8ePH9/h9vV7f5dl2Rute+Szq6uqQl5eH6upqLF++HGKxGBaLBU1NTVCr1ZBKpYwQW1JSwmLV8/PzUVpaiuzsbLS2trJ3WSaTIS8vDxaLBUqlEq2trTCbzQgMDER5eTlqa2tRUFAAvV4Pg8GAd999F4cPH4ZAIIDdbodarYbBYEBRUREuX74MvV4Pi8WCkpIS6HQ61NfX4/z588zVVV1djRMnTiAyMhJarZYZpk6nEzabDaWlpaiqqoJOp8PFixfR0NAArVaLpqYmlJWVobGxEWazGUajkRE+uXe5L2u/TxOTx+N9DWA6gEAejycDsA7AOwB283i8hwBIASz55WSXeTzebgClAOwAHiOi/oOMe5CXlxfefvttPProoxg1ahTS0tKgVqtx6dIlVFdXY9SoUWhsbERycjIGDRoEuVyO0tJSCAQCBAYGYujQoSgpKUF5eTkGDRoEqVQKg8GAyZMnIzw8HFKpFD/++COefvppAEBubi4D9cyaNQtBQUG4dOkSQkNDkZWVBSLC1KlTe7zW3NxcDBgwACkpKdDpdDh+/DgsFgsmTpzIdo1eqaysLBiNRlgsFgZd6iy5XI6qqio0NjZi+fLlqKurg7u7O0JDQ/HBBx/gqaeeQklJCRoaGnDPPffAZDJBq9WipKQEYrEY7e3tDG5lsVhw+PBh2O12iMVizJo1C0SEQ4cOQaFQICkpCXFxcfj++++RnJwMPp8PrVaLBQsWsEbh5+eH8PBwlJSUYMmSJbBYLDhx4gSADoBbYmIimpqaUFxcDJvNxqBrQAdeNTs7G+Hh4QgKCkJqamqXeHYiQn5+PhobG5GSkoLCwkIEBATAw8MD9fX18PDwwIIFC2Cz2SAQCFBSUoLq6mqIRCKMHTsWZrMZ9fX1yM7OxogRIzBz5kyGxm1sbMTAgQMxatQoNDU14ezZsxCLxazBL1u2jDU8zhLkYF0uLi6or69HcXEx+Hw+IiMjMWzYMFy6dIltUVer1VAoFJgyZQrLw1pYWIiGhga4uLhg+vTpEAqF+Omnn1BZWYnnnnsO+fn5MBgMmDFjBoAO67eyshKurq6YMWMGq5uioiKUlZXB398fI0eOhK+vL8xmMyorK1FXVweRSIS0tLRrWqArKCiATCbD4MGDuxgPZrMZ+/btA5/PR2xsLC5evIjY2FikpaUB6BgM8/LyoNfrERkZyQB2mZmZ8Pb2RkVFBRITE3H+/HnceeedOHnyJOx2O0MTKBQKnD17Fk6nE8OHD8fAgQNx5swZiMVihIaGoqKiAh4eHpg5cybKy8tht9vh6uqK9vZ2HD58GPPmzYPBYMD58+cxadIkNDc3Q6lUYvTo0SxD2YQJEyASiaBSqXDx4kXweDyGdx4+fDhiY2MZvtdkMmHcuHHQ6XTIzMwEEWHcuHE4c+YMmpubceTIEZhMJlRVVbEwycuXL6OmpgYGgwF33XUX1Go1jh8/jkmTJsHHxwcZGRkMXtbY2Ijs7GwEBQWhrq4OqampcDqdMBqNICIUFBSgra0NSUlJaG9vR01NDRISElBfX88WaxctWgSVSgWVSoXy8nKYzWasWrWqzzXI/kTLrCCiMCISElEEEW0nohYimklECb/8q+l0/JtEFEdEiUR0uN9vWy+6fPkynnjiCWzZsoVZkfn5+RCJRDhx4gQsFgsWLFiAe+65BwaDAY899hhmzpyJ0aNHIyMjA0DH6H/o0CG88847mDt3LmbPno0jR47g66+/RmtrK9th6XA48MADD2DAgAGIjo5mI2NjYyOWLVuGKVOmYOXKld2sCyLCkSNH4Obmxvjgd999Nw4fPoxly5ahvb292305HA688cYbcHNzQ3p6OjZu3AhXV9duo/H58+eRkpKCjRs3AgBeeOEF1ulwoV8hISHYtWsX27ixfv16DBkyBCNHjsSKFSvg7++PgwcP4vbbb8eECRNgNBoZz2PTpk0ICAhAVFQUgoODUV5ejurqahw5cgTTp09nNMCCggKMGTMGu3fvRlJSEqZOnYrVq1dj7dq1mDNnDgYOHIjq6mq88847eOGFF5Ceno7XXnsNR48eBdBhsb788stYunQpYmJicOedd3bblJGdnQ2BQIDNmzcjMTERS5cuxdq1azFp0iSsWLECa9asYbHJU6ZMwalTp7Bw4ULW6XKWNodSBcBw0YsXL0Z6ejqOHDmCc+fOYcyYMfjyyy8xa9YsTJgwocsMQigUwmAwMFokALz55psIDQ3FuHHjcOjQIQAda0EzZszAxo0bkZaWhtTUVBw8eBBAR+d59OhRzJw5E1VVVVCr1bhw4QIiIiJYnaxbt465psrKynD8+HHMnz8ftbW1zH/NWZ/Lli1DUVERi6GeN28eDh06hAULFuDYsWP9yjDUWRUVFViwYAEjGnI6fvw4hEIhtmzZgrFjx6KoqAhPPvkkAGDjxo146KGHcPvtt2PFihVYsWIFAgMDkZqaCqVSiU8//RTx8fH4+eefIZPJkJSUhOnTp+Oxxx4D0GHhP/3005g/fz5iY2PR1NSEqqoqDB06FN9++y3jIH333XfIyclhaAe1Wg03Nzfk5uYiMDAQWq0WOTk5sNlsmDJlCrZu3YrvvvsO06dPxzvvvIMTJ07AYDCgpaWFvRupqalYv349du7cieXLl8NisSA6Ohpff/01DAYDtFotxGIxTp48iejoaNTW1sLd3R3Jycl44oknMHLkSJw/fx7vv/8+lEol4xHp9XrU19fD4XAgPz8fbm5uOH/+PB588EFUVlZi0KBBMJlMePbZZ5GWloa2tjbw+XwQEdRqNcLCwnDmzBkEBwczTPjHH3+MlpYWBAQE4NKlS6itrcXGjRths9kQHBzMMkrd7AXVGyqVSgU/Pz8MHToU77//Pvbt24fc3FwMGzYMcXFxCAsLY7AujpfOWQgSiQTp6ekAgNtuuw3Nzc1YtWoVgA5LaNiwYVi8eDFGjRqFQYMGAejwBWu1WjzyyCNQq9UIDu4IAho2bBgmTpyIlpYWzJ49u5vPn8fjYerUqWhoaGCME19fX3z//fd49dVXGbC/s0pLS3H48GF2PDelvXIqPHr0aGg0Gtx2221QKBQszhboYG8kJydDJBLhvvvug6+vL5RKJQYPHozw8HCUlZWxBnPs2DG0tbUhLy8PNpsNK1euBACMGDECb731Fqqr
qzFo0CDMmDEDzc3NzK1hNpvhdDpx++23QyAQYNq0aYxTc/DgQYZlzcjIwB133IFDhw6xTpHja9jtdjQ2NmLUqFEAOgZbDjfcWTExMYiPj8eQIUMAdFiJXFlmsxnz5s1jA5vT6cTs2bNhsVig1Wrh4+OD1NRUDBkyBFKpFMuXL0dZWRlGjx6NpKQkAB0uj+bmZsyePRtubm6M6TNo0CAGnOKUn5+PGTNmwGg0ora2FiNHjsTIkSNRVlbGgGUzZ86ESqXC/fd3eChrampYspHDhw9jzZo18PDwgFKpREREBOLj41FZWQkvLy92T1OnToXFYsGBAwcwd+5cmM1myGQyhIaGAgCrV6BjFhceHs7YJvHx8fj4449xzz33wNvbG7W1td3qtCc1NDTA6XTizJkzyMnJ6WJ8DBs2DPX19Yz30tDQgGnTpgHogLUNHjyYBTRw7hy9Xo+wsDCEhYVBp9OhqKiIfb+5uRmTJ08G0LFdXiQSYffu3cjKysKECRPYrG7atGksBn3kyJFwc3ODt7c3vLy8EBsbCw8PDyQnJ7MdolqtFkFBQdBqteDxeJgxYwasVitiYmIwZMgQTJ06FfPmzYNMJkNkZCRD97a3t0OtVsPHxwc2mw0GgwFSqRTt7e1oampCaGgo3N3dERYWhuTkZOYykcvlqKioQEZGBqKjo2E0GuHu7g4fHx94eXnBbDZjzJgxCAoKgtPpxJQpU2A2m1FYWAiFQgF3d3fo9XrGo+F2ygoEAhgMBha+GR4eDq1WC6FQiJaWFqSmpqK+vp6FbWq1WgQHB0MoFPY9U+sNF3kzf3pD/j711FNE9B/c53fffcdQs62trbR3714iIjp06BAdP36ctmzZQk1NTaTX6+n5558nrVZLly9fJiKiRx55hKxWKzU2NtKCBQsYZvTo0aNUVFREbW1tDOdJRAwrWl1dTd999x0REW3dupXy8vJIKpX2eL1Lliwhp9NJRUVFDA+8YsUKqq+v73bs2rVrydvbm4g6kMV/+9vfekTuEhGtX7+eGhsb6bPPPqOkpCQiIqqvr6fGxkbSarW0cuVKIurAt27YsIGamppY/alUKiIiCgoKotdff52V2dLSQpWVlXTkyBEiIpowYQLt3LmTiIjuuusuMhqN1NLSQvfeey+1trYSEdGbb75JZrOZlRETE0Nms5mhXeVyOfn4+FBlZSV7BkqlksxmM/3zn/+kuro60mg09Pzzz1NNTU0XHDGnqqoqhkvOzc2lrKwsstvttGXLFqqtrSWpVEpqtZr+/ve/ExHR+fPnKTU1lTQaDStjzZo1ZLVa6cUXX2T3397eTnfddRdptVpWpyaTqcf6Jup4blKplNrb22nTpk1UV1dHREQvvPACqdVq9h6+/PLLZLPZSCqVUnp6OrW2tlJ7ezv5+fmxstLS0shut5PT6aTExERavXo1ERE9++yzVFxcTBUVFRQTE0NERPn5+ZSamkrl5eXU0tLCcLlExJDENTU19OKLL7LPNRoNORwOKisr64al7klcu+Fw0RMmTCAiYs928uTJDMu7cOFCOnv2LDU1NZG7uzsrv7W1lSF1s7KyGHaXiGjcuHHsHDt37qTTp09Tc3Mz/f3vfye9Xs/eF+58mzZtovz8fCooKKC9e/dSQUEBEREZjUaSSqX0008/0Z///GfKzMyk6upqysnJoYcffpj0ej1VVVXRihUrqKCggE6dOkVvvvkmSSQSMpvNJJVKadmyZaRSqSgvL4/uu+8++vTTT2nx4sVUUFBAhw8fpvT0dNq2bRudOnWKbr/9dnrrrbdo//799K9//Yt27txJs2bNoh07dtDFixdpzpw55O7uTkVFRfTUU0/RypUrKSsriyoqKig9PZ3OnDlDEomEJk2aRFu2bKGqqipasmQJzZkzh6qqqkgikTAcM1EHqvrZZ5+lhQsXUmNjIxUUFNDChQvpwIEDpFaryel0UnNzM82aNYv27dtHbW1t9Morr9Dy5cupra2Ntm3b9sdF/p44cQInTpxAdXU1duzYgcbGRoaa/eSTT2C321FYWIjs7GzMmDEDKSkpuHz5Mnbt2oXy8nLs378fUqkUMpkMc+fOZT5PLrQSALZu3QqDwcCm4g0NDaisrMTatWsBdOBWOZ/onj17YLVae0xbJpVKERgYiIKCAmi1Wpw6dQplZWWIiYnp0d+enp6OuXPnori4GDt27ACfz+81Hj0hIYFN811cXJCdnY0zZ84gPDwcu3btQn5+PgoKClBeXo7Zs2cjNzeXLc7s378fxcXF+Pbbb9HS0oLKykp8+umnKCwshEQiwYULF3DhwgUsWrQIM2bMQF1dHSIiIqBUKrFz507Mnz8fYrEYtbW1yMrK6sKvuf322/HJJ5+gqKgIP/74I0QiEe655x5cunQJX375JRwOBzIyMuDm5oYZM2aguLgYO3fuRE1NDc6cOdPjgt0XX3yB0aNHw2AwYPfu3Rg8eDBkMhlOnTqFlpYWNDc3w9vbG06nE+fPn8e///1vmM1mlhlKJpPhvvvug1AoxJo1a/DNN9/gwoUL+Pzzz7F//374+vqiuroaJ0+e7JVfo1KpYLVaceLECXh6emLmzJm4ePEiysrKcPnyZezfvx91dXWoqqrCiRMn4OLigsOHD6O+vp4d8/nnn6O0tBRFRUXMQuPxeBgxYgR8fHywZ88eCIVCnD17FoMGDcJTTz2FixcvYvPmzfD19UVtbS2ICB9//DHy8vJw7NgxDBw4EEAHXri1tRUnTpxAZmYmLl++DLPZjAkTJrAMVD2ppqYGL730EmOXCAQCnD59GgqFApWVlSy7U2hoKMaPH4+amhr4+/uzBcc1a9YgMzMT33zzDT744AOWuP7ixYvQarXIz8+H2WxGcHAwxo4di9LSUnz44YdwOp3w8/PDSy+9hK+++gqFhYU4cOAA8xkfPHgQWq0WbW1tGD16NEsW0t7eDh6PB6lUCqVSCbPZjICAAOh0OsTHx0OlUqGyshJpaWnw8/NDQUEBvL292WItl4Ly6NGjOHfuHB577DEkJSVhzJgxcHFxgd1ux9KlS5Geno6IiAj4+/tjxIgRiI2NxZQpUxAYGIiJEyey5Bz33HMPHnnkEeTk5MBkMmHQoEGoqalBbW0tS86Sm5uL+Ph4DBgwAGKxGAaDAZGRkZBIJBAKhfDw8GCJrtvb21FdXY2FCxdCKBRi0KBBeOyxxyCXy1FXV4fCwkIUFRXh5ZdfRmNjIwoKClBbWws/Pz/IZLI+F98Fr7766lUPuBnaunXrqz0hSVNTUxlfOyUlBWlpaYyiuG3bNkyaNAn19fVYs2YNBAIBIiMjIZVKMW3aNMyaNQvBwcGYMGEC1Go1Ro8eDT6fjyFDhrAsKyEhIRgwYADCwsIQFBSEsLAwSKVSVFVV4f7772d0u5iYGAiFQkRGRiI8PBwpKSnddqT5+vrC29sbI0aMQFxcHAwGA1ss8/Hx6RYSFhISwnjz06ZNQ2BgYK/RBUOHDsWlS5ewevVqzJ49G0qlEiNHjmRZkDhccUpKCkJCQlgGnTlz5iA0NBQpKSmIjY1FVFQUysr
KMHXqVAwfPhwRERGIjo5GeXk5Vq1aBbFYjHPnzsHf3x98Ph/Dhg3D+PHjWeYXLy8v5jIBOqbwAoEARITIyEgMGDAAAwcOhMPhwLRp0+Dv74+4uDgEBwdDJBJBrVZj+vTpuO222xAaGtot85LD4QCPx8OQIUNARPDy8kJycjLEYjEGDBiA4OBgxMfHw9XVFW5ublCr1Vi6dCluu+02BAcHw8/PDz/99BPGjh0LT09P5qYym80YNmwYi2H28PCAm5sbwx5fKS8vLyQkJCAsLAyhoaHw9vZm+VCXLVuGwMBATJgwAVqtFgMHDmRscG9vbwwaNAjDhw9HQkIC5HI5i5AYP348eDwehg4dCn9/f1b/ERERzKXR2NiIFStWYPLkyXBxcUFCQgKio6Oh1WqhUCjg4eGBiRMngs/nIyIiAk1NTfD19cWUKVMgFAoRHByMqVOnss7oSsnlcqjVavasgI41nYiICHh6eiI+Ph52ux3h4eFISEhgvPKoqCiEhoZi0KBBjGo4bdo0ht9ISEiAwWCAp6cnIiIi4Ofnh+HDhyMwMJBlNgoNDWUGlNVq7ZIvNyMjg63BCAQCiEQiuLu7s52f3PtjNpvh4+MDmUwGp9OJpKQkGI1G+Pv7M1cKAAQHB4PP5+Pw4cOIioqCn58fxo0bh9jYWPD5fERHRyMvL4+1Q09PT2a0cFmYjEYjY9FzOGyz2Yzw8HCEh4cjNDQUYWFhiI+Ph6+vL8LDwzFgwAB4e3tDp9Mx9xAHKUtMTERoaCg8PDyYO4XH46GyshLJycldEl63tLQwF5OPjw+ioqKYa2fmzJmsr7BardixY4f81Vdf3drT8/5DIn+3b9+Of/zjH2w3WX+wpNeKLv2/qqVLl2L37t3/7cu4Zr3xxhtIS0vDd999hzfffLPX1G03QxqNBt9++y3WrFmDzZs34+GHH+6Cd72aOqNya2trkZubi7vvvhvbt2/HokWL/meYPna7HUVFRSgpKWG+d6FQyGLOAbC1lPz8fFitVsyYMYOlzCssLERMTAwMBgPCw8Ph4uKC2tpatsaUnZ2N1157Dc8++yymTJkCT09PyGQyyGQyxuQfMmQIxGIxPDw8YDAY2DoH/bKmUVpaysKudTodY7lz18CBA4kIDoeDsdabmppYHP3VIGF2ux1ms7lPkBi345fP58PNzY3tei0vL8fgwYP/d5C/drsdZ8+ehb+/P+Me96fTvtWx9y2NRsPCTFNSUv4QdcYN2pcuXYK/vz/S0tJ+NZDpRlzTqVOnEB4ezqxioH9I3yv3Whw5coTx/rnO548uLvxUJpPB398fPj4+7L51Oh1cXV3hdDpZp+fh4cHojSKRCHa7HTExMSxnrU6ng5+fHwICAtg5qqurIRQKUVJSgqSkJIb49fLyglqtxvDhw+Hm5gaxWAwiYouUHGPGxcUFPj4+qKurQ0tLCwYOHAiFQgGTycSgZN7e3ow3w+1n4PP5CAkJgc1m6/N5u7i49IsQeaWhwg3ufbXPP6Tlfku3dEt/XOXm5kIulyMuLg4hISEQi8Vs/UOj0cDd3Z0NiGazGSaTiXFbuA7YZrOhsbERYWFhzN0jFAohEAjg5+fHOkCDwQCn04nGxka4uLjAYrGAz+ezTEeZmZlITEyEzWaDv78/PD09Gc7b6XRCq9Xi3LlzmDdvHurq6uDi4oLk5GRYLBbGmXF3d+/RAu9PEh7OCr8Wcd+prq5GfHz8/47lfku3dEt/TBERzpw5g8DAQAwePBiurq4wGAy4fPkyRo8eDQBddoo3NzejoqICw4YNY3yY6upqGAwGDB8+HHa7HZ6envDw8GBp9Xg8Xhf2OvevyWRCQUEBRowYAT6fz3InJyQksFh/LjOTq6srPD09YbFYEBsbC7vdjoMHD8LhcEAqlSIsLAwmkwkKhQIxMTEIDQ2Fp6cnHA4HSwcI/Ces2eFwsNDJK3e6X4+LjftOX8jfW537Ld3Sb6Rb6zz/kdlsRkVFBcxmM0JDQyEWi6FWq+FwONgeA6fTyTpGpVIJm80Gi8XSBeHg4+PDonpEIhHc3NzQ1tYGh8PB2OpXujE4MioH9eLY60ajkblezGYzi8Ly8fFh5QiFQsawGTFiBFJTUxEZGQm5XA6r1Qq1Ws0WuLlZAheD7nQ6WVjilfmOb4Z+929ea2srduzYgdWrV/+qfKV/JBFRNybNv/71r5t2fr1ej4aGhj6P27Jly1XZJgaDoV9sHU7PPfcc24R2pUwmE+rq6nrkt/Qlo9GI2bNnX/MuzqupvLwcL730ErKysvCnP/0JGzZsQEZGBv7yl7/gwQcfhFQqxcsvv3xTkMFqtbobo6cv2Ww2tLW1YcqUKcjNzf2Nruw/ysnJgU6nY8nLASAwMBDe3t7Q6/UsoYZMJkNJSQmMRiPEYjFLAi2TydjGpYiICNZpcgk2uAVOh8PRrS5qamogl8sxfvx4tgbi4uLCGEzDrXIAACAASURBVO08Hg++vr6IiIhAXV0d5HI59u3bhz179rDw5BkzZsDf359t1Hr99dfx0UcfobS0FDabDeXl5Szaq6WlBQ0NDfDx8WH++xtJxwQ6+sX33nvvqsf87jv3y5cv49ixY4iPj7+hjfP3LIPBgNLS0i6f3cyBzWw249///vdVj3E6ndi7dy+jUfYklUqFt99+u9/nFQgEPTYCLo/lRx99hNzc3GtuKJ6enuDz+TcsvRkAnD17FvPnz2eRGCtXrsScOXMwfvx46PV6REVFISgo6IYTTHvS5s2bcfbs2Wv6jlAohK+vL3NF9AYJuxGSSCSw2+1QKBTw9fVFQ0MDi9EWiURsxy7nsuAscI7X3tDQAIlEgtbWVlRXV7OO3NPTE0KhEC4uLmzBlM/nw2AwdLkfNzc3OJ1OZm1zbhxXV1e4uLggODgYJpMJarUaISEh8PDwYKGoAoGA1VVQUBCEQiG7F5PJhISEBERGRjL4GRclw61lEtFvMoPz9PTsAlzrSb/7zv3IkSPw9fXF888/j/vuu++/fTk3RdnZ2Rg+fHiXz06dOtXjsb/FgvjJkydRXl5+1WP4fD4yMjJY6FlPunz58jVZhceOHcODDz7Y7XPOt1hcXNxrbHpf6onvc70qLy+HRqNBcnIy7HY7MjMzERISAqvVivDwcMybNw8HDhzAPffcc8POeTUdP378uuqlvr4ezz77LGJjY3uM7LgR1mZlZSUaGxuRmJiIuLg4XLp0CVu3bkVBQQGam5vR1taG+Ph41vlFREQgNjYW/v7+CA0NhZubG/h8PiZOnMjcMNXV1airq2OkRG5BValUwsXFBQEBAV06d1dXV4wdOxZisRhNTU1oa2uDwWCAWCyG1WpFaGgoYmJicPLkSeTm5qK2thbjx4+HUChkKAh3d3dcuHABJ06cgNlshlwuxx133IGQkBDU1NQgKioKPB4PRASBQMBQ0RaLBUajEVarlbFguGTbAHqcafQlIkJpaWmvMEJOv2ufu1KpxD/+8Q+sXr0a2dnZMBgM+Pbbb/Hoo4+iqKgIgwcPRmpqKr7++mvI5XI0Nzfj3Xffxfbt2yGXyxEVFYUhQ4
bgu+++w1133QWBQIAdO3Zg9OjReOCBB3o85xdffAGn04nKykq89dZbAMB2cxIR4uLiMHr0aGzevBkNDQ2YP38+Ll26hAv/n70zD2+ySvv/N0mbpGnadF/pXmgplaVALaWMICCCiOIy44YjOI7i6Ig/xWXcEORi3MBBRl5EQdkVEKEIZW0prbTQPS1d0yZNmmbfk2bt+f1RnzMttOA2vs47fq/LS5o8z5PzbOfc5z73/blrarB8+XKUlZXhwoULNFbc7XZj+/btCA4OxsWLF/HYY48hMDAQ+/fvR11dHVasWAGNRgOxWIwnn3wSly9fxpNPPolPPvkE0dHRUCgUOHPmDO655x6a5HTmzBn09vbCZrPhgQceoLwVRseOHUNrayu0Wi1uueUWbNu2DVu3bkVPTw/kcjlOnDiBBQsWoKCgAAaDAYWFhairq8MjjzwCHo+Hl19+GVOnTkVtbS1UKhWqq6spw6a1tRWzZs3CJ598gnvuuYeyfRhq5MWLF7F48WIAwCuvvIK5c+eiq6sLKSkpcLlc2LlzJ0aPHo39+/fjnXfeoSREPz8/REZGYuHChVfdk8bGRjQ2NiIiIgKdnZ2Ijo6G0WjE2bNnIRQKUVtbi0cffZQm73zwwQdITk5GR0cHnn/+ecjlcixZsgR1dXUoLi7G6NGjsXDhQuzcuRMCgQBNTU14/fXXv/dzmZmZSXk1g+V2uzFjxgzw+XzceeediIiIQGlpKTo6OvDyyy8DGJhOl5eXg8/nQyaTYenSpdBoNPjmm28gFAoRFxeHGTNmABjg26jVapSWluKWW265ymVls9mwfft2jB8/HrW1tbj55psBDGRSe71etLa24vHHH0dsbCw0Gg2OHj2KwMBAhIeHY86cOSgsLMRtt92GxsZGHDp0CAsWLMDkyZPx7rvvIjY2Fu3t7XjzzTdHvA4HDhxAU1MTzGYzdRF89NFH0Gg0yM7Oxu7du2nWMsP1iYqKwqRJk2CxWNDe3o7k5GQ4nU5a2JrNZsNiscDpdCI/Px+9vb1QKpVob29HVVUVHnroIbS3t+OLL77A0qVL0dLSArfbjSlTpkAul+Py5ctYsmQJRCIRLl68iK+++grLly9HXV0dxGIxpk+fjqamJlgsFvz+97+nz5TJZILb7UZNTQ0ef/xxdHR0QK/XQyqVQqVS4cCBA8jPz0dGRgZ2796N5cuX47bbbsOFCxdQUlKClJQUTJ06FZcuXUJubi6SkpIAgPraz5w5g8bGRtTW1mLDhg203WlpaTCZTDh9+jSN/Ln11lthMBhQXl6O/v5+1NTU4LXXXoPFYkFTUxPcbjd2796Ne++995rP6a/aco+KikJ2djaeeOIJyOVyCIVCnDx5EjfeeCPmzJkDu92OkpISjBkzBitWrKCc47CwMEydOhVSqRSTJ0+mxEKmc1y9evWwv6fT6dDZ2YmlS5cOqaa0a9cueL1ezJ07FwqFAmazmSJmjUYjli1bhvPnz0MoFOKuu+4aYmXv2bMHc+fOxf3334/NmzfDarWivr6eIm0zMzOxYMECfPTRRwgKCsKkSZPg5+eH2bNnIzo6GtHR0Zg2bRouXbpEj1lTU4OMjAy43W5alGIwIU6tVuPGG29EYWEhZs6cifnz5+PgwYPYvn07brrpJjgcDly4cAGEELz33nv44x//SAl6mZmZCA0NxSuvvIKQkBD09fVBrVYjMjISt9xyC5RKJRoaGjBr1izqCmhpacELL7yAe+65Bw0NDSguLsa0adMQHByMhx9+mMKhNm/ejFtvvRU33XQTtFotTCYT3n//fdx///2YNWvWkKo5gzV+/HiEhITg2Wefxe9+9zv4+/tj27ZtkEgkmD9/Pvr7+6FQKAAMhNnl5eXhzjvvpNfGZDIhLi4OAQEBWLBgAS23dvnyZSxYsICSPH+MCCGYNGkSANDMSmDAILjxxhtx7733UqInMECCrKmpwZw5c3DhwgWYTCa88847kEgkuP322yn8q6qqCnv37sXChQths9mGbaNQKERaWhqefPJJ2rEDA4MhQwNlUvxXr16NZcuWYdGiRXQ9hQHzsdlsmgEMDKCZH3roIXR1dY3I5a+uroZUKoW/vz92795Nj5eTk4MPPvgAhBDExMTAbDbj1KlTSExMRFBQEJqbm8HhcBATE4PIyEgIhUJYrVZs27YNH3zwAU0EKisrQ1FRET7++GMIBAJMnjwZVVVVtJ5pbW0tAgICkJOTgwMHDqCrqwvZ2dn48ssv6UASFBSE2tpa8Hg8TJ06FYWFhZDL5UhNTcWRI0dw4cIFWCwWfPDBB6iurkZaWhocDgfq6upgNptht9tx4MABXLp0CVlZWTRxKiAgAJmZmeDz+RCJRAgNDYVcLodMJkNJSQl4PN4QTAcwYK2npaXh0qVLsNvtaGtrg0wmAyEEdXV1EAqFyM3NRW1tLRQKBc6ePQuFQoEpU6ZAoVBAJpPh4sWLMBgMmDx5MiQSyfUXaUeCzvyS/40EDiOEkDvvvJO43W7idDqJw+Egf/rTn+h3zzzzDCkoKKB//+EPfyCEEGK1WsmKFSvo5wyoiRBC8vPzyYsvvjji76Wnp5PExETy7bffEkIIOXbsGHnnnXfIxYsXyY4dOwghhPT395OmpiZy6623EkIIOXz4MG3H7t27KdyLEEJuuukm2qaVK1fSz5944gmyfPly+t1TTz1FCCHE7XbT82D0zjvvEKvVSrq7u8ny5csp8Mrn8xGr1UrWr19P1qxZQ5566inS19dHXC4XEYvFpLi4mEK/YmNjyZo1a8iWLVtIXV0dBW198cUXRCAQkPr6evp7d9xxByGEUMhRfn4+IYTQ3+3r6yPr1q2jf6emppKvvvpqSJtNJhO5++67CSGEWCwW4na7SXp6OiksLCSff/45MRqNRC6Xky+++IIQQsjJkycp5G04MXA0RvPnzydWq5V4vV56HZk25+TkkDFjxhCPx0MIIeT1118nL730EtHr9UOOkZGRQWbPnk0qKioIIf8Caf0QHTlyhJSWllIYFiPmGfP5fLR9crmc5ObmkiNHjpCNGzcSjUZDCCFkw4YNJDMzk9xyyy10/+joaPLuu++Sf/7zn+TcuXMUeMaIaeudd9455PPly5dToNyKFSuIz+cjhBDy7rvv0vMlZAA8N378ePLpp5+SEydODDlGaGgo4XK5pKamhvT39w/5zul0kurqapKUlEQIIeTDDz8kEydOpN8zUDOZTEaam5vJp59+SpYuXUrWr19Pjh8/TrRaLZHJZMRqtRKTyUSkUimRyWQkISGBrF27lshkMtLd3U3OnTtHIiIiyHvvvUdkMhmRyWQkJiaGFBYWkq+++oq88sorRC6Xk2PHjpG//vWv5PLly+TQoUPkscceIxKJhHR1dZHa2lry+OOPE6lUSnbu3ElWrlxJOjs7yc6dO8kzzzxDjhw5QrZs2UI2bNhAenp6yFdffUXefvttcv/995Pz58+Tf/zjH+TQoUOktbWVrFq1ijz//POkurqaPPXUU+TChQtELpeTjo4O8uqrr5Lz58+TzZs3k4kTJ5KWlhZSUVFBT
CYT6evrI06nkyiVSnLw4EGybt06Ul9fTxYuXEh6enqIXC4n+/fvJ3w+nwQFBZF9+/aRl19+mSQmJpI1a9aQlStXks8//5xs2bKFpKenE4lEQtrb20leXh4pLy//zwSH+Xw+dHZ2YsmSJfD39wePx4NYLMajjz5KfVQHDhygSNiNGzeiqakJwACboaysjB6LqS3qdruRlJSE5557jlp6jDo7O/HMM8+gvb0dMpmMFu84deoUnnnmGUydOhVLlixBZ2cnWCwWPvzwQ+oGOH78OMWabt26FVu3boVcLodKpaLY4ZKSEjz55JN0RtDY2IhHHnkEbrcb//znP/G3v/0NCoUCX3zxBdatW4c9e/agtrYW+/btQ3l5Obq7u8FisfDNN99Q69BqtUIgEODZZ5/Fq6++io0bN9K6ktu2bcPMmTPpYhUwEI3y5z//GcHBwQgNDcWyZcvAZrNht9txxx13QK/XY8+ePfjggw8AgFp4DAOEEZ/Pp+6G1tZWGAwGzJgxA1arFQqFAi0tLaisrMRzzz0HYGBWIZfLsWzZMixcuBAPP/wwuFwu/v73v+Pee++FRCLBnj17wGazcebMmauehX/84x/UzbVx40Y0Nzdj6tSpEAqF2LNnDzo6OqBQKHDy5Em8/PLLqK6uxoULF/D3v/8dMpkMRUVFWLduHZ588kl4PB6cPXsW69atQ0tLC2bPno1nnnkGACgC9vvKYrFg+/btmDFjxpDwu+LiYsydOxdOpxN///vf8cYbb1Bw1KxZs3D77bfj6aefRkBAAHQ6HTIzM9Hc3IyCggJas5UQgieeeAJPPvkkIiMjR4xpPnToEABgy5YtUCqV+PDDDxEXF4eSkhJcunQJbDYbx44dw/jx49HS0oLc3FxotVqsX78eCQkJWLBgAZYsWUKn/ytWrIDBYIDL5UJeXt5Vcdg8Hg979+6laz0dHR2YN28eampqYDQaKcufySZtbGxEVlYW7r77bkyfPh18Ph+JiYkQCAQQCoUIDg6mvv1bbrkFbDabfubn54eMjAxUVFTg6NGjWLx4MVJTU3Hp0iVMnz4dfX19TCIPRX4vXrwYMpkMycnJqK+vx1133QUul4vg4GBkZGTAaDSiqqqKFsbp7u5GaGgoPB4P+vr6cPr0aRQUFKCvrw8Gg4H60VtbW5Geng6FQoHCwkKMGTMGXq8XMpkMR48eRXBwMEpLS+mMlVkz4/P5NCa+sLAQ2dnZKC4upvVWz549i7KyMjQ2NmLt2rU4ePAgqqurIZfL8dJLL2H16tWIiIjAl19+iba2NgoNmzhx4nUj2n61nTuHw4FMJsPo0aPpZ1VVVYiPj6cv0tSpU+mKsUQiwVtvvQUAKCwspPvYbDaMGjUKwIC7IjAwECwW66ooBofDQae+Op2OkiDz8vJQXl4OYMAHykxza2pq6HS8qqqKJmFotVqEhYVBrVYjNDQUQUFBkEgkOHz4MAghlF4oEAgQGhqKrq4unDhxgsb6Mm30eDyIi4vD22+/DYFAAIPBgJCQEOTm5tI2X7x4cYj7iMmW0+v1dEGUiUCIjo7Gt99+C7VajbNnz8JgMEAikdDEjJUrVyI8PBydnZ3gcDiUpaHT6VBQUACv10sHFYb5YTKZEBgYiJiYGDQ1NSEwMBClpaXwer2oqqqiECWPx4OkpCRK6+vo6KDVrSQSCWpqalBSUgKdTjds6KRaraYsaz6fj/j4eHrex44dg9VqhUajQWtrKw1dq6+vx8KFC1FSUkJdCwKBAGKxGHV1dXTAl0ql1JddXFyMffv2XfX7w8nn80EqleLMmTNXLYg1NTUhLi6OXmuRSAQWi4Xg4GBoNBpYLBZK6Gxra6PnLJPJ6GCcnJyMiooK6HQ61NbWXjXoMJ292WyGy+WCwWBAXFwcbr/9dgADMK7Q0FA0NjaioaEBRqMRvb29MBgMiIyMRGtrK26//Xa43W6MGTMGZWVlEIlE1EBSKBR0neBKJSQk0AXQw4cPIywsDF1dXbDb7aipqUFcXBwNNxw7dix8Ph88Hg+USiW9b2w2GxwOh8aQBwUFQSqVwufzQS6Xg8fjYdSoUTAYDBAKhejt7cWSJUsQFhaGiIgIcLlcWCwW9PT0YNKkSYiOjkZJSQnCwsKGlJ+MiYmB1WpFdXU1EhISEBUVhfLycggEAgoE0+v1sFqt6OjoQH5+PlJTU1FcXIz6+nrY7XacOXMGlZWVcDqd6O7uhlAohMlkgtFohN1up+X1ysrKkJ6eDr1eD7PZDJ1ORyO9mMLdhBCIxWIQQtDc3EzDey0WC5RKJRYvXozf//73tH4EEyaam5uLUaNGwW63Qy6Xw+v10lqxI+lXjR94++238eyzz1LWRGtr61WFL/R6PUUBMw9cR0cH+Hw+7dT1ej1EIhH8/PzQ3t6O4ODgYcl5NpsNOp0O/f39SE1NpSFM7e3tMJvNSEpKorhUiUSCpKQkWoKNWUCRy+VgsVj0t41GI/z9/SEUCtHe3o74+HgIBAJYrVbqMzMajRQ8BAx0OLGxsTTZwmAw0NCt/v5+XL58maZBDyer1QqlUonRo0fTa8LEnLNYLCQmJtKFK7fbDY/HQ4l6zPWLiYlBYGAg7HY72Gz2VVWTmDUQZlDS6XTQaDQ0o08oFFI8KbON1WqldEPm+rS0tNDFSY1GQ/2+g0UIgUqlQlBQEKXq9fT0wGq10mtgMpkgFApht9uh1WoRExNDS635fD7ExMRQHzwTZTD4vvl8Puj1eoSFhX0vBoxGo4HZbKYJNampqXC5XODxeGhubqbPqcPhoFYon8+HVCqFVqtFamoqeDweBAIBWltbQQhBZmYmnE4nBAIB+vr6oNfrwWazaQjecJLJZOByuYiNjUV/fz/cbjetbmSxWEAIgUgkQm9vL3g8HrhcLlwuFywWC8LCwiASiaBQKBAcHEzjshsaGhAZGYmoqKirfMeMtFotHA4HkpKSYLPZ0NPTA7vdjpaWFtx88800/NRms8FsNkMgECA6OnpYoJter4dOp4Ner6eEVpPJBLVaDYlEgszMTCQkJCA4OBg9PT20H4iOjkZrays9JvO+RkdH0/c2Pj4ezc3N6O3txa233oq+vj46yDCE07a2NlgsFtxwww3w+Xyw2WywWq0QCoXIzMyETqfDnDlz8MILLyAhIQEikQgJCQnUu2A0GqFUKnHo0CG88sorsNvtUKlUmDt3LoRCIdhsNkQiEbRaLZqamhATE4POzk4UFxfj+eefh16vh1KpREpKCgghEAgEUKlUtC/Jzs4Gn8+HwWCgxMn6+npEREQgNzd3RPzAr7Jzv3TpEgwGA86dO0cjVr6PrsVy8Pl8tALKLyHyPZkR34cU+L+l/wsZlkyF+u97Lt/3vo0ks9lMa23+mGP9Es+D2+2mHeJgCuVPUU1NDQwGA1JSUhAfHw+Px0MTk5iwwJHkcDhoog8TV240GmEwGGAymZCRkUGZ+CaTCTabjcLGXC4XHdS9Xi+Fh3k8Huj1egQGBtLF0ZiYGLBYLCiVSlitVoSEhND7FBQUBIFAAKlUCoVCgfj4eAQGBiIyMhJarRarV6/G
o48+ilGjRiE+Pp4mPz333HO44YYbEBoaCqlUinnz5iE2NpbmDzAUT6fTSevBMvdAoVDA398f4eHhEIlEAAYY9FarFYGBgRg7diy0Wi0CAgIoII3BVavVatjtdmRlZf1nde7V1dWoqanB/PnzqYX3m37Tb/r1qrm5GQ6HA5MnT6ZExJ8ySDKzoCvFIAm4XC64XC4dvK8nr9dLUb6VlZWYOnUqLZzNJLip1WrqmrXZbIiKioJCoUBSUhINt+zt7aVhzWlpaSgvL0dYWBi8Xi/uvfdeGgFjs9kosZap58C0efAA7nK54Ha7h0S+aDQaOoMczkBgBufrIX9/lZ37b/pNv+nXob/97W8wmUx46623RrTACSFob29HSEgIIiIiftbZ3rU6b5vNhvb2drr2NZI8Hg+8Xi9lotvtdnA4HBp8IBQKERAQgObmZni9XgQFBaGlpQVmsxnjxo0Dm81GREQEent74XA4wGazodfrIRQKaaEQg8FA6+Xm5ORQhs5wHXNfXx/tzCsqKjB58uQR3V/XUmNjI2644YbfqJC/6Tf9ph8mg8EAjUYDnU6HpqYmmlx1pbxeL8LCwhAaGgqHw/GzYh6YznE4C5bD4YxYdWqwmOLYzc3NCAoKQnh4OPz9/eF2u9Hf30871oSEBHC5XLjdbmRlZcFmsyE2NhYGg4GuRURHR0MsFtNEPYlEQtd7mPJ9DMRsuJkLM/CR77g4TPFuq9VKLfvh9iPfYQw4HA51P1/PML9u585isbYBWAhAQwjJ/u6zVQAeA8CENvyNEHLsu+9eBvAoAB+AvxJCTlzvN37Tb/pv1ff1y19rO71eTyNPfk6FhYVRK3Tq1KnDbmOz2eByuRAeHk4LV/wUXbkGMLhzH/y3Xq9HSEgI4uLi6PdM5zecmNqwzOI+E3QhEongdDqHFAdh/Ok8Ho/igxsaGlBbW4uCggKMHj0afD4fcXFxSE1Nhcfjgc/ng0qlAiEEYWFhQ+6V2+2G1+ulawsBAQGwWCzg8/l0psOEe6tUKkRGRqKvr4/655kOnzk3Zl3xes/N97HcPwOwCcCOKz7fQAh5b/AHLBYrC8B9AMYBiANwmsVijSGE+L7H7/wqZLPZ4HQ6ERISctXClslkokVvf07r5JeWw+Gg1L1fYjFXp9NBIBBcFXFzpQgh0Ol0CAkJ+cmLfIM7nV9aLpcLUqkUIpHomtWTfD4f3G43NBoNrFYrLf4+nEZ6kY1GI7q7u9Hf349Ro0YhIiLie/mgv6/uuececDicEQMVOBwOeDweRdza7XYEBQXB4XDA6XTSyKZrnduVxxss5rwZRsvFixeRkpJCQxz9/PwQGBgIp9M5JLz5SneOn58f7dgB0BBMk8lEF9uZd9pkMqGxsZEyZbxeL3p6evDXv/4VQqEQnZ2dQ94bBvPLRE4Nts7dbjdYLBblvTN1mQEMaS/TmZvNZnA4HAQEBAxZ+B5O13tHruscI4SUArh2me1/6Q4A+wghLkJIF4AOALnX2edXI5vNhk2bNmH79u3DVhbftm0b2traIBQK8cEHH4yIp/2xYmo13nfffdfcbt26dTh37hy8Xi+sVivef//9q5KyriWBQICdO3eio6Pjpzb5mmJwt/Pnz8e2bduuuz2LxcLGjRt/FkyuzWbDli1bRkyf/3eJ8e3u3LkTFotlxG3eeustcDgcirR+9NFHv9fxr7SM2Ww2/t//+3/YvHkzoqOjvzdm2eVyoaamBm+99dY1E7eioqJGHCAZBgzTKSqVStTW1sLr9cLf3x96vR4ejwePPfbYsPfU6/VCKpUOOaeR/PVM5/nhhx+isLAQNpuN1ihl8jsY8Ny5c+eG4DhGOp5arcbhw4dht9vR3NyMkydPwmaz4dtvv8WqVavQ3NwMl8sFn8+HyZMnIz4+HhEREcjKykJUVBRFdtTV1dFZw2CQntlshslkgtPpRFdXF7hcLkVUDB5ogH8NahkZGRTLIBQKweVyKVaFyadgasxeb5b0U1Y+nmKxWA0sFmsbi8ViWhoPYHDalOK7z36y/p0Lv8zF6u3txfTp07Fy5cph462PHj2KMWPGwGq1YsWKFTh9+vTP2g5/f38EBwdj3759w5LimEy+4uJipKWlgRACf39/PPfccz8oqshms+HYsWPDwq9+qpg44cHy8/PDrbfeet19mdjfK0FoP0bFxcW48847f1Ch7J9KQWQKHgsEApw/fx5JSUnD8uetVivtgIODg3H+/Hnk5+cP2ebKgeFKt4TL5UJvby+sVit0Oh29vgz6trGxETKZDGazGVqtlhar8Hg8cDqd4PF4yMnJwauvvvqDZ6Ferxdmsxlms5kmXfX19VH0bm9vL0pLSxEeHg61Wo1p06bBarWir6+PPhvMtU5OTqZhk8y18ng8Q4wVxuI1Go3QarUYM2YMwsPDh8wmWCwWQkJCsH79emRmZkIgEFzVZzB/e71eiEQipKenY86cObDZbOju7qa0Tz8/P0yfPh033ngjDa8MDAxEb28vZDIZTpw4QTnuWVlZSE9Pp9niLpeLEin7+/sREBAANptN3TxMgpPVaoXX64VCoYBer4fBYKB5GIPb6/V6odPpsGvXLlgsFlgsFnA4HHovr6Uf27lvBpAGYCKAXgAMGWm4ueOwvTKLxfozi8WqYrFYVSNZGh6Phz7kTBXywdJqtcNaHUaj8boWG/muIIbX6wWHw4FSqcTZs2eHvWDMzWLaabfboVar6eeDucrDoclCZQAAIABJREFUWQw2m21IO5lQqSvFvLCDs0FtNhvNutRoNJSNrVar4e/vD6VSCY1Gc9WxmOy44c5bIpHQl8fhcND9mfPT6/WQyWR0n8GdHpM2zYhJl2eOzeFw4PP54HK5EBAQAIfDQWOfGTkcDpqtyqi7uxutra1XWTSDj3/l9tfiWavVatohDJZWqx3y24NfqMHPTH9/PxwOB01gAwaex87OTvT19aGxsRFarZZeG6bqTmlpKaRSKa0gdKUlKpFIcPbsWVpiTigUwmAwIDk5GWVlZUM6IJfLBb1ej/r6evT09Aw5Do/Hg5+fH43PTkxMpK4Lo9FIUbtMslpbWxsuX75MkbVarZZmSw/WlffF6XRCo9HQa2Oz2VBUVASxWAyVSkUBWIwFn5iYCK/Xi8jISCiVSpSVleGmm26ii4BdXV1oaWmhdENgAA/S09ODnp4eNDc306xMtVqNmpoaynJvb29Heno6TR5kxLBUNBoNuru70d7eDrFYjPLycpjNZtTW1qKiogJ2u51eX2ZAiYqKQnd3N81AbW9vR2JiIlQqFc1i9nq9CA0NhUAgoJnsDocDgYGB1DBkjAir1YqGhgbI5XJ6ThqNBkFBQejr60NzczMsFgs0Gg36+vrQ29uL3t5eupBrMBjQ1dWFzs5OOgAEBATA6XRCLBZDr9fDZrNBqVQOeUeH04/q3AkhakKIjxDSD2Ar/uV6UQBIGLTpKADKEY7xMSFkCiFkCpP1eaXEYjFmz56NTZs2ARgg6n23L959911ERkaiv78fhw8fhslkwqZNm3D58mWcPHkSS5YsGant2LVrF8RiMfr
6+rB8+XI6OovFYkyYMOGqgYF5kJKSkiCTydDc3Izc3FwUFRWhuLgYp0+fxrRp06BWq/HGG28MCc3atGkTVCoVhEIhpk2bhvPnz6O2thYPPvggNBoNTp8+TaMQ6uvrcf78eezfvx8AUFdXh66uLkRHR2PlypWIiYmBQqHA2rVrkZOTQ7fPyckBMNDBFBYWoqSkBP39/cPy71ksFr7++mtkZGRAp9Ohvb0dGzZsQGVlJaqrq5GdnY3w8HBYrVbKmGGz2Xjvvfewa9cuCAQClJaW0nTr+vp6in1gEKRGoxE7dgws0ZSXl2PZsmUICAiAXC7H8ePHweVy8frrr+Ozzz5DbW0tTZV//fXXqV++uroar776Kng8Hm688Ua89tprAICPP/4YDQ0N+Pbbb4dNj3e73WhpacGGDRsQGxuLNWvW4KWXXoJYLMbx48fR39+PN998E3v37sXBgwexevVqWu3n5MmTOH/+PGw2G/bt24eqqip4vV4sX74c7e3tOH36NN566y2cOHECo0ePRk5ODj766CPYbDacOnUKf/vb35CWlgaVSkUzUJnkFObZi4iIQFFREV555RVqXfb09GD06NGQSqX4y1/+gvb2dmi1Wrz//vuoqqrCN998g+eff/6qcxWJRDhz5gzWrl2L7OxsSlesq6uDz+fDkSNHYLfbodfrUVtbiy+//BIdHR1Yv349du7ciSVLlqCxsRE2mw1lZWVYs2YN6urqsGDBAnz++efYt28fWltbER4ejpUrV1JURk1NDZYuXYq6ujq8/vrryM3NpVzy+Ph42iF5PB4UFRUhMzMTDocD5eXlqK+vh0KhwHvvvYfOzk6cP38er7/+OphQ6IcffhiNjY3YtWsXLl68CADYvn07uru7advi4+OvWnRtb2/HhQsXUFlZiZCQEDQ2NuKf//wnjh8/jsrKStx7771QqVSQSqXo6uqCXC5HfX09lEolLl++jNraWjzwwAPIyMjAp59+iqysLISEhGD+/Pl44YUXKEZk37590Ov1OHfuHLZt2wYul4vw8HBaIrCpqYmSaw0GA95//33s2bMHGzduxK5duyASiVBeXg4ul4v169eDxWKhsLCQDnarVq2ig9+GDRtgt9tx5MgRyuNZuXIljh49is7OTtpPjKQf1bmzWKzYQX8uBsCYAEcA3MdisXgsFisFwGgAF3/MbwBAYGAgDV0yGo0oKSkBMGDNHT58GH19fQgODsYdd9yBoqIifPbZZzQZISEhYdhjlpeX4/PPP0d6ejpNQ1epVDQ1ODw8fMQU6VmzZkEulyMxMRGZmZmIjY0Fh8OhLHLG5zl4cDh58iT1w4WHhyM2NhYCgYBOKxsaGugMICwsDHFxcRAKhTT0qr29HQDw5z//GcBAunlqaipYLBb6+vqQlpaGMWPGABjohFevXo2ZM2ciKiqK8m6uVHV1NSwWC3g8HiZMmIA//OEPAIDU1FS6T3h4OLXivF4vLl26hJkzZ4LH42HcuHEoLi5GZGQkYmNjh2AemPvGpHZLJBIarrZ69WrExsbC7XaDw+EgKSkJ27dvx+TJkxEUFAQ+n4+ZM2cCGMADSyQSCIVCOJ1OREdHY//+/XjttdcQExNDq/pcKS6XC7FYjJSUFBo9cubMGWzZsgXR0dG0yj2DRYiMjKTx29XV1QgKCsLp06exfv165OTkYOzYsfDz80NzczMtFjFhwgTweDyKlvDz88PBgwepy8Dj8WDOnDlX+b6ZNRWZTAaRSESt6ptvvhmxsbFIS0tDUFAQQkNDUVlZicOHD9NiFampqUOOxaT1y+Vy2rkcPXoUmzZtQlBQEOx2O2w2GxwOByIiIsBisRAfH4/c3Fzk5ORQRovJZKLAsYCAAISFhSEjIwMejwcff/wxxo8fDw6HAy6Xi+bmZrDZbBo+OGnSJMTFxVFsgcPhQG9vL0JDQ8Hn86klLRKJ0NHRgbVr11IWS0JCAs28ZEB2kZGRmDJlClwuF/bv309nfrGxseByuVCpVDRz1OPxwOFw0Jm9zWZDUFAQoqOjIRAIEBkZCZ/Ph+TkZMTFxVFXCFMc22Qyoa+vj7KYQkJCqA/fbrdTH7tIJAKfz0dLSwvefvttfPbZZ+ByudTVwoipmwqAPpfx8fF44IEHkJqaCrFYjMDAQEgkEvj7++PChQvw9/dHdHQ0MjIyEBgYCLFYjNDQUIwaNQoxMTHYu3cvamtrcfToUVohKjAwEH19fTRB6poaCRfJ/AdgLwZcLx4MWOaPAtgJQAygAQMdeuyg7V8BIAHQCmD+9Y5ProH8dbvd5P777yeEEPKPf/yD5OXl0e+amprISy+9RNhsNiFkAFP61FNPEZ1ON+yxGKWnp5NHHnmEEEKIy+Ui2dnZxOfzkaKiIjJnzpwR93vzzTdJS0sL/XvNmjX03/fccw9Zt24dIYSQ8ePHk88++4wQMoB7XbhwISGEEIPBQLZt20YIIeT06dOksrKSEDKA0129ejVxu93EarWSxYsX03O3WCzk888/J7NmzSKHDx8mUqmU5OfnD8Gw3nnnneSNN94ghAwgZZcsWUI8Hg9FvQ6nKVOmkKKiIjJ//nxy8eJF+vmBAwdIQ0MDIYSQ1atXE7PZTAgh5NNPPyXFxcV0u+3bt5OzZ8+Svr4+ivUlhNB71dDQQJRKJens7CRZWVn0+/j4eEIIIa2trfQzt9tN/z1r1iyi0+mI0+kkAEhJSQkhhJDp06cTt9tNEhMTycsvv0zkcvmI50YIITNnziSPPvooIYSQcePGkaVLlxIej0e8Xi8Ri8VDthWJRGT//v2ktraWLFq0iPh8PpKdnU0efPBB4vF4iFQqJenp6USlUhFC/oU/drlcpKCggEgkEtLT00O4XC45dOgQ0Wq1ZOvWraS8vPyqdnm9XrJ27VqSlpZGfD4fcblc5JNPPiGXLl0ihAzgegsLC4lWqyWxsbHkvvvuI3K5nBiNRtLZ2TnkWBqNhmi1WpKVlUV0Oh3RarVkypQp5PbbbyeHDx8mbW1tRK1Wk/b2diKRSEhubi4pKioiYrGYSCQSsmLFCpKfn0/Onj1LLl26RIRCIamtrSUlJSWkurqa/M///A9ZuHAhsVqtxOl0EpFIRJqamkhLSwuZP38+WbduHVGr1WTMmDHk2WefJXV1daStrY3Y7Xbi8XiIXq8nL7zwAklLSyMSiYS8+eabJCgoiEgkEqLT6YjFYiFWq5UYjUaSnJxMVCoV6ezsJIWFhWT37t1k8eLFpLKykrS3t5Pu7m4il8tJZmYm6enpIUajkdjtdtLf30+sViuxWCxEo9GQO+64g9TX1xOVSkU2bNhAZs+eTXp6esg777xDFi9eTLxeL5HL5USj0RC9Xk9kMhmxWCxkypQp5MUXXyQdHR2ku7ubzJo1izQ1NZE9e/aQ/Px8smXLFrJv3z4yZswY8vXXX5O6ujpSV1dHqqqqiF6vJ2azmbhcLtqOvLw8smrVKqLRaIjZbCbPPvssefvtt0llZSVRKpWkoaGBTJkyhcjlcuJwOIharSanTp0i06dPJzU1NUQul5Nz586R2267jXz44Yf0uT
xz5gzZsWMH6e7uJtu2bSO7d+/+achfQsj9hJBYQog/IWQUIeRTQsgSQsgNhJDxhJBFhJDeQduvJYSkEUIyCCHHr3f8a8lisVCrb8+ePViwYAHq6+vxwAMPQKfTYfXq1fj9738PALjjjjsQFBQEDoeDyspK1NXVDXvMrKwsamk+99xzWLlyJdhsNl2kHE5yuZxWaOro6MCyZcvwyCOP0GljR0cHHnvsMSiVSkyYMAFz585FX18f2Gw2CgoKQAjBG2+8Qas/NTc301kDm83GwoULIZfLMW/ePFitVlRUVGDHjh145pln8PDDD2P58uWIjIxEYWEhFAoFdDodPvnkE9hsNng8Hjz99NPo6urCqFGjwGKxKKdjOHyuQqHA/PnzMW/ePGi1WlRXV2Pr1q2w2Wz48MMPccMNN2DLli3Ys2cPLBYL9u7dOyRiYs+ePaioqMCsWbNo4RHmnJgwsdbWVsTGxmLHjh2w2Ww4efIkmpub6b0MDQ3Fvn370NXVRembe/fuxc0330yJgHl5eUhMTMSOHTvw0EMPwd/fH9OmTYPD4cCoUaNQWlo64oK2wWAAm81GR0cH7rrrLmzbtg3x8fHgcDiIiIjAoUOHqBsgODgYU6dOxZdffgk+nw82m40bbrgBfn5+6Ovrw/vvv48//elPtJza1KlTKab5xRdfhNvtRnR0NIKDg5Geno6TJ0/ik08+GTZ0kMPhYOPGjSgoKMCJEycgkUhw8OBBTJw4EcDAgn1/fz/Ky8uxcOFCGjcuFosprZER43u9fPkyqqqqIJfLkZaWBg6Hg3HjxkGtVqOsrAyffPIJenp6YDAY4PP5UFFRAUIIjh07hj/96U8wGAzw9/dHdnY21Go1TVoSCAS0ePW5c+ewYsUKcLlcBAYGorW1FaNGjUJjYyP4fD5mzJhBC1QIBAK6kPjll18iMzMT9fX1mDhxIiZMmEDXGi5dugSTyUSLZpvNZrS2tqKgoAB5eXmIiYlBXFwcvY9OpxOEDJSXk0qlNLSQy+VCr9fT46nVang8Hpw5cwZ5eXlgs9nYsWMH8vPzIRaLER0dTZHGzPnI5XLMmTMHlZWVOH/+PAW4tbW1YfHixRg7dixiYmKQmpoKf39/8Pl8WK1W+kwFBwdDLBZT4iePx8OMGTNgsVhgNBoRFxeHqKgoxMXF0cgZZnZlt9vR09MDPz8/Cozr7e1FcXExPv74Y8ybNw9xcXGorq7GF198gSlTpiAmJgZFRUXXLbPHWbVq1TU3+CX08ccfr2LcDoMlEAiwZ88eOgXmcDiYOXMm+Hw+XC4Xjh49ioCAAMyZMwexsbGQyWTg8/nweDwYO3bsVS9Yf38/goODIZVK0d/fj5iYGMyfP58WXx43bhwtGzdY/v7+6O7uplPoqqoqOuVPTExEXV0dbr31VvD5fGzZsgVjx45Famoq2Gw2Tp06BZ/Ph7Vr11K+udlspthPhvA4Z84clJaWIj09HVlZWYiOjqbRBUwnxcTfZmVlQSQSISUlBRcvXkRCQgLS0tLg7+9POwGz2YysrKyrrkFXVxdCQkIwevRotLS0QCwW4+6776Y+2kceeQQqlQotLS2YMGEC4uPjkZ2djVOnTkGj0cDn8+G2225DVFQUzZJTq9WwWq2orKxEYmIiwsLCkJiYCL1eD61Wi0mTJmH8+PE0dNNsNiM0NBTZ2dno7e2l7glmIXLy5MlgsVhQqVQoLS3FokWLEBcXh7CwMOrS8Hg8mDx58lXnxySrqNVqsNlsLF68mEKmdDodrFYrwsLCkJ6ejoCAAFy6dAmxsbF49913MXXqVMybNw8ikYguykdERODuu++GQCBAT08PAgMDkZaWBo1GA7VaDT8/Pxot4fF4KA3R5/MNmxZfV1cHm82GvLw8JCUloaioCPfddx/6+/vxzTffID8/H1FRUUhNTYVGo0FAQAC4XC4iIiIouZOJeBEIBKirq8OUKVOQmppKa5GGhYVBKBTC398fJSUllCiakJCAmJgYjB49GkVFRcjIyMDEiRMRFBQEf39/migVGxuL8PBwmmrvcDgwYcIEpKWlwWw24+LFi7jjjjsQEBCACxcuYPz48Zg6dSp8Ph+EQiFN7ZfL5QgMDMSMGTOQlJQEPp8Po9GIqKgocDgcBAUFob+/H+fPn0dGRgYEAgFGjRoFHo+Hzs5OpKen0+zXuLg4HD9+HHl5eUhOTqbYbiZjU6vVQqvVoqCggFZsmz9/PiIjI3Hq1CnExsYiJycHAQEBtJKZn58fOBwOTpw4gbS0NPpMM+AuJhAiMTER6enpcLvdCAwMRFRUFAQCAeLj42l2q9frBY/Hg0wmg0wmw8yZM2mYpr+/P63vyiQupaSkQCqVUkRxWloawsPDaTasSCSiVankcjmsVisOHjyIBx98ECEhIfj6669x5513YuPGjb2rVq36eLh+9T+CLTMS0Y95GH+KmBCtZ555Bhs2bLhm0skPlUKhwKhRo3Dq1Cls2bIFBw4cADAQNnYlT34kMYlTjPr7+8FisYac9w8lCZLvQqyGS4K4VuLE9SBNP/Z+XHlcnU4Hp9OJqKgovPrqq1i7du2PTmq6FvWwsLCQWkKrVq3CfffddxVS+tcqnU4Hr9dLE76YDpWZtbHZbLq4zPj+Z8+eTQdYpmMbnFjG7Nfd3Q2v1wuLxYLY2FgEBgbCZDJh1KhR8Hg8aGlpQUBAAFJSUtDX10dDKZ1OJ/z8/OizSL5L4hnMTRn8Lvf19dH6qcnJyVedI/NcMP+32Wy0HsOVx2PCCVNSUuB2u7Fr1y64XC7k5uYiPj4eTqcTHA4HBw8eRE5ODnJzc+HxeGAwGOBwOJCZmUkLbTNJYQkJCWhpaYHP54PVaoXdbofD4UBOTg7YbDYyMzPhdrtp5qlQKIRKpaIDGBPRxOfzYTabIRKJhrxfg99bs9kMNptNmTOEEDQ1NcFoNCI9PR2nTp3C119/jU8++QSEEBqgkJmZ+Z/Nlhlp4eCnduzAQMjczp07cdddd/3sHfvrr7+OtWvX0ukmox8CCboypXy4a/FDs0wZa2I4XSsu/HqZjz/2flx53IqKCthsNtx0003XbOv30bX2bWlpQXBwMORyOW677bb/mI4dAGX/D9aVzwGXy0VqaioiIiJoUh4Ti848R4Ppiz09PYiPj6f8fvIdW5xJpgFAOzXmujIdOwPnYrPZtNNirNnBGvz8MqGcI70PzHPB/H+4eHzmeExxD0II9Ho99u7dC4/Hg9mzZ9NFYKaOaUNDAwoKCuDz+aDRaKBQKBAdHU1/h+lgGRdTf38/DStms9loa2tDTEwMxYiz2Wya7Wo2m2m9XpvNhoCAAPT399NF1sHv1+CBWCQS0QQ4DocDj8cDrVaLL774An/5y1+g0+mQm5uLsLAwOBwOCASC67+P/wmW+79bPxfT+koxsc7/yaiC/w11dnZCr9eDx+MhMTHxZ2emDJbD4aBgqf+LYuL1GYu3v78fVqt12EijwYVTmDyMK59do9F4VS4CU0AGwPfCDPwcM+6R5PV6odFoUFFRgeTkZIwZMwY8Ho+CwpxOJxoaGhAcHIyQk
BBcvnwZSqUSUVFRyM/PB5fLpS692NhYmkshEomoC6+2thZnz57FokWLcNttt8Hn88HPzw96vZ7ifRlO/GAC5A8VUykNAOLi4q6atXxXoew35O9v+k3/jdJoNGCxWLSCmNfrZTjgMJlMtGxcQkIC+Hw+3G439b8zLsve3l5aHYnD4SAjIwN2ux3Av6xck8kEgUAwJCTwl5bH46EQr8EdKpNYyMwQ1Go1bDYbIiIiaIUvphwkU/wjNjaWlgdkQGANDQ1gs9mU/xIXF0cXZn0+H13QVigUiImJgZ+fHy3a8e+QwWBAeHj4iJ37f3aZnd/0m37TNRUUFDTE+vbz80N3dze6u7upS4GJSwcGXDNer5eyUgwGA6xWK4KDgxEVFTXEJzzYX8/n82kn90tLo9HQyDGGx36lGLeK1+ul1Y18Ph8yMjLo2oXZbKaZtlwuF6GhodTX7+fnh+zsbCQlJSE6OhoFBQVITU2lQLyYmBjqJmFKejLlJv+39B/hc/8ldD0C2/cRs2jyU+TxeGiK9E9tz69JTH3S72PV/RzX8b9FDApipAX6Kz/3+XwYM2YMxQRMmDCBfsdYvYyLinyHyWDq1nI4nCHVhVQqFfUbK5VK6gL5Kert7YXJZEJERARGylxn2qpUKuFwOBATE0Prrw5HHuVyuTT228/PD+PGjYPX66UIAoZbn5OTAy6XS9lGjK/cz88PNpuNhnoyC6n+/v5ISkqivnJGLBaLoiUCAgJGrCr1U/WTqZD/Deru7h42Jvz7yOFwABiY+r3//vvX2fraOn/+PI4ePYrS0lIcO3YMSuWw5IYfpf8Ni2qw2tracPz4yGkPTEei1+uv2VaXy4W2tjZ8+eWX/45m/iJqb2+HXC4f8fsf4io1Go147733rnm8weJwOJRBfiV7yOl0DoG2SSQStLW1gcfj0UzSwQYH87dUKsW5c+dgMBh+Mm5YoVDg5MmT+OKLL665ndVqhVgsxubNm2EymSCVSik98UqxWKwhRTAkEgmMRiNMJhOtxpScnIyenh709vZSXhAz0DHrFi6Xi+YeMHVZAQyJEPJ4PAgMDKSoYJ/PB4fDMSwV86e6xK+3/2+dOwYSc5qbm3/wfq2trSgpKYFOpwOPx0NpaemPbsOKFSuQnZ2NRYsWIS8vD+Xl5TSu+aeqo6MDx44d+1mO9WPFYrEQGBg4InnRbrejoaEBS5YsQVhY2FUANpfLRal4TALIf6IcDgfNNRhJ7e3tKCws/F4DclxcHCQSyYi4jeHEZrNpqv3gDoKBnzGMeSaaRSQSwW630zBc5h4y4L6enh44HI6fjFe2WCx46623oFQqR6yw5PP54PV64Xa7UV1djY6ODgQFBSEtLQ0CgYCiPK4cuAZH6bjdbvh8PpjNZtTU1GDTpk3o7u5GUFAQwsLC6CIqAyvs6+ujUUPBwcFob2+HWq0ekmPBuG+YQh9+fn7o6emB2+1GeXk59uzZcxUskBlsfqzhNRJUj57zjzrq/yHV19dj0aJFEIvFV32n1+tHZHIDwDfffIOcnBwalsZQ44ZjqzNsi5H0zjvvIDQ0FEajEUKhEH/84x/pi8Q8sFeO/sMx55mZBFMl3uVy4cCBA5g0adKQh2jwcQfL6XTSYwBDrQPm38x+NpttWCLlYNlsNni9Xpw4cQLTp0+nLxnDWOnu7obb7YbJZMLx48fpy8Tj8UAIgVKphF6vh9PphF6vR0BAAA4dOoSFCxdeZblYrVZK0ht8Plqtdthrf72Xg8nqHPw7brcbRqORXkuTyURdCRKJhF475nvGzca82Ha7HTfeeCN1FZjNZqjVaprw1drais2bNw+LY2ZYMYxUKhUaGxvxyCOPXPM8mHYzGnwtGEYR88xGR0eDxWLRXAjGaCkrK6MDLoPHjYyMhFarxaFDhzB37lyEh4dDpVLBYrHAarWitrYWCoWC/vaV94uJTXe5XFCr1bBYLOjs7ERWVhbuuuuuq9qtUqmgUChoMZ2ysjJMmTIFHR0dMBqN0Ol0QwpP19XVwWQyob29HQ0NDairq6OMF61Wi56eHrS1tcHf358mLYnFYlitVkilUmg0GhiNRlRXV1NiLHM/vV4vent76T1ubm6GyWRCR0cHdDod5VVJJBIUFRVBKBQO6UsIIVCpVAAGBh65XA6tVova2lpIpdJhaaaD+5X+/v5hseCD9V/tc2em+HffffdVLz+TGcqERF3JSz99+jT27t2LWbNm0dVwQgjKy8vR0NBAs1EB0EQSq9WK0NBQTJs27aq2BAUF4dChQxg9ejTN3mQwxGq1GosXL8bFixcxc+ZMxMTEwOVy0RcuLS0NOTk5kMlkqKioQGZmJrxeL86dO4epU6di//79FInAZKYyi1A+nw+33347bX9dXR2N+01JSUFDQwO0Wi3++Mc/0ugJxqr69ttvERoaColEgpycnKv8ih0dHZBKpejt7UVFRcWQOOz6+nq4XC50d3djyZIlsFgsOHLkCMaOHQulUkmzIauqquiU+Oabb4afnx/KyspQUFCAmpoapKSkYMyYMXC73SgrK6MIXIaUWVFRQfGx999/P4CBDrmrq4t2EgzBcbCcTicuX74MtVqNKVOmICkpCTabDS0tLVAoFAgKCkJ2djYuXLiAuro6zJkzB1KpFH5+fvjd734Hg8EApVJJE9bS09OhUqlw7tw5aDQa3HvvvWCz2WhqakJfXx/sdjvmzp2LY8eO4fjx45g/fz5YLBaioqLQ1tYGu90OlUqF2bNnw+VyUWyzWCymNM7BOnHiBDQaDfLz8xESEoLy8nLk5ubCZrPh6NGjWLFiBd22ra0NarUabrcbUVFRyM7Ohs1mw5dffonbb78dVqsVJpMJ586dQ1JSEs6cOQOTyYRVq1bBarWiu7sbSUlJcDgcqKyshM/nw/jx41FTUwMej0fbxwwkra2tMJvNFH07b948eL1etLW10VBNxr3D4AUsFgtaWlrgcrkwadIkhIWFobe3FyKRCOfPn4fH48GiRYvA5/NC5tT4AAAgAElEQVQREBAAqVRKjYcbbrgBPB4PFy5cgEQiQVpaGk1SqqqqgkAggMlkojHkTIUsFouFS5cuUdeNv78/vF4vjEYjLBYL2tvbERUVRVEP48ePh8/nw969ezF58mR6f6urq1FQUAC1Wg2hUAij0QiDwYCWlhbMmjWLkh/j4uIQGRmJ4uJizJgxAxMmTKDGiVKphNPphMlkQnZ2NsrLy/9tPPf/E9q/fz84HA61ngZbfHPnzkVKSgrS0tKusnAJIbBYLPB6vTTF/PTp03j66acxd+7cITz5ixcvYv/+/RCJRJBKpfjqq6+GbUtHRwcuX76MF198kVotlZWVEIlEKCoqQmpqKiorK/HQQw+hsLAQDz/8MBYtWoT33nsPX331FVQqFSQSCcrLy5GRkYHJkydj2rRptHjA5MmTkZCQgPb2drS0tGDatGno7OzEq6++CmCANrlixQqKbaiuroZWq8XEiROp//Orr76ineDWrVtpbU2fz0ctbUaNjY2oqalBfn4+9Ho99Ql3dXXhiSeewKRJkzBjxgxs3ryZ
lvzzer1YtmwZYmNjcerUKTz99NMYO3YsamtrsW7dOsTFxYHP58PhcGD8+PGYN28eli5dCpvNhgcffBDz58+nXHTmt9555x3MnDmT4qGNRiOWLVtG645KJJKrOvZt27bhySefREFBAcrKynDs2DGsWbMGd999N/XBPv744xCLxWCz2Th48CBCQkKQm5uLN954A2azGV6vF/PmzYNEIqHWqVKpBJfLxeHDhxESEoJbbrkF69evpwuHbDab1tjMzs4Gn8/H119/jd27d0Mul6O9vR2dnZ349NNP0dzcjLS0NBw4cAA6nQ4ymQxSqRSdnZ24cOECtFotxGIxNm3ahJqaGso5T05ORnNzMzo7OwEAGzZswObNm5Gfn4+3334bx48fR29vL4xGIzQaDTgcDuLj4+HxeFBaWgqFQgGRSITPPvsMHR0dqKyshNVqBYvFoviIrq4usNlsbNq0CVqtllqYLpcLCoUCb7zxBjgcDiZNmgQej4etW7ciIiICmZmZeOyxx4bgul0uFxoaGjBr1izExsbSsoJisRi5ubmYMGECkpOT8eGHH4LNZsPlcuGjjz5Cd3c3Ro8eje7ubuzduxdyuRwejwdnz56FTCaj0T/ffvstnn76aeTl5UEkEiEjIwPR0dHw8/PD559/jsrKSowaNQp8Pp9idsvKymh0TUlJCbq7u3Hu3DkEBAQgLy8PL730Es6ePQs2m00HAmbgOXHiBI4cOUJdSF1dXdBqtTh37hwOHjyIm2++Ga+99hoOHTpEZw8vv/wypkyZgtDQUEilUuzatQuFhYXXDbj4r+7cL1++DIfDgZaWFgADrhNmOhQbG4tXX30VW7ZsGZIazfjUqqqqhkR0lJSU4He/+x2AgY6aufCfffYZ0tLSYDAYEBQUhAcffPCqdvT29iIhIQErVqzAjh07KNRq/PjxaGxspEWAmaotn3/+Of2My+UiNzcXXq8X48ePR1dXF/UF3njjjThy5Ai1tPl8PrZv346cnBz4+/vDYDAMAXdVVFRAJBIhLy8Pf/jDHyAQCKBWqylutq6ujvqKc3Nz8fbbb2PVqlXUSh4cCXPo0CHk5ORAIBDAbDZj+vTpAICDBw+io6MDQqEQDocD2dnZNNmLgSsxnJWOjg6EhYXBYrEgPz+fckQGV8my2Wyora2l6xNGoxHjx48HAKSkpKCkpARLly6lUR51dXWIjIyk7O3hWEI7d+4Ej8dDf38/Zs2ahdzcXOzYsQOJiYkU92owGBAZGYmEhAQkJCQgPj4ebrcb48aNQ2hoKIRCIZKTk/Hxxx9DKpUiJiYGIpEIZrMZqamp4HA4yPr/7L15fJTluf//niQzk2WyTfZ9Z0lC2MIqu4ALoqAtKuBSPait1mrdqr9Wbc8pLtX2eHCrlW8RhaIiagFlJ4ASCCQsCdn3mezbJJPJbJncvz/iczcr4NJz7Dl+Xi9eQDLzzDPPcz/3ct3X9f6kppKbm8umTZvw9vbG09OToqIiAgMD6erqwmQysX37djQaDR0dHcTFxVFeXs6WLVsYN26cRF4rqXtKTNzX15fw8HC6urqYO3cuOp1OMozq6uro6OiQVZc7duyQjCBPT09iYmKkq5DD4SAmJobIyEi6urqIi4tDrVZTVVUlz7GmpoaQkBC0Wi2BgYGYTCbCw8Olb6iSLgj/YO3HxcUxceJEXC4XfX19NDc3o1arMZlMREZGDhpsnU4n/v7+jBkzhl//+tcybbG6uporrriCyMhITCYTOp0ODw8PCgsLOXDgAMHBwfT09ODp6Ulrays6nU62uwULFpCQkIC/v7+0AlRW1cnJydIKLy0tjbfffptnnnkGNzc38vLy2L9/P25ubnR3d5OZmUlUVBRBQUG0trZKM5LQ0FDCwsKkaXZsbCwqlQqLxcLhw4clj0mZqSsOTmPGjKGtrY3IyEhSU1MlqDA2NhYvLy9iYmIIDQ1l586dkpF1Mf2f7dw//vhj1q9fz9q1a5k1axYGg4Hu7m4CAwPp7Ozkl7/8Ja+99hq5ubmD4spKw3vllVd49tlnOXjwIGfPnuXEiRPodDo2bdpEc3Mz1dXVtLe3s3XrVlavXs2CBQu47bbbSE1NHXQetbW1/OEPfwD6N2V0Op1MT0tKSuLvf/871157LQB5eXmSKf3QQw/R2tpKamoqc+bMQa1WExwcLM9PKWvesmULTzzxBPv27aO5uZkdO3ZImtzBgwe55ZZbaG5u5s0335Tl98HBwajVasaOHcuuXbt49NFHOXDgAGfOnAH6O9QjR47w5ZdfMnv2bDZs2DDs+r7xxhuSY5+VlcXNN99MS0sLf/rTn2Q8OT8/n5/97Gd0dnayc+dO7r//fvR6PW1tbWzatInx48fj4+NDeXk5d955p4zxK+Gdt956i5deeomsrCzuu+8+Ghoa+OCDD0hISKC9vZ3XX3+dnp4eNm7cKOmhR48e5fnnn2fevHn85Cc/GTb7UYwrli1bRnV1tWSEKCuOwMBA3nzzTaZMmUJYWBhnz57lpptuwmKx8Nlnn3HXXXfJ2eVbb73F6dOnJT0yJCSE7du3c80113Du3DluuOEGPv30U0pLS3n22WfR6/UcPHiQ1atXS1hUfn4+a9euZcWKFVx11VWcPXsWg8FASEiIZLYrHPbY2FgSEhLw8/PDaDRKkJuvry+nTp0iICBAZok0Nzdz+vRpTpw4waJFi6isrCQsLEzCtaxWK4GBgQgh0Ov1GAwGIiIi0Ov1ZGVlsXTpUjw8PPj000+ZM2cOjY2NaLVa3nzzTWbNmkVnZydjx46VAwL0h7o++ugj5s+fj8ViwWAwsGnTJm699VZ6eno4fPjwMNidTqfjs88+47bbbuO+++7jd7/7HfX19RiNRqZMmUJoaChHjx7luuuuw2QyceDAAXJycoiMjKS1tZWCggKWL19OUFAQx48fZ/ny5cTFxeHm5kZRURETJkzAZrNJRr0yQSksLMRisbBjxw5mz57Nnj17UKvVfPrpp8ydO5cJEyYwbtw4EhMT5cAC/UkWK1euZOLEiaSkpPD+++8zd+5cdu3aRXR0tKTbJiYmMn78eAlP8/DwYOHChRw9epQVK1aQnJyMj4+P3KtqamqSA+rhw4f5t3/7t1H9GhT9n4u5t7S0kJ2dzf79+1m5ciXQb9Lg4+NDdnY2cXFxOBwOysvLMRqNpKenExoaOgxelpGRIWcGBQUFMia/b98+QkNDaWxsJDo6mrfffpsjR47ICr9p06YNOp8zZ85w/PhxsrKy0Gg0VFRUsGnTJvn7hoYGhBB88MEHfP7554wZM4a77rqLZcuWYbFY8Pb2JisrixtvvJH6+nruueeeQSCu9PR0rFYrQUFBhIaGcvfdd/PFF1/I8uvGxkYyMzN5+umnqa6u5tSpU7S0tBAYGEhwcDDTp0+noKBAkjCPHj3KnDlzqK6u5uzZsxw+fFg6ZA3Uo48+yhdffCHTwrq6uggKCuKee+7BYrGQm5vLvn37mDp1KtOmTcNms1FeXi4NSJ555hlaWlooLi6WVNCBnJNPP/0Ul8vFkiVLGDduHMePH8fpdJKbm0t
iYiLTpk2THJDCwkJ5TR966CE+/PBDZs6ciYeHx7CNy+bmZtauXUtxcTHR0dE0NTURFhbG6tWryc3Npb29nYceeoiJEyfi7e2N0+lkypQp1NXVsX//fmbPno3T6cTT05O+vj6qq6vZvXs3TzzxBMXFxbI4JjQ0lPr6ehwOB1deeaWMhytl7ElJSURFRfHEE09w7Ngxxo8fj5eXF0uXLqW1tZWzZ8+Sl5fHihUr8PDwGDTb1Wq1nD9/nqCgIJKTkzEajXJFUVxcLBHFwcHBrFy5kpqaGlwuF6GhoZw5c4Zx48Zx8OBB4uPj6e7ulnaOmZmZ+Pn5odVqSUlJQaPREBkZicVikVaB0dHRnDt3Tm52KkYoLpcLl8vF9ddfT2VlpaRsrlmzhilTptDQ0MDp06cHhWQGAvYUEuz69euZOHEib7zxhty3KC0tZcmSJXh4eDBt2jS5F+FwOLjiiiuYOXMmFouFmJgY5s6dS2BgIFqtVqKDFdtKh8OBw+HA09OTlpYWGV4NCQlh8eLF+Pn58cILL8hVuIKxPnXqFPfddx9qtRqj0Sj7Cz8/P0kw7ejoQKVS8eyzz1JWViaLpxTuz1VXXUVISAhlZWXExcXhdDoJCwvjuuuuo6SkRNonJiQk8OKLL5KVlTVqRpGi7zXy958hJQMjJCSEsWPH4u7uTmtrK7GxsYSEhBAfHy9Nfz08PJg7d+6IxTdxcXHo9XomTZqEVqslIyODmJgY4uLiSElJYdq0aahUKlJSUuSsZurUqcPygJV4uBKW8PHxkR2OUhxyzTXXoNFoJPc7ISEBi8XCtGnTCAkJITo6mrCwMLq6uoiMjBxUoRcdHU1QUJBkfowbNw6DwYBer2f+/PnodDr8/f1JTU0lKiqK7u5upkyZQmRkJBqNRjKmp0+fTkpKCsnJybJzslgsTJ48WXLkByopKYmGhgaCgoKYPXs2/v7++Pv7M2HCBEJCQvD392fevHk4nU5iYmLw8fEZtPpIS0uTA9C8efMIDAyUBEStVktQUBCTJ0+WxzUajWRmZjJv3jz0ej2JiYlMmDCB2tpa+VqlqrKrq0sug4fG2y0WC2PHjsXf3x+9Xk9cXBx+fn6kpKQA/e46EyZMkKEG5R77+vrKEERMTAx+fn6YzWYcDgczZ87E09MTDw8PfHx8iIyMJCMjQ25qp6enk5KSIl1/wsLCpDtVbGwsarWamJgYwsLCiIqKIjg4WA7YGRkZsqNVpFarqaysZPz48UyfPl22g7i4OIKCgiRQy8/PT5o/p6WlSUqkgqtW7pebmxsajYaMjAy5YalsuAcFBRETE0N0dDSdnZ3ExsbidDo5f/48c+fOZdy4cTIzyGq1SjCZh4cHSUlJREREEBAQQHl5OYcPH2b58uV4e3sPAsZFRESgUqkIDg6WFaItLS3MnDlTHltxJLPb7SQmJqLRaIiIiJBoZyWkp5heK25M8fHxTJo0CbVaTWFhIS6Xi5SUFOx2uwwRhYWFERkZKbnsQghiY2NlBavJZCIxMZHw8HDCwsJwc3MjISGB4OBgIiMjiYuLIyoqiqioKCIiIqR3a0REBDqdDrPZTEREBFFRUURHR2Oz2UhKSiI4OFgigaOiouSzGhkZSVVVFXq9nr/+9a//2sjf/4sSX3m9xsfHS4/VH/Td6WLwqtEQ05ejb/NeQGZmfNtiIJvNJjtl6N8rcnd3x93dXX7vvr4+WTfgcDjw8vLCbDZTV1dHRESE3COAfxjnaDSaYawW6J9l5+fnk52dLVn0d9xxxyWJpUIInnjiCTk4KF65I6m7uxt3d3dJXLRYLHh6euLr6yuvuVIZqtyH5uZmuUejrGiVzDiFrCiEkKmUJpOJ6dOnU1pair+/v2Ssd3Z2SltNpepUqVIFJHZ5tEr3rq4udDrdt2obQ1VZWUlSUtK/NvL3/6IMBgOtra3SH/UHfbe6GAbh2zyA3/bh/br45tHU3d0teS+ALKwZKDc3N2muoYQ/rFarDKUBMgQxcFAYutpRwlDKRrW/vz/jxo27rO+ieBMkJibKJIHRNJDT4u3tLb1UB15znU4nQ0BqtZqQkBBZKaqct1I/orzPZDLhcDgIDAzE4XDQ29srXZpsNht2u13yd5T0YWX1pxiHDPzMoWpsbESn0+F0Or8RhmC0icilPCF+mLl/z3Upg4wf9H9bQx/8vr4+mXKoLPsvJoUNDv20RKvVSnx8vDyusufgdDpHHCC+b6qqqqK3t1eG0UaS8p2VdGVl1VJfX49OpxuENFayjBTj89raWhITE6VBuTKbh9E74X8W4rixsZGIiIgfZu7/qvqhY/9Bo2mkTsPNzQ0vLy9iY2MHzWhHc+saOPsLDAzEx8eHtrY2rFYr0dHR0uHpuwwnfFtdzHnM09NT7pmNpL6+vkGpmVqtVg6AFy5cID8/n7S0NNLT02lsbEStVtPS0kJwcLCs41CpVDJDRnk+lXDQSPpnIZCVkNlo+v7csR/0g75HUtjeX1dms/kbve+baOCqW2GuKLRExaBC0eW4mWk0mkHpgA0NDbIewWKx/LdMNBS+zcWu4cVWD8HBwYPgZwOlcHN6e3ux2WyYzWa50mlsbOT06dO89dZbZGVl0dfXh6+vL15eXrLOQ8mxh/7rfblWmd+1lKLKS62ifujcv0M5nU4efvhh3nzzze/keAp7AvqXzK+++up3ctzLUV9fnyzAgeFsi//tslgsrFmz5mtDnV577TXuuOOOYT+32WyUlJRcsmT860jphBVLNg8PD0klHKrROnej0YjJZAL6C8Dq6urw8/MjLCwMu92OWq2WqwFFlxPKFUKQl5c3Iv/oYjKZTBw7dowNGzZ8bSqqgvVtaWkZxPYZKJvNxp/+9CcMBoPMdS8rK6Ojo4OpU6dy8803k5mZKV2cxo0bR0xMDHq9XmYQwfCOdaBv7KVkNptlLcxHH32E3W6Xg9qlvp/y9759+2SV8Wi6ZOeuUqliVCrVYZVKVaRSqS6oVKpffPVzvUql2q9Sqcq++jtwwHueVKlU5SqVqkSlUl11yW/7v0SK23xCQsKo9MPLUW9vL8ePH5d5+ArHYuzYsd/VqV5SClNHgaI999xzrF+//r/t8/8nNRDIpBhXXI5cLheFhYX4+fkN6lSEEJhMJjZs2DCMDPhtpHTu9fX1lJaWAv3hAaUwZqCU2d7ApbzT6SQ6OpqAgABZwBcfH09fXx9qtZr4+Hg6OzuxWq3Y7XZqampoa2vDbDbLjcahqwen0ylxuD/96U85evToiNC2kQZNBV3wxBNPsHbt2mGD1NDvpHwXIYQsRFJ47MprlfJ/6GfotLa2smPHDlwuFwEBAbhcLtLT00lISCAlJYVf/OIXrFq1iq6urmGfFxISgoeHxyB4m8PhGOTBAAwDhClSkA7vvPMO999/P1VVVajVaiwWC25ubnKjVzEdGSqVSiU5Sy+++OKIpuIDdTkz917gESHEeGAmcL9KpUoFfgUcFEKkAAe/+j9f/e4WIA24GnhdpVL9twaOv03HOlRfd+YWERFBUlLSt2I1K/ZcysaOSqWioaGBuLi4b3zMb6LFix
fLOGJ5efklG9M30cWW3wMfoovpu7rfSifk7u5OQ0MDU6dOpauri56eHvr6+i6JtHV3d6eqqmpYYZQS4qmurpYhg8vB4/b19clZtaKhZEghBO7u7rKtKHgEZabe19dHd3e3zOgYmK2hQLLgH5koSlqgci2UjlwIgdPpxGq10tfXh6en56AQjvL9FZSExWIhICCA6OjoETNEhoZ4nE4nNptNzvTDw8OHde6Kxd3A797b24sQQjpHKT9XBteB9ESn00lzczPBwcGoVCrMZjNjx46VkDCtViuzixTSpNFopLu7GyGEzIhR/FGh/z4qKwbof1aVCt+hfUBDQwPd3d0UFhZy9uxZAMaOHSvDZw6HQ9630VZaHh4emM1m4uPjL0qshcvYUBVCNAANX/3brFKpioAo4AZgwVcvewfIAp746ufbhBB2oEqlUpUD04HsS33WUJ06dYoPP/yQGTNm4O7uTn19PT/72c8wGo3k5eVJVOaKFSswGAzU19dTUlLC9OnTOXv2LGvWrMFsNnPhwgUKCgqYMmUKV199NdBfGdrQ0CCXUzExMZSWllJTU8OkSZM4e/Ysa9eupaenh/z8fNzd3enp6WHmzJmcOXOGEydOkJaWhlqtJiAggIULF2I2m9HpdDgcDh5++GH8/f3593//d2w2GydOnMDlclFZWcktt9xyUdPclpYWnn76aTIyMigqKmL8+PG8/PLLbNq0iePHj7Nv3z6eeeYZbDabLEcPDAwkLy+PJ554YtCxqqqqqK2tZc+ePTz44INUVFSQnp5OQEAAjz/+OD/5yU84evQoAQEBJCcns337du6++27y8/MRQnDjjTdSVlZGUVERd9xxh9w4ysvLk7MWDw8PUlNT+fvf/47D4SA5OZlPP/2UF154QZIra2trufnmm4c5LJWVlfH73/+eVatWsWjRIjZu3Mgvf/lLVqxYwZ133smKFStkvv+FCxeYPXs23t7evPTSS3h5eTFv3jxJ12toaCAiIoLDhw+zcOFCZsyYMeizGhsbKSoqwmKxUF1dzaxZs8jIyKC1tZUTJ07g5eVFV1cXq1at4tixY1xzzTUUFBRw5swZli5dytixYykoKKC1tVWaTM+aNYvi4mJOnTqFt7c3vr6+/PjHPx4U725tbeXRRx+luLiYY8eOMXbsWElHVHDHMTExLFq0SL5HSfErKCjAZDKh1Woxm81MnjyZ6upqHA6HZIovWrSIpqYm/vrXvzJ27FiioqLkOev1erKzs/Hx8ZFEx5UrVxIYGChTHKE/9fbIkSNAf5VuSEgIt912m5xQuFwuVCoVX3zxBWFhYfT19UlqYWFhIUFBQRJMpgCyUlJS6OzsHJb3bbPZ5CzabrfT0dHB6tWrqaur47XXXgP6c7iVWLfNZkOlUrFnzx60Wi0Oh4PJkydjs9lobm6Wqxaz2Ux0dLSEqF1xxRXSbOTqq68mKCiIU6dOER0djbe3NxqNhvr6ei5cuICbmxsul4u4uDjq6uo4c+YMM2bM4MKFCxQVFfGrX/2Kn//851xxxRVMmDABq9WKzWZj5cqVVFVVUV1dTW1tLXa7nZtvvpm6ujrpFOXj48PixYuJiYnB3d0df39/dDodL7/8Mi+//DLd3d18/vnnnD17lhtuuIHjx49TVlbGfffdN8wgvrm5mc2bN+Pr68uXX345ah8CXzPmrlKp4oHJwEkg7KuOXxkAFJpTFDDQFsb41c++tjw9Pfnwww8xm80sWLCA8vJyALZt20ZtbS3XXXeddK8vLy8nMjKSHTt2MH/+fEJCQigsLCQ7O5vrr7+ejz/+WI6WgGQ8pKamUlpaKk1td+7cSWRkJHq9nsLCQo4fP84111wjO/zW1lY0Gg07duygo6ODefPmsXnzZrq7u6mpqSEjI0NWtu3atQuA7Oxs/vznP0vA16Vm9SEhIXh5ebFu3TqCgoKA/pi7Xq8nMzOTXbt2YbFYcLlcHD58GLvdLhv1UJWUlKDT6di2bRsul4vs7Gw528rLy+P8+fOSb52eno6fnx82m23Q4JOSksIVV1zB9ddfL2d4u3fvZvLkyYSGhmIwGDh79iy9vb0YDAamTp0qyZKvvPIKS5cuHXGZCf151AUFBTQ3N9PS0kJWVhbQv+Rubm5m//79eHh4MGfOHC5cuEBra6scrI8dO8aYMWPw8fEhJyeH7u5uUlNTMRqNkrWtyOl08umnn3Lu3DmmT59OTU2NhMQVFxdjs9mYOnUq5eXl9PT0UFFRIWeOfn5+9PT00Nvby86dO0lKSiIpKYlz587R3t7Ozp07mTNnDnq9Hk9Pz2EDmLL8z8jIYMyYMej1ej755BMiIiLIyMjgyy+/ZNu2bYPeo0wmcnNzCQ0NZcKECXzyySeUlpayY8cOUlJSmDJlCn/84x+pqKigtbWV8+fPc+LECcaPH88777zDmTNnaGpqkis/hQmk8PLVarUcXAoLC+nt7SUqKorTp08P8zdwd3enpKSEv/3tbwQHB9PR0YHVauX06dMcPHiQ4OBggoODaWhoIDIyEh8fH+bPny+pqYrsdjt5eXns2rWLkJAQ0tPTeeuttzCZTISEhNDT00NaWprs2BW5ubnx2muvMX36dPr6+mQB1r59+9BqtSQnJ8syfX9/f4mjSEpKwmAwoNVqCQgIwM/Pj7S0NFwuF35+fhw7dgw/Pz/Gjh1LYWEhbW1tCCHkXlNYWBhHjhzB5XKxZ88e8vLy6OnpQafTkZOTQ3NzM3v27MHf3x9fX1/Kysrw9PSksLCQ6upqiQj28PCQq52AgAC8vb25cOECLS0ttLS04O7uzpkzZ+jq6iI2NpaPP/4YrVYrVx3K86NUMU+dOvWSK/nL7txVKpUO+Ah4SAhxsfXASHk/w3ozlUp1j0qlOq1SqU63tLSMeKDk5GQWLlzIHXfcQUBAgLTA2rJlC3feeSednZ3U1NQQHBzM6tWrCQ8P58knn8TlcrF27VqeeOIJuewUQrBs2TKgv1NTZnWFhYXcdNNNLF68mODgYB5++GGSk5NZu3Ytjz32mHx/Xl4ed9xxB/Hx8SxcuJAlS5awZs0afH19JcXx7bff5qmnniIgIIBTp04xc+ZMoD/FrL6+nlWrVtHQ0DDqbr6i3t5exo4dy8KFC2V13fLly2XJv0L6KygokB1aSUnJiKYNS5YsQafT8dBDDxEdHc0XX3whO58777yTZcuWcf78eR544AG0Wi1PPvkkqampHDt2TKKHs7Ky+PnPfy5nowcPHsRsNrN3715ycnL48Y9/zLx586isrOTxxx9HpxBGpKMAACAASURBVNOxfv16AgICsFgsXHfddZK9PVQTJ04kMTGRFStWsHv3bvmahx9+mOXLl3P33XczY8YMDh8+zO23387MmTNJT0/HZrOxbt06IiMjuffee3nmmWfIyMggJyeHe++9V9IuFRmNRvbu3cuNN94oKw6VsnN3d3eeffZZlixZwooVK6isrOTEiRPs3LmTzMxM1q5dy/jx49m+fTs1NTV88cUX1NfXc91113Hq1ClJYezo6OD2228fscgnPz+fdevWyWX13/72NxISE
oD+2djhw4cHhZd6enqorq5mz549ZGZm4unpyWOPPcbPfvYzAgICCA4OJjQ0lIqKCtzc3FiyZAm9vb2sXr1a8smbmpoIDw+npqaGiRMnUlBQIBk/fn5+EndtNBr59a9/TWpqKkFBQbhcLmbMmDHofJTjdXZ2snr1anbv3i2fF6vVyrlz53C5XMyZM4e2tjaysrIkHmKgVCoVd911Fx4eHmg0GgICAsjOzpbgLrPZPIzUqWTPOBwOFi1axObNm4mLi+M///M/+ctf/oLFYqGmpoZbb72VtLQ02eaCgoK4cOECkydPxt3dnY8++og33niDpUuXkpuby8aNG3n77bfR6/UUFBRw3333MWbMGBobG5k1axYRERF0dXXJkv+f/vSntLS0sGLFCsaPH88vfvELHn30UTZs2IDVaiU4OJh169bR2dmJh4cHGzZskDA0m80mn581a9bwl7/8hdjYWMaMGSOpkHa7nVmzZsnUy7i4OFmUNXDD1mg0Mn36dJKSkoY9TwN1WZ27SqVS09+xbxFCKEDyJpVKFfHV7yMABZ1oBAZ6fkUDw7a9hRBvCSEyhRCZoxnhKtRAlUrFe++9R0FBAX19fdxyyy3odDqOHz9OYWGhzOL4r//6L1asWCFnpvn5+Vx55ZUYjUaWLVuGTqfDbrdz4MAB5syZg8FgYPPmzZJd8eabb3LNNdfIz79w4QILFiygtraWHTt2yOWUwWDgqquuQqfTsWXLFl588UV0Oh3FxcW4ubnJooinnnqKyspKTp48yeeff87zzz/Pxo0bgf6Y3mhOQNu2beOBBx4A+pekJ0+e5KabbsJut7NlyxZ++ctfUllZybFjx1i3bh1Tp06VJLmhUhq1QkU8d+4cAKdPn2b69OnodDpJe1RWFBs3bqS0tJQzZ85QW1vL73//e6ZOncr58+eBfvTvAw88wPz587nttttobGxEpVJx6NChQVZnBw4c4MMPP+Q3v/nNIBjaQPX09JCamkpYWBhvvvkmCQkJdHV1STZIV1cXCQkJXHHFFYSGhtLQ0EB9fT0xMTHSZAT645UZGRnMmDGD1NTUYZ1Ke3s7er1ews8Uh6fPPvuMrVu3UlJSwuOPP87evXt56623CAwMZNGiRXzyySdYrVZUKhUlJSWsXbuWa665hsWLFyOE4NixY6SlpVFRUcH7779PcnIytbW1gz67ra2NiRMnStbNiRMniImJkdWZLS0t/PrXvx5WadnZ2SnZIxqNBg8PD8rKygZNTFavXk1iYiJ1dXWkpKSQlJTE6dOnWbFiBddeey1dXV28+eabzJkzh1OnTpGWljbIeFoxuD537hyhoaG0tLQQFBREUFAQbm5uOJ1O6QFaWlrKk08+yYMPPig55h0dHaxcuZJZs2axdOlSnE4nWVlZHD58mICAAHJycgZdC6vVSkVFBTNmzECv11NTU8OaNWtklktgYOCw2b6bmxu7du3i6aef5sYbb+TkyZN0dXVx6NAh0tLSmD59OhkZGej1eqxWK1988QUzZswgJiaGDz74AH9/f/bu3YvZbKa0tBS73c6CBQuIjIwkKioKrVbLFVdcQVtbm7TRmz17NgkJCezcuZOlS5fy61//mh07dpCbm4uXl5e05Dt37hzXXnstKSkpjBs3Dm9vb/bu3cupU6fYu3cvn3/+OR999JG8t8oeiRL+bW5uxs/Pj9OnT5OcnIzT6WTLli0sX74cg8EgK4iVwrSTJ0+Sn5/PmDFjhu3HDNXlZMuogI1AkRDijwN+9XdAyfm6A/h0wM9vUalUWpVKlQCkAIPv8GXKYDBIr0yj0cj9998vnchdLhdffvklbW1tlJeX09vbKzsfZZRLS0vDZrORm5uLw+GgpaUFrVZLamoqDQ0NnDp1itzcXKA/zl1WVjZog0jZbDlz5gw5OTmUlZUB/fFAJTWsoqKCJUuWAP+wwVNAYdAfqzxy5IiMM86ePRuAxx57bFTP1aamJoKDg6mvr5ffKyUlRS4BlY27a665RnYkra2tMvY4VIqDDPxjg81oNEr0r7I6Ub77wYMHcTqdmEwm1Gq1vK5NTU0ATJo0SRINq6qqZCNTjqPM+JR9hqqqqmEPrCLFWxX64+9OpxOj0ShnJQqG2Gq1cvDgQcrKymhrayM+Ph6bzSY3wSZMmCDt75T7NFBarRa9Xo/JZKKoqEguhysrKykuLqa7u5vGxkbS09Opra2VRMSSkhLy8/Pp7e0lMzNTDsiKRVx8fDwul4vz589TU1ODwWAYtunX1taGp6cnbm5utLa24uXlJUM1+fn5rFq1igULFgw6X8Ws2tfXV5rDtLW1MXnyZJqammhvbyc/P5/rr79eHle59iUlJSxZsgRPT0+52Wo0GsnNzSUmJmZYhktAQAAzZsygpaVFGlkoPqPKvVXsBBXbwVmzZuHj4yONpZWQXFdXFx0dHWi1Wrq7u4dt+rm7u5Oenk5XVxcGg4HTp09zww03oNVq6enpITk5eRiGQOGi22w2XC4XixYtQqVSsXDhQjnhUqlUtLe309TURFNTk5xkmUwmVCoV+fn5nDhxgpaWFtra2iS4LCAggNbWVqxWK1VVVXR3d2MymXB3d5fuV4GBgRw9ehRfX1/pOKVgnRctWiTTGFtaWujq6qKlpYXW1la6urpYtGgREyZMkM+Gh4cHLpdLeiMoufZK+E2xikxKShr0PCkb8EM3uy+mS+IHVCrVHOAYkA8o67Sn6I+7fwDEArXAj4UQ7V+95/8D7qI/0+YhIcTotveMjh+4+uqree+992hpaSEuLk5WllVXV9PT08O4cePkAx0aGsrp06fJzPxHJW5TUxPNzc1MmDCBsrIymb8L/TPn3//+9xQUFHDq1CnsdruMxypqaGigsbGRyZMny4df2Wj6z//8T4xGozTogP6ZlMJrNxqNBAQEoNPpMJlMGI1GSdyDfrefSZMmSaOLgers7KS0tJTExESCgoKoqqqSS/jGxkaCg4PlAFZZWSlNL0aTEEI6yihL3PDwcBk/H7h5Bf2uTC0tLWRkZKDRaGhubqa3t5fQ0FD5uWVlZbhcLoKCgqSLUm1t7aB0zYaGBmnBplgRjiQlVpqamkptbS0ajUYaHAghqKmpQQiBv78/Go2GpqYmSSZUNgTr6+vp6+tDr9fLdjJQLpeLkpISzGYzEydOpKioiJSUFHQ6nUzxCwwMJCEhgSNHjpCRkUFAQAD79u1j7NixxMfHYzabqa+vJyAgAK1Wi5eXF0IIcnJyyMjIkLCugWYigDRJVmLMVquVgoICvLy88PT0HHHFBf2ZMcrrFCRAU1MTHR0dhIaG4nK55AD96quvSvqgv78/CQkJaDQaysrKqK6uxmKxsGHDBl566SXGjh2Ll5eXTL8D5KaiYhAdFxcnaYoqlYry8nLZ4el0OnQ6Hd7e3nR3d9PU1ERUVJQcwJSJUlRUFDExMcPuR0FBARaLhfDwcNzd3QkJCaGxsZGDBw+SmJg4bKCD/sG9rKxMrr4G2tW1t7fT19dHQkICPT097N27l9DQUEJCQmT2jqenJ/Hx8RQUFODh4UF6erqcOAUHB5OY
mEhPTw+BgYFs27aNn/zkJxgMBgwGA729vUycOJHS0lICAwPR6/WD9qSqqqrw8vIiIiKC3t5evLy8pB9uenq67HOUimKz2SxDSZmZmbi7u7Nt2zYmT55MUlISH374ISEhISxYsEA+b7W1tfT09PDnP/+Zjo4ONm3aRE1NDfHx8aPiB77XbJlp06Zx6tQpOjs7JQZU+bIXK0G+mBoaGqiqqmL27Nn8/Oc/JyIigqeeeuqy33/27FnWrVsnB4TLBQEpLjkqlQqn0yln5wONCYa+/nLKxy+XQtjT0yNd7y9Hl7q+l8vLuNxrpMxSRquCtNls8tydTueIgCb49uc99HparVbUavWo11455qWqN5WsImXAgssrSx/KFlL+r6T/Qf+D//zzz/PUU08RGRk5iH3/1FNPMWXKFHx9fWlpaeHqq6+WtQsDpXDbR7tXSshE6exHuo7Kz5RrOFrbVGbT/v7+uFwuOjs7ycvLk3z60WB5A/nuQz9XWTW7u7vLAS05OZmkpCTq6+sJCgqSqxCtVktTUxNqtRovLy/Cw8PRaDT09vbicDhkiKu2thZPT0/pENXa2ioHWZvNJs9FMUxxd3eX98fhcGCxWPD19R3UdpQq4qFFYZ2dnRJzbLfbh71v165dtLa2StexX/3qV/+6VMjGxkbmzp2Ly+UacSPumwKMysrK2LVrFzabjQceeGDUWdNIcrlcHDhwgBkzZlyUJTGSBjZytVo9zHD7Yq+H0b/v5TI/RprNXkyXur6Xy8u43MHvUp3jwEFptI4dvv15D72eFysx/zq8lYHn/HVYI0Ovi/J/BQtst9s5deoUDQ0NtLW1odfr5cpB2Yfo6ekhPDycq64avZ7wUm156L7YSN9B+dmlcrUHpvYpnPVz584xderUi1JQR7sXiqWfouTkZMLDw2VVqZJVonBlFEMUp9MpV5omkwkfHx+5ulaY7QpkbCCuV+mclcFL2ZQfOBBrNJoR0b8eHh7D2qhKpRp0TUZ6VjUaDSaTiR//+Meyz7pk1OX7PHP/QT/oB11aZrN5UMUl/N8FzvX19dHT0yO580PrSZTN+vr6ejw9PdHr9Xh4eNDb2ztoAtHZ2SndlBQMstVqxeVyDTqmUrfg7+//jXC+30ZlZWWMGTNm1Jn7D2yZH/SD/sXl6+srV7dOp/Nrd+wXq/B1uVyDcAzfhf6ZE8ru7m5sNpvMLhpYzatUl3Z0dNDb2ytNSqxW6zDCore3N1qtVvoau1wuWaQ2UJ2dnTJMNlT/bIDcpSqzf+jcf9D3UkpJd1dX14idy5EjR3j88cf/B87s+yklJHK5eyoDdbHwktVq/UbHHE0KMuBiGthpfd2BQKPRyPPt6OgYdO7u7u50dXUREREh9/Camprw9fUd5keqVqtRq9XyXEYK9/X29sp8+u/yGo2kka7DJUOM/6yT+a7U3t4uH+6Bm0jfpdra2mhra7tko1NSAYeqvr7+azNovgt9W0qj3W4fNdf+f1KlpaWUlJRQV1eHm5sb586dIysri5KSEtkWfH19v/Y+wqU+Mz8//2tTDL9rDTQK/+8ImY72PCnPwsX2N76ulI69r69Ppsgq5txKWl9XV9egSmur1SpTV0fTQEibVquVg5Wfn5+sdC4uLqa2tlbeX2VjMyYmZsRjKrrYwDe0wx96v77LMI0yyWloaCA/P5+enp5Lto/vfef+ox/9iNdffx3o3yC69957v7Njb968mQceeAAvLy+2bNnC7bffPuLrlMZz8803j/j7ZcuWjVqkM5KG8l8uV2+//bYsOAJGPd/L1W9/+1teffXVUQet/wkJIcjKymLlypXU1dVJU+k33ngDo9EoO6MpU6ZwOebuu3btYvPmzSP+zmazyapiZca3Zs2a7+Q7fFM1NTXx4YcfcuLEicta1g/tnIfWGlxKI3VeA9+rZMZ8F5OX7u5uCQg7fPgw/+///T9effVVTCYTvr6+9Pb28vnnn9PY2Mh//dd/YTQasVgsPPLII6xfv37UyZdaraajo0MCwLy9vXG5XHh6etLc3Cy5TB988IHk4sTHx+Pr6zts8FLunZKvP1QDB5Kh5zOaC9PA114OMG6oHA4H/v7+CCE4c+YMP/rRj4BLh32+9527w+Hg2muvlUS7t94a0ej7G+mVV17hjjvuwNvbmzvuuENCi4ZKiWEqcbmhN0ij0Ui0weXohRdeuGzi4UC98cYb6HQ6iY39NmxwpYrxmWeeGbYk/Z9Ub28vWq2W6Oho3nzzTZxOJ2azmfnz53PllVdedLaudEBWqxWn04nD4eD999+XRVhD5enpyR/+8AcyMjKAfqLnn//858s6z9EeLIW5MvD3l+oYnU6nNI1oaGjg6NGjJCcn4+bmdsl4d21trUwDVJC7gPQNVXSpuDr0149UVVWhUqlwc3Oju7tbmlsondPXGbgGfqbdbpepzB0dHezbt48777yTZ555Bjc3N5nCOm/ePFpbW6moqCAoKAgvLy/a29tZvnz5sPY+EGM8FHPs7u4uKZbV1dWsXbuWlStXDnKVGuikJISQ8fru7m58fHzw9fWV119BHw8cDDw8PCgoKJD9gcJlHyiVSoXNZhuEDxjaHoQQw6qaB0rJunE4HJw+fZqEhATc3Nwu6jgF/wKde2RkpHRTV8rcgUGlt62trYNA9oDEh15spFQalNFoxGw2D/JONJvN1NbW0tnZKRuDkjalXOza2lr6+vpIS0uTqUwKdGo02Ww2bDYb3t7e9PX1yQFDIciN9BAqBMCgoKBhQK/u7m4MBsOg1/f09NDW1nZRdvj58+eHbbx1dXXJSkdAdpBKg4d/dCAKkU95XXNz86DXKNfnUiXSQyWEoLm5mVmzZpGVlSVRtUqZvPKwdXR0yMIlRUqRkslkkqS+0tJS9Hr9iJ9lt9ul+1BhYSEFBQVERETIjlm5jorMZrMsqLLb7RKH29fXR319PS0tLbJzt1qt9Pb20tzcPAxiNlTFxcU0NjbK6kjFJUgJJQxUR0fHoJWW1Wrl/Pnzsg3Y7XZaW1upra0d1C5KS0tHHGT6+vqwWq00NjaSn58vy/N7enpoamqiu7sbq9XKmTNnhrVPs9ksq8NHkvLcFBcXy0I4h8NBWVkZlZWVuFwurFYrvr6+8rWtra1UVlai1+tpamqSOeYhISGyvQ2t9jQYDNhsNhobGwe1387OTqqrq7FardTX15OYmCgL7np6emRbamxspKysTN7v+vp6mpqaaGtro6enB6PRSHt7uyzoKysro7u7m97eXtRqNQaDgYqKCtzd3SWyYaCam5sxGo1y89ZiscjBvKamhvLycmw226D3KRXQys/a2towmUy0tbURFxd3WQDC73XnfujQIe655x40Gg11dXUcO3aMd999ly+++IKcnBzmz5/P7373O7RaLfPmzcNut+NyuXjhhRd49913aWxs5JFHHhnx2ApvJjg4GK1Wy4svvih/t3PnTgoLCzl69CgPP/wwAJ999pnkvQC89NJLGI1GHn30UR577DE8PT2pqKhg165dJCQk8Itf/EI2RkWfffYZJ06cYNq0aRKA9sgjj7Bo0SL
0ej0ZGRm8/fbbw85Vo9Fw4sQJHnvsMcLCwtDpdJSXl0v395deekk+dCUlJWzZsgWn0znqhmNpaSkff/wxkZGRGI1G6uvreeutt6iqquLDDz+U5g8ff/wx48ePx2Kx8Jvf/IZ7772Xv/3tb6SmptLZ2ckHH3zAjBkzZPhEwTAA3HbbbXh5eclKusuVy+UiKiqKVatWkZmZicViIScnh0mTJuFwOHB3d+dXv/oVBoOBxsZGXn31VTo6Oti/f7+sLM3Pz+fBBx/EarUSGxs7iEEzUBUVFcyfP5+kpCRCQkJ4/vnncXNzQ6vVkp2dzdatWyksLGTu3LkcOnSIkydPcuWVV0oi4VNPPYVKpWLr1q088MADrFixAuhfYe3du5fNmzfj5ubG+vXrh5WK22w2Hn30UR544AFqamrYtm0b3t7ebNu2jerqatzd3SkoKOCDDz6QE43nnnuO6upqiouLeeqpp6ivr6ehoYGbb76ZV155hePHj3PrrbfS3NzM8uXLefHFF3nyySe59957aWho4OTJk4POQWG3Hz16lDfeeAOtVkteXp4E0h0/fpwHH3yQAwcOUFRUxCeffILT6aS4uJgNGzaQk5PDrl27uOuuu4Zd256eHv7yl7/w8ssv43Q6ee655/jd736HyWQiOzubc+fOIYTAy8tLdtLQP2AcOHCAm2++mfDwcBoaGggICKCjo4OHH35Yvq+trY0XXniBU6dOcejQIR599FGCg4MloqCmpoba2lqqq6tZt24dqampWK1WPvvsMw4fPsz+/fu56667OHv2LJ9//jnPPvss+/bto7m5meeff54DBw7w05/+lIMHD8rQ0JdffklTUxNz5syRGTabNm3iueee49y5c5w4cWLY3l1WVhZHjx5l+fLl7N27l48//ljehyeffJKSkhIuXLgg0Q0ff/wxf/3rX7FYLLzyyiu88sorvPHGG5w7d47m5ma2b9/OggUL0Ol0l9wj/F537rm5uRInEBoaio+PDwkJCdTX18tS4yVLluDr6yvZE93d3WRlZREbG4vJZBqxAMrpdKLT6Zg4cSIxMTFoNBqJHTh+/DgbN27Ey8sLh8Mh2Sa5ubmMGTMGu91OaWkpISEhZGZmEhsbKzkoZ86cwdvbGzc3NwkqGyhfX18mTpxIRESELJLw9vZmwoQJeHp60tTUhIeHx7DVRl9fHwaDgfT0dLlyyc3N5bbbbiMwMBCNRiPDPBs3biQmJga1Wj0ifVJJAVOyBqKjo9m6dSuLFi0iLS2NzMxMdu/eTXd3N97e3gQGBhIUFMTPfvYzrr76aul0ExISglqtRqPREB8fj16vlzNUd3d3/vznPxMcHMyYMWO+lslIS0sLoaGhaLVapkyZQnFxMU6nk5CQEIk4OHz4sAQqxcTEYLFYiIuLkxWWS5cu5f7778fb25vJkyczYcKEYZ/T19eHTqcjOTmZ5ORk/Pz8qKmpQaVSYTKZ+Mtf/sLcuXPx9PRECIGfnx8ajQYvLy/sdjsGg0GGAhTI1tixY0lMTGT16tW88847BAQE0NfXR2RkpIwDK1Kr1bz33nucOnUKtVpNXFwcdruduro6Fi9eTHJysuTn2Gw2ioqK0Gg0hISEEB8fz86dO2loaMDT01NWWp49e5bm5mbCw8O5+eabmTZtGlu3buX8+fN4eHgQEhIyLJ5utVo5duwY48ePZ8yYMSQkJFBYWIjNZiMhIQEfHx9CQ0Px8vLCZDJJzO9nn30m878HeowCctXy2muv4e3tTXR0NCaTid27d0uXqqCgIMaMGSOfEQU/rNFo5GerVCrOnj3LNddcQ2RkJE6nUw7qx44do6mpCXd3d9RqNaGhoWg0mkEr2+DgYImD0Ov1HDt2jG3btqFSqYiIiMBsNlNVVSXj/ampqcTHx3PjjTfKauLk5GSioqIoKirCy8sLjUaD2WyWufTbt2/nwIEDMrTW3Nw8KPau1WqJiIggKCiI8PBwDAaDnJTt379fmoRERUXh5eXF7t27pbWhh4cH4eHh7NmzhzFjxhAREUF8fLxEnFxSSsrZ/+SfqVOnioHq6+sTQggRHx8vhBDCarUKs9ksli5dOuh1L774ohBCiNzcXLFx40YhhBCvv/66KCwsFEII0d7eLnp6esRI2rt3rygvLxdCCPH000+LkpISIYQQU6ZMEf/2b/8mWltbhdlsFna7XTQ3N4vk5GThdDpFe3u7mDp1qnA6nUIIMeiczGazeP7550V8fLzIyckZ8XM3b94sSkpKRFdXlxBCiJUrV4qKigrR2dkpFixYIIqLiwe93mQyCSGEmDhxohBCiNbWVtHT0yOuu+46IYQQOTk5Ij09XV63wMBAIYQQ+fn5wmw2D/v8vr4+sXv3bjF79mz5s+DgYHnsP/3pT+L48eNCCCHuvfdesWHDBnk/hBBiyZIl4g9/+IMQQogrr7xSvPvuu0IIIU6ePCn/LYQQf//730VMTIyIiooa9D0upsbGRvHll1+K8vJyYTAYxNmzZ8Xy5cvFkSNHRE9Pj7Db7WL9+vXi4MGDIjs7W9TV1Yn6+np5fsuWLROPP/64sNlsorGxUTz99NPDrqcih8MhPvzwQ7Fz505htVrF7373O7Fw4UJRX18vfvOb3wi9Xi8sFovYuHGjWLZsmbDZbCIvL09cf/31oqurSzz77LPi0KFDwuFwiPb2djFnzhxx4MABUVdXJwwGg/D39xdlZWUiOzt72H2w2WyioaFBvPPOO+LZZ58VoaGh4tZbbxXV1dXi+uuvF2fOnBEXLlwQDz74oDhw4IDIyckRmZmZwmKxiPz8fLF7927xox/9SBQVFYmcnBwxZ84ccfr0aXHTTTeJNWvWiI6ODrF7925x9OhR8cknn4h///d/F4GBgaKyslJ0dHQIm80mhBDCYrGIP/7xjyIyMlKYTCZRUVEhnnnmGfHuu+8Kg8Egtm7dKt566y1x4cIFcf/994u9e/eK1tZWkZycLFauXCmOHTsmKisrRWdn56DvZ7FYRF5enli1apVoamoSlZWVYsaMGeKWW24RBoNB3HrrreKxxx4bdk+cTqd48sknxaRJk0R7e7s4cuSIuO6660R1dbXo6OgQSUlJoqqqSrS1tYlx48aJ/Px8cezYMSGEGHYOTqdT9Pb2inXr1onc3FzR0tIiJk2aJJYtWyZqampEVlaWmDlzpti/f7/405/+JJYsWSLq6+tFQ0ODqK+vFy+++KJYuHChKCwsFOfOnRMrV64Uzc3NYvfu3WLt2rXCZDKJpqYmsWXLFvH000+LgIAAcezYMZGfnz+sTb/00kvipZdeEgaDQVx55ZWiuLhYFBYWiueff17MnDlTBAQEiJKSEtHZ2SlCQkJEXl6e2LlzpzzPO++8U9jtdpGdnS02bNggrFarEEKI8+fPC+C0GKVf/V7O3JWRz+FwcOLECcrKylizZg1+fn6S4rh161aJsf3DH/7AsmXL+Pjjj0lPT5dLYMVhZyQdOnRIzup37NhBU1MTmzdvZsaMGTgcDoKCguSS7f3335d0SA8PD4KCgvDw8MBut9PV1cXJkyfJzs5m3MWbAA
AAIABJREFU9erVPPHEE7z77rsjptR9+umn/PGPf5TFEL29vcyePZu4uDj27NnDU089JTfHFCnnWFhYiMlk4r333mP37t3SPPs//uM/mDlzJu+//z6VlZXy9bGxsSM6tahUKumSo8wklb+VEMisWbOAfpjU4sWLB804e3t7ufrqqzGbzXR2drJ48WJqamp45plnmDNnDhUVFbz66qvMmTOHF154QdIghRD89re/HfFeWK1WDAYDJSUl7NmzR5o9REVFcf78edLT02XpuaenJxaLhcTERGlCsWvXLiorK/n888+ZO3cu77//PiUlJWzfvp3AwEBOnjw5LD6pVqs5ePAgY8eORavV8t5775Gamkp2djaBgYF4eHjg7e3NG2+8wZIlSyQ3xdPTk+PHj/PJJ5/g5+fH/v37aWtrY/HixcyfP5+IiAgiIyPx9/cnOjpaUiYHSvEhqK6u5pZbbmHFihWSjxIbG4tOp+PQoUP8/e9/R6fT4eHhQUREhHRv2r59O4888ghBQUE4nU5p/vDpp5+i0+mor69n2rRpdHd3U1xczK233srq1atlmbtaraarq4vKykoqKirkfoZiRhEQEIDT6eSTTz5h4cKF+Pn5cfDgQTQaDUVFRUyePJmgoCDS0tKwWq3D0hSVNEOLxUJTUxMXLlzgqquu4rnnnkOv11NVVcW8efMGhRXsdjvu7u5s3bqVtLQ08vPzqa2tlbC+3NxcMjIyqKiooLKykpSUFLy8vKRb2UD6ZFtbG319fbS0tHDkyBHa29vlnlpHRwfHjx8nKyuLZcuWMWfOHI4cOcLcuXMJCwsjPDwcu93O/v37mTt3LvHx8fT09DBjxgxsNhs5OTksWLCAoqIiTp48SWVlJTfddBM33XSTNP1Q5HQ6cTqdXLhwgeTkZBwOh6Synjhxgr6+PtavX8/KlSvp6uqSBirR0dEkJydTWVlJaWkpGo2Gjo4Odu3aRXp6uqSeXmqj/nvZuStSGk9aWpqsOFNUV1cnSYMKEzssLIzp06fLRnvhwoVRswTOnDkjN0EVJOfkyZNZtWoVoaGhFBcXYzQa8fPzIzw8nOjoaLq6uvD19eWGG27gyJEj5ObmkpKSgtPpZNasWcyfP5/S0lIZmx0qhWSoYIybm5uZNm2arHBTdulH0lVXXUV1dTVXXnkldXV1kgLp7e2N0+kkNDSUhIQE1q1bJ+N4o+XolpSUMGbMGBn+ue+++6R14EDDj/HjxxMVFTXouo8bN46IiAh6enpYsGCBXOoHBgbS0tJCUlISV155JU1NTRQVFcn00YaGBg4dOjTi+Sh0xYMHD/LZZ59JiFV3dze33nqrzJDRaDRcffXVlJeXU1dXR1VVFf7+/jQ1NVFYWMjChQupq6sjKiqK1NRUJk6cSE1NDQEBAYOWygq2tqioCH9/f/r6+pg6dSo2m43ExESmTp3KTTfdRHZ2NuXl5cTFxeHu7i7xvgqlVDFeMZvNTJo0SS7l3dzcuPPOO8nOzqa2tnZYipxWq8Xf35/ExESamppITU1l1apVlJWVMWHCBPz8/NBqtTJkkZyczLXXXsvp06cl9jchIQEvLy98fHzw8vIiNzeXhQsX4ubmhtlsRq/Xk5ycTGJiIvX19ZK3Av0bnRqNBovFwrXXXsuaNWvIy8sjMjKSpUuX4u/vj4+PzyA4mPLdkpKSuO2220hMTJROQ0MzmLRaLSkpKcTGxpKfn4/JZGLFihUEBwfLbBPFaUqRw+Ggs7OTzMxMVCoVkZGR1NXVyTCL8j5vb2+ioqK4/vrrKSoqoqSkRBpK9/b2ys1o5TwiIyMlrXLmzJnU19eze/duoqOjuemmm1CpVDI8p2zeBwYGotPpSEtLo62tjZqaGsaPH4+npydhYWHY7Xb0ej0hISFERUVRXl5Oamoqer1+UHaTYjQ+ZcoUKisrKSsrIzk5GZPJxIwZMxg3bhxtbW1MmTJFmqTceOON7N27l8rKSlQqFZMmTSIoKIjKyko8PT1lda3yPFxM/5JsmYtRAS+lkydPMnnyZFavXs327du/0TG+qYYSCweSDr+JLpccqUip2Lv77rtZv3498SMYXg98oIcSCbu6uvDy8kKtVst7MNK9UAbUgSbNyp+RGqSSNaTkJg/cqxjp+AOvm/JQO51OfH19aWtrk9aEo2nXrl1kZGTwy1/+ku3bt+N0OiXN0N3dndLSUmkf+PLLL/PCCy9IzK+7u/tl3Ufx1WalTqcb8Tt3dXXJQiWdTofVasXDwwO1Wi3TXRW2iVJO73A4MBgMeHp6Eh4ejlqtpqysjJCQEDo7OyVrXDlP6N+gtFqtEiam0Enb29tRq9USpaykPBoMBmJjY1Gr1TIDJTAwUKbeKbHg0e6NooFZVYqhdm5uLh0dHZw/f5577rlHWs8pg4NimalQUxUEsY+PDyqVisbGRiIiIuR9H9rWFcJkZ2enHHi3bt3KI488gsvlkqmminesr68vvr6+VFVVDdqX6evro7GxEb1ej1qtRqVSSaqqMgjp9Xq5d9fc3IynpyfBwcGDron4quhSpVLR3Nws94QUnIFiwqJMZJTno6GhgZCQEHx8fORg4ePjMyy7zWg0EhMT86+J/P1naPXq1WRmZqJWq/n5z3/+3/KZ3xedO3eOAwcO4Ovry+rVq78W1fL7rMvFHiu6/fbbiYuLIyYmhnvuuUeaUStkv7vvvptVq1bJ4y5evPiis6Rvip/+JqqurpYrQBg+AF+O6uvr5Yx/4MxbSU1UQo7+/v60tLTg5eX1jdqKgug1Go14enry17/+Fa1Wy5gxY7jiiiuGcVoGDkyKT8NIeOKLqb29nZaWFl5//XWmTZsmVxAajYagoKBBOeeAZM1cakKg6Ou0tYu9duB9U667MtjC5WEk2traCA4O/qFzV6TE40az9vvfrtbW1q/9wPxvk9FoxGazjYp7zsnJwdvbm7CwsO9NOxm4ovq2MhqNBAUFyVJ9u92OWq2mpaUFb29venp6cHd3Z9OmTdx+++0EBgZ+7ZXywPPt6OjAx8eH0tJSGhsb///2vjy6qev6el/JloRH2fI8YMAx2GEmFAyYBBvCWAKZCk3JQAvJWs3wNV9o2vSXpqZdWemvSb6VliTN0IQm4JSQMiYQZgzYzKaewDY2eJCRPEiyrNGafL4/5Hsrj0AmHEd7LS1Lz3p697zh3HvPPWdv3HHHHejs7BSc5TwWbTKZEBkZed1ww0DQaDRwuVwoKipCWFgYUlNT0dnZKYSleVz7q86av8nr8HXRlVnmd+5++PF9h8iC6BoN3qymAAdPOeQsic3NzWhsbBQatWPHjkVTUxPGjx+PjRs3YsmSJQgICOiTT51LHQ7k8Jqbm8VMw2g0wu12o729HbGxsRg2bJhYpwC8YSSXy9Vv4dn1oFarxQKzwWDA6tWr8dxzz/USTG9raxOL5t8FPTKfxfTXOXyVTkOr1SIhIcFP+evH4MRgGFx8X8CdII+Pf1UqXj4y5qpKwcHBiIqKQnBwsEhcsNvtmDp1qtDx7S8zoy/H3vOa+taaKJVK2O126HQ6yGSyXqpSvjH4m4XH4
0FcXBxCQ0MFJ87SpUuhUql6VXQGBQWJ+PZ3AV+myr7wVWYD13t2Bq0SU0+0tbUhMDAQQUFBQrrNd8HuZmKuXwf9SX3dalgsFhBRrzjmYIVvnPHbFDngTtBut3+jIRYu/ehwOL7TtQuHwwGn0wmbzSZS7b4ucyNfWHS5XIiLiwMRISIiAlu3boVUKoVSqYTBYOjTTrvdLqg0+DPIuV6oiwWypaUFycnJwoEZDAa0tLRAo9GAiOB0OkUKIS9K8gUXoPbVfO0LUqlUjMIlEgmUSiVWrFiBgIAAaDQa5OXloaioCG+99RakUinCwsL6LHL8NsDbfbMzErfbLXRre84wrkdCNuhH7uXl5Xj33XeFevurr76K6upqHDhwAJcvXwbgJeL6rnD8+HHs2rXrOzvejaK1tfUbJVX7urheabRv6tqhQ4e+EpHajUAikUCr1WLnzp039P2amppuOdP9fefs2bP45z//+ZUZNT0eDywWy03bzXV4z5w5g6NHj6KiogLAfzMzqEuPs6CgoBsvTs/6iZ5tAbwdR0xMDGpra0WYgI/u+xpN+2Y5ccfOs0A4URgRCf4Vl8sFh8MBiUQCg8Egfjc+Ph5Op7NbyMl3pmCz2b4SK6VMJoNSqRSdIc86qq6uhkKhEBkzX+W3fffhI2hf5kedTnfd+6gvWCwWwY/T0dGByspKbNmyBU6nsxdZ3fc6zx3wFistWLAA48ePR2dnJ2bNmoUJEyYgJydHnIQTJ04AuHGa06+Kt99+G3K5HCNHjvxWj3OzcDgccLvd2L17961uCgDvdSgtLcWOHTsA/Ld9vpDL5VAqlVAoFKioqPjWeOU5u+GN0Od2dHSgtLRUlM/3BY/Hg3Xr1qGoqAjx8fFfmVGTiJCbm4s1a9Zcd3rd3t4upvO82On1118H4HUozc3NIt3OaDRCrVajuLgY165dE5z/Op2um4P3HfVxTiaTyQSbzYY//vGP2Lx5M6RSqZil8nixL/OhRCIRGTccUqkUMplMtFmv10Mul+PPf/4ztFotqqurcfjwYdTX14vzx6kffPl3fEepXL/0RkMXvrYZjUaRb75o0SK89NJLOHPmDEwmk+hwBoq59+VAPR5Pt07ZtxaAp2MWFhYiPz+/1303EH++TqfDkSNHsGXLFhgMBsjlctFJcII74L+dyfUGBoPeuT/22GMiH3vTpk2YOnUqiAg2mw3p6enQ6XR4/vnn+w3N9Pfg9Nw+0Eizs7MTZrMZx48fx/jx4wVF7M38Pj8GZ07sjwGys7PzulS+LperG+OjXC7Hxx9/3KsAit/kvm3pKSfWH/pic+y5L38YHQ5Ht1hiS0sLTpw4gczMTNFeq9UKjUbTy4m3trZi1apVSExMhFqtRmtrK4gIarV6wNEm17Nsamoa8Ca3WCzYsWOHWCTk4Cx8vAqQx7F5YRbX1XS5XNBqtWIU1tjYiDNnziApKQnz58/vM1RBRLh69ar47PF44PF4RKWkXq9HeXk5zpw5gxkzZoiitr7abjKZcPr0aZjNZhARmpqacPr0acEpk5ycjPDwcLjdboSEhCAiIgKhoaF47LHHMHr0aOj1ehQUFAgyrfr6esGGSUQoKChATU0NAG9hYFVVFa5cuYL09HQMGzYMWq1WOGWr1SpsM5vN0Ol0aG9vR0lJCVwuF4xGozhOY2OjKBLct28f4uPjYTKZEB4ejkWLFuHnP/+5yIFvbm6G2+1GZWUlmpubodFooFarYTab4Xa7UVdXh6amJtjtdpHtxul5fdHQ0IDTp0/D6XRCr9fDbrfjypUromgtIiICbrcba9asER1Tc3MzqqurcfbsWTEI6OzsFDUFvMNTq9XQ6XRoaWlBXV0dHA5Hn2seHo8HjY2NOHnyJGJiYkTojI/mez5DnICMz3gOHDiAjIwMMSjhny9duiSeecaYCE0PhOvG3BljyQA+BhAHoBPAe0T0V8ZYLoC1AHjt8e+IaG/XPi8A+AUAD4BniGj/9Y7TH7gaul6vR0lJiViY4DdOSUkJDAYDduzYAZvNhocffljse+jQIcHtfd999wEADhw4AIvFAoVCgcWLF4tKxYqKCrjdbtx///29cpYlEglaW1tx6tQpXLp0CYmJiQgPDxdUn0lJSZg+fToqKytRVVWF2NhYaDQamM1mZGdn49ixY5g8eTKuXbuG1tZWzJ49G5cuXYJer8fSpUtF3O/gwYMwmUwgIkHI3xf27dsHh8OBMWPGICMjAwEBAaiursbEiRMBeMnP1Go1ZDIZ7r33XjDGYDabcenSJVRWVmLKlCl9kmlx7N27FzabDUlJSZg8eTLkcjm++OIL2O12qFQqTJgwASEhIdizZ4/IeKipqUFQUBBmz56N8vJy7Nq1CxMmTIBer0daWhra2tpQUlKC1tZWJCYmYv78+bBYLDh8+DDS0tIwbdo0HDt2DDabDdOmTUN+fj7kcjnWrFnTZwyWixzn5+dDJpNh8uTJ4l7h4KPckpISzJo1C59++inmz5+P5ORklJWVoaioCBkZGaioqMBdd92F6OhovPnmm1i/fj06Ojpgs9mwf/9+mM1mqFQq3H333Th37hykUinMZjMsFku3GCp3VBUVFWhuboZMJkNSUhJaWlpQVVUlCmNSUlJQU1ODhoYGaLVaEVP1BafHra+vx6FDh9DR0YHhw4fj2rVrOHbsGIKDgwUbJ38mZDIZLBYLTp06hbFjxyItLQ2XLl1CRUUF7rnnHhw6dAiMMSEW09LSgoKCAiQlJSEgIADt7e1oaGhAeHg4DAYDqqurcfnyZcjlcpjNZlGp/cknn2DMmDGYOXOmoLo1GAyQSqWora1FYWEhHA4H4uPj0djYiKNHj0KhUKCxsRFutxvvvfceli9fjkmTJkGhUKChoQEmk0mEXsePH4+mpiaYzWZMnjwZx44dQ1NTE6ZOnYrs7GxoNBqcOnUKwcHBmDBhApKSkoQ9W7duRWRkJFJTU2E2m0W1emxsLKRSKY4fP4577rkHMTExaGlpwTvvvIPc3FxUVFSIinFOgXz16lUkJiYiNTUV+fn5iIqKQnR0NI4cOYJFixb1+Qy1t7djz5492LlzJ6ZNm4b09HSEhITg5MmTsNvtqK2txYoVK8TamFwuF6EYjUaDo0ePYtKkSSAiTJw4ETU1NZg/fz42bdqEBQsWICsrC3K5HBqNBrW1tf0+w+IBGOgFIB7AlK73oQAuA7gdQC6AdX18/3YAJQDkAEYCuAJAOtAxehKH9YW3336b0tPTu22zWq20cuVKam9vp7q6OoqOjiYiIofDQU8++SSVlJQQEdHzzz9PbrebmpubadGiRURElJ+fT0RER44coTfeeIOIiD744IN+icaIiBYsWEBEXlKi+fPnC4KtmJgY2rdvH23cuJGefvppWrZsGRF5SYN2795NK1asoNWrV4vfWbduHRERXb16ld59910i8hIfPf7446IdfaGiooJ+9rOfkcPhICIvyRnH9OnTqaysjOrr6yk7O5uIiPbu3Sv+/+CDDwoyszFjxtD777/f7bc9Hg8REWVnZ5PBYCAiolWrVpHZbKa5c+eSTqcj
IqLVq1fTsmXLqKioiD7//PNuxGmc6I2IaOrUqUREVF1dTTt37qT777+fLl++TC+88AJFRUWR0+mkL7/8krZu3UobNmyg4uJiKi4upgcffJB2795NhYWFFBoa2if5mclkoldeeUWcr3PnztG6det6fdfhcNCOHTvEdVuzZg1NnTqVzpw5Q0eOHKE5c+aQXq+nsrIy2rBhAxUVFdGUKVNIr9dTa2srpaamUktLC7W1tdEjjzxChYWFdOXKFXrhhRcEeROH3W4nq9VKkyZNoqysLCouLqZ9+/aRVqulefPm0blz5+jixYv00ksv0fnz56m4uJieffZZunbtWjdiNrPZTDU1NbRr1y5avnw5lZaW0qlTp+gnP/kJnT9/ni5fvkx/+MMf6OjRo9Te3k5Op5OuXbtG7e3t5Ha76cSJE7Rx40Y6ePAgqdVq2rVrF91///2CjEomk1FpaSkdOHCApkyZQmVlZVRdXU0nT56k1tZWysvLo8LCQiLyPhsPPvgg3XvvvXT27Fl655136NNPP6UVK1ZQVlYWXbhwgWbNmkX79++n6upqOnHiBD355JNUVVVFGzZsoLy8PKqvr6dly5ZRZWUlabVaKi0tFURpx48fp927d9MLL7xAZrOZGhoaKCQkhN5++206ceIEzZw5kz766CM6f/48/exnP6PW1lbatGkTPfDAA1RfX0//+Mc/BFGXx+Mht9tNNpuNNBoNPfvss/TYY4+RwWCguXPn0l/+8hcqKiqiLVu20MaNG+nkyZNUWVlJU6dOJa1WSzt27KBXX32Vqqurad68eXThwgW6cOECvfLKK7Rt2zbas2cP3XvvvbRr1y7Ky8ujZ599lqqrq3s9Q21tbfTKK6/QmDFjyGw2U319PeXm5tLBgwdJq9XSvn376PXXXyer1Sr2U6vV1NzcTGq1mqZPn05ERA0NDfTZZ5/RwoULSa1W07333ksjRowgg8FAxcXFtHz5ctq3b9/XIw4jIi0RXeh6bwZQASBxgF2WAdhCRA4iqgVQA2Da9Y5zPVRWVnbTO+Sr8G1tbQgLC0NxcbFY3Lp8+TI+//xzdHR0YNOmTZg5cyakUiliYmJQXl6OdevWCW6Wzz77DPHx8XjzzTfhdDoHzIThYZ+ysjJkZWUJgi3AWzyxePFiNDc344knnoDT6URsbCyWLl2KtrY2QXJWXl6OZcuWAYDgdge8ccV//etfeOaZZ3DXXXf1OjYR4dNPPxXxN5fLJfhrPB4P0tLSkJaWhvDwcFRWVuLpp5/G9OnTAQDV1dWYMGGCOJZare4lZMJtU6vViIiIQGdnJ1577TWEhISgtrYWKpUKbW1tkEgkgpvmypUrYvTCZyGAN+TBR1M2mw0nTpxAfX09IiMjYbFYsHjxYnR2dmLKlCkwmUwYP348VCoV2tvbYbFYMHv2bFRWVvaZIQB447F5eXmIjIyEw+FAY2Mjli5d2mua7Ha7cerUKYwaNQqdnZ1oa2uDwWBATEwMkpKSBE1weHg4VqxYAZVKhdjYWERGRoopNF9A/+Uvf4nY2Fg0Njbi9ttv71UAwyXh4uPjce7cOXz00UeYMWMGdDodamtrcfnyZRw/fhxLlizBiBEjBG2ATqfrFks2Go0ICAjAnj17UF5ejoSEBCiVSoSHh4uS/itXriAoKAhEBKlUiqCgIISFhaGtrQ1BQUFobGxEVFQUQkJCREiMU2VHR0cjIiICwcHBCA8PR25uLt59911ERkbCYDCgvLxccKSkpqaira0Ny5YtQ2BgIJYvX46pU6dCIpFg7ty5cLvdCA4OFlqle/bsQWJiIhhjuPvuuyGRSET8mZfPR0REID09HZmZmdBqtfjyyy8xbdo0kTkVExOD0NBQyGQyZGRkYPLkyWhvb4fNZsNTTz2F1NRUwYUfHR0t6LZ5hTEv5z927BhGjRoFiUQChUKBMWPGiNH6xIkTERoaimHDhiE0NBRhYWGIiopCZmam0LAtKipCfn4+5s2bh8mTJ8PhcGDevHlIS0uDTqfD9OnTe90DEokEISEhaGhoEKG91tZW7N69G1OmTIHdbofT6URra2s3LVepVAq5XA69Xi9m8Vw7NiUlRYRqMzMzxUzUbDYPKAoE3GTMnTE2AsBkAJz1/ynGWClj7EPGGJcxSgTgKw3UiIE7gwHBHdD27dvxyCOPdFMj37x5s3gwPvzwQ5FJc/DgQTidTkybNg0PPPCAcIKff/45Ghoa8Nprr2HOnDkAgG3btuEnP/kJnnrqqQH1WZubm3Hfffeho6MDO3fuxNq1awF446IzZszAggULoFKpEB0djUWLFnWrsktJScHChQths9mwefNmZGRkQKPR4P3338fw4cNhs9lQXl4Ok8mEN954o0/SMcYYNmzYILicL168KNgKz549ixUrVkAul+P8+fPQaDTYsGEDZs6cCcC7VvH4448D8DreCRMm4IEHHuhVCVhfX4/77rsPZrMZEokEYWFhKC8vx5o1a9De3o6IiAgcOHAAv//97xESEoItW7Zg1qxZALyhnIceegjAf4VQ8vPzERYWhr/97W8YOXIkQkJCUFpaiszMTFE0s2nTJowcORJutxsXLlxAXFwclEoltm/fjrVr1/aZh2y1WlFeXo67774b9fX1OHnyJJKTk7vx13O1q08//VRkdRQXF+P5559HbGws9Ho95s6di+DgYCQnJ0OlUqGoqAhr166FwWCA3W7Hww8/jOzsbDz66KNIS0tDWFgYTp8+LWz2RXJyMvR6PZ566ilcunQJLS0tqKmpgdFoxJw5c7Bw4UKsXLkSw4cPh1wuR15eHpYuXYrk5GQhZ8edd0hICLZv346ZM2eira0Ne/bswcmTJ0W83mg0Yty4cUL5Z9iwYULJKyEhAV988QUiIyMF7zl3Hh9++CFycnKg1WphsVjw4osv4plnnkFxcTH279+PP/3pT3j//fdRX1+PEydOQKFQIDIyEpmZmVAqlYJXpqKiAnPnzkVVVRXmzJkjMp8++ugjrF69GkSEkSNHIjAwECUlJRgzZgwCAwNhMBiwbt06/PjHP0Z6erqogp06dSpCQkJw6NAhPPfcc5g7dy6MRiMmTZokONCJCC+++CJ0Oh2eeOIJrFu3Dk888QSOHz/e7TpIJBKEhobi2rVryM7ORkNDg7in8vPzxbkZPnw4dDodgoKCBKvliBEjoNVqkZ2djYULF+IXv/iFuDcPHz4sCNsKCgqQkpLSKxWxubkZBoMBe/bswerVq1FaWor3338flZWVYIzBarXigw8+wH333SfCygEBAVAqlQgNDUV9fT2WLl2KsrIyJCUlYfv27RgxYgSIvNJ8y5Ytg1qtxt69e/GHP/xhwNAqcBPOnTEWAmAbgF8RkQnA3wGkApgEQAvgdf7VPnbvtbrIGHucMXaeMXZ+IGXzlpYWlJeXo6OjA4mJid1GZ4WFhXjsscfgdruh0WiQnp4uRhqLFi1CZWUlSkpKsHnzZlgsFhw5cgTt7e344IMPhOLRm2++iWPHjuHw4cPYtm1bv+04cuQIcnJyEBAQgDVr1mD37t2oqqrCyy+/LFR0Ll6
8iMWLF4t9HA4H/vOf/4gRbXFxMU6fPg2VSoV///vfsNvtuHDhAnQ6HbZt24aamhps3bq1X7HtX/3qV5DJZDh16hTeeustGI1GGI1GIS/G0+PMZjM2b94s9EAfeughfPLJJzh9+jRee+01nDhxQggscLjdbiQkJECv16O5uRmnTp1CcXExxo0bB7VajZKSEvz1r3/F6dOn8dBDD4HIK2Axc+ZMXL58Ge+99x4CAwPhcDiEZqnRaER9fT3WrVuHESNGoK6uDlarVeRlV+0lAAAOfUlEQVRmNzY2Ijo6GqWlpZDL5fjss89EnL6yshLjxo3rpWYFANHR0ViyZAkaGhqwd+9eVFVV4fjx4906K4VCgc7OTjz66KMidfDvf/87Vq5cCalUim3btonRJeDlbNmwYQOSkpJgsVhEJ9rU1IRz587h4sWLCA0NFdev52I4L+bRaDSCsZQLbigUCrS2tuLKlSuorKwU3CkajQalpaWCXEwqlQrn/7vf/Q6BgYGorKyEXC7Hv//9b0ycOBGnT58WTslqtaKxsRFNTU1ISEhAaGgozp8/j4SEBJw7dw7BwcG4ePEifv7zn8NoNMJsNmP8+PGIiopCUVGRINiaPXs20tPTkZycjOHDhyM8PBzDhg0T7KOcAIzLyslkMpSVlaGwsBBBQUGoq6tDaGgonnjiCVRXV4uOetKkSUhOTsa4ceNw8eJF2Gw2uN1uZGVlwe12IzY2Fs899xw2bdqEzZs3o7a2Fj/96U8hk8mwZcsWLFy4ENHR0di5c6cQySgrK0NdXR2qqqrw6quv9prl8lTRRx55BEVFRdiwYQNWrlyJO++8E3l5eWCMoba2FsHBwfB4PAgMDERTUxNkMhmioqIwceJEBAcHw2az4erVqzh16hQCAgJw9epVZGRk4Nq1a6iurhb1Br7gNA1ut1vI5D300ENYtWoV6urqUFdXhxUrVojOjIPIKy1ZWFgo1i8MBgMSExORlZUFtVoNlUoFpVKJ2267DS+//DKKioquK2EpvREFecZYIIBdAHYQ0YcAkJuba83NzaXc3Fxav379VQC/zs3NfXv9+vUTAYTn5uYWAMD69eufBPB5bm5uo+9v5ubmFuXm5r6Xm5v7Xl5eXi4fWfbEnj17oFarkZSUBIVCgVGjRonQSUdHB+666y5ERkYiKioKUVFRmDFjBiIiIjBt2jQUFhZCIpFgzpw5YpHl4sWLCAsLEzfF2LFjUVBQAKVSiUmTJvVbBPTll19i7ty5oqflmQdLly5FbGysUJC57bbbRPs4b8aYMWNE5V1AQADuuOMOxMTEYMSIEUhPT8eoUaOQkpKC2tpauN1uLFq0qM82jB07FoGBgYiKisLy5cvR1NQklJ3a2toQFRWFnJwcnDt3DkFBQZg3bx4AryINz0S58847BbOeL/i0lo8gVCqVUKcKCwtDTU0NpkyZIpSpODf46NGjERsbK85/QkICAgICcO3aNSxZsgSpqakIDw8XCk6jRo1CTEyMWGjiNK9xcXEwGAy48847MXLkSDQ3NyMiIgLz5s3rtcBtsVgQFxcHp9OJOXPmIDMzE/Hx8YLSloMxhoiICFgsFowYMQJZWVnweDwICgqC0+lEamqqmFpz6leZTIYJEyYgMDBQLPZFREQgJiYG4eHh2LBhA9auXdsnWRhXuGpoaEBWVhaSkpIEkyJfTB01ahTCwsKEStPo0aOFdq/dbodMJhP52XK5HB0dHZg2bRrGjBkDt9uN/fv3o729HatWrYJCoYBOp4PFYkFERATMZrMI1yQkJEClUsFoNGLu3LlISUkR6lo5OTlCMYwrXnV0dCA7O1uoL2VkZMBkMkEikSA2NlYIRoeGhqKtrQ0ymQzp6elISkpCYmIiwsLCIJVK0dTUBI/HI3RPOXVwSkoK4uLikJiYCIVCgZiYGAwfPhxxcXGw2+0YPXo0VCoVIiMjMWzYMLhcLsTGxiI0NBRyuRxxcXHIycnByJEjYbVaIZFIkJOTg/Dw8G5hLbvdLgqYWltb4XK5xKIrJ1z70Y9+BKVSibCwMGg0Gtx+++1ISUlBWFgYhg0bBr1ej7KyMiQkJCAjIwMqlQparRYzZsyATCaD2+1GRkZGr5EzdRVtcZWv+++/H+Hh4UhMTBRaC7Nnz+5VM8C57JOTk1FbW4ucnByoVCoQEZKTkxEfH4/29naMGjVKzIJ0Oh30ej12796tzc3N7bPA5brcMsx75j4CYCCiX/lsjycibdf7ZwFMJ6KVjLGxAD6BN86eAOAwgDQi6jfjfjBzy7z55psijrd69WqRjnQzVYk2m+0rl1T3BV9GObfbLYpNeuKbqF68HvqiHebMgryT4+3tj6ZVKpUKvhOg+/lqbGwU8fv+cKOsjDfDxcLbwLNWHn30UUyZMgUBAQH49a9/fUO/AXhnL9SVhserm/tL262rqxOc8VKpFDqdTjhHi8WC3/zmN7j99tsxe/ZskWbq+92oqChYLBa4XC7RYfAUVV/xdwDdzjcXb05KSoJMJhOFRgEBAWKdpbOzU2QGcbFoDrlcLtKFo6KicOXKFcjlcqGBwPPUe15LvvbDOyG+tnD48GHEx8fjjjvuQEBAwE3dxy6XC4cPH4bFYsG4ceOwceNG5ObmCrIw3+vvex348Tl4+mrP0OX17rW+6K4Hqp73vX5ctpFLfPKOhLN08tRH/pstLS2IjY39WtwyswA8DCCHMVbc9VoM4C+MsTLGWCmAbADPAgARXQSwFcAlAPsAPDmQYx/skMvlqKurw/DhwwF4nfrNlpt/k44d6F7gERAQ0G8Rxrft2AH0unF5IYfvwjQfQPT1UPC2+z5EvuerP/GSnse8EdzMdeNtcLlc8Hg8YkbjKwJ+I1Aqld0KgZxOp9Db9K2t4E7QV7WJh0d4B2M0GpGZmSmSAQD06hglEkm3vHmu5NQTPKPi6tWrMBqNsNlsIn/a9/+RkZFCNYw7Lj67DQkJEfvK5XLBNso7xZ7Vqz076cDAQJjNZlitVoSHh4u2qlQqBAcHi3b3dR/3V9tgt9uh1WpRUFCAgoICEUrli52+4Bqy/Dr5gi/O9sT17jWJRNLtmRjIsfNZhsvlEvcbv1f4sfnxuPC3729ej37Azwo5AHg5N+C92N8lh833GS0tLUIcYrBjoFGh7/Vua2sToagbhe/+Op1OhBB6jkZbW1vhcDhESGD06NEixg14H2a+IBkYGIjIyEhBk6tQKOBwOEBEUCgUsFgsMBqN3RxpX5zvvNy/ra1N1EiEhYV1G5maTCaYTCZER0eLzsfj8Qgn6TsT0mq1IsuKO6iWlhYx+uSCJE6nE3FxcdDr9aiqqhLEYWlpad041X1H+TxsERAQAJPJBMZYn+FTzijJj8c7LKvVisjIyG4Vt/wcBAYGwmg0wul0dlvkZIyJIit+rbiICg9p9ecPTCZTn+L0PWG32wUfDr8+7e3t4n1ISIiYmfMqW25naGgon/UMbsrfO+64g7g2qh/fTxARDAaDWDQLCQkR3CEKhUJUaPqGarhD4tN6/lBxeDwe8fmrdKrkQ6Pa14PIY8qAd8TUnwPk6x
H88/WI4zhnTmdnJ+RyOex2u3g4uSADH2Hzka1EIhFVt1yZx2QyISkpqRvVLx/xc9uCgoKEA+M2c2ZHuVwOj8cjOonAwEB4PB5YrVbhgBQKhajojI6OFnH45ubmbhWQDocDdrsdSqUSERER8Hg8YjTJbeROT6/Xw2aziUXisLAwUZEdHh4uJPbq6upEyEqpVIp7gtvJOy4u4BEcHAyr1QrGmBjZBgYGCkUrqVQKi8UiMoh4J2W1WtHR0SFmA9TFw8NfH3/8MYxGIx555BHExcXBbDbD6XQiKCgIcrlcZKvw7/PzztvqcDjgcrkEXw2/Fowx4ZD5sTkjptVqRXJycrd70m63w+FwQKFQiH35rMNsNovwq8vlQlRUFKxWK5RK5eB27oyxVgBWALpb3ZZbgCj47f4hwW/3Dwvftt0pRNQn3emgcO4AwBg7318PNJTht/uHBb/dPyzcSrv9AWQ//PDDjyEIv3P3ww8//BiCGEzOffAoTXy38Nv9w4Lf7h8Wbpndgybm7ocffvjhxzeHwTRy98MPP/zw4xvCLXfujLGFjLEqxlgNY+y3t7o93yS62DJbGGPlPtsiGWMHGWPVXX8jfP73Qtd5qGKMLbg1rf76YIwlM8aOMsYqGGMXGWP/p2v7kLadMaZgjJ1ljJV02b2+a/uQtpuDMSZljP2HMfZF1+chbzdjrK6rUr+YMXa+a9vgsLs/ovfv4gVACq+YxygAMnhFPm6/lW36hu27E8AUAOU+2/4C4Ldd738L4H+73t+0yMlgfaF/gZchbTu8jKghXe8D4aXGzhzqdvvY/3/h5ZX6ouvzkLcbQB2AqB7bBoXdt3rkPg1ADRFdJSIngC3win0MCRDRcQCGHpuXwUvEhq6/y322f+MiJ7cC1L/Ay5C2nbzg4raBXS/CELcbABhjSQCWAPiHz+Yhb3c/GBR232rn/o0Ke3xPEEtdbJpdfzkJy5A8F6y7wMuQt70rNFEMoAXAQSL6QdgN4A0Az8Ors8zxQ7CbABxgjBUxxjhv+aCw+8bo9L493JCwxw8EQ+5c9BR48eWN6fnVPrZ9L20nLwPqJMaYEsAOxti4Ab4+JOxmjP0YQAsRFTHG5tzILn1s+97Z3YVZRKRhjMUAOMgYqxzgu9+p3bd65N4IINnncxIAzS1qy3eFZsZYPODlxId3hAcMsXPRJfCyDUAeEW3v2vyDsB0AiMgIIB/AQgx9u2cBuIcxVgdvaDWHMbYZQ99uEJGm628LgB3whlkGhd232rmfA5DGGBvJGJMBWAlg9y1u07eN3QAe7Xr/KLwKV3z7SsaYnDE2EkAagLO3oH1fG8w7RP8AQAUR/T+ffw1p2xlj0V0jdjDGhgGYB6ASQ9xuInqBiJKIaAS8z/ARIlqFIW43YyyYMRbK3wOYD6Acg8XuQbDavBjebIorAP7nVrfnG7btX/Dqy7rg7bV/AUAFrzpVddffSJ/v/0/XeagCsOhWt/9r2J0F73SzFEBx12vxULcdwAQA/+myuxzAS13bh7TdPc7BHPw3W2ZI2w1vll9J1+si91+DxW5/haoffvjhxxDErQ7L+OGHH3748S3A79z98MMPP4Yg/M7dDz/88GMIwu/c/fDDDz+GIPzO3Q8//PBjCMLv3P3www8/hiD8zt0PP/zwYwjC79z98MMPP4Yg/j8FjSEjUKJl8AAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "imshow(pred, cmap='gray')" + ] + }, + { + "cell_type": "code", + "execution_count": 73, + "metadata": {}, + "outputs": [], + "source": [ + "output_dir = 'test_preds'\n", + "if not os.path.exists(output_dir):\n", + " os.makedirs(output_dir)\n", + "for x_test_file in x_test:\n", + " img = cv2.imread(x_test_file, cv2.IMREAD_GRAYSCALE)\n", + " img = cv2.resize(img, input_shape[::-1], interpolation=cv2.INTER_AREA)\n", + " pred = make_prediction(img)\n", + " filename = path_leaf(x_test_file)\n", + " filepath = os.path.join(output_dir, filename)\n", + " cv2.imwrite(filepath, pred)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "lightweight-gpu-kernel", + "language": "python", + "name": "lightweight-gpu-kernel" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.1" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/test/live-reloading/onnx/README.md b/test/live-reloading/onnx/README.md new file mode 100644 index 0000000000..77456896ee --- /dev/null +++ b/test/live-reloading/onnx/README.md @@ -0,0 +1,7 @@ +## Live-reloading model APIs + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +The model live-reloading feature is automatically enabled for the ONNX predictors. This means that any ONNX examples found in the [examples](../..) directory will already have this running. + +The live-reloading is a feature that reloads models at run-time from (a) specified S3 bucket(s) in the `cortex.yaml` config of each API. Models are added/removed from the API when the said models are added/removed from the S3 bucket(s) or reloaded when the models are edited. More on this in the [docs](insert-link). diff --git a/test/live-reloading/python/mpg-estimator/cortex.yaml b/test/live-reloading/python/mpg-estimator/cortex.yaml new file mode 100644 index 0000000000..4c243b5032 --- /dev/null +++ b/test/live-reloading/python/mpg-estimator/cortex.yaml @@ -0,0 +1,8 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: mpg-estimator + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + model_path: s3://cortex-examples/sklearn/mpg-estimator/linreg/ diff --git a/test/live-reloading/python/mpg-estimator/predictor.py b/test/live-reloading/python/mpg-estimator/predictor.py new file mode 100644 index 0000000000..104b9a5c0a --- /dev/null +++ b/test/live-reloading/python/mpg-estimator/predictor.py @@ -0,0 +1,27 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import mlflow.sklearn +import numpy as np + + +class PythonPredictor: + def __init__(self, config, python_client): + self.client = python_client + + def load_model(self, model_path): + return mlflow.sklearn.load_model(model_path) + + def predict(self, payload, query_params): + model_version = query_params.get("version") + + model = self.client.get_model(model_version=model_version) + model_input = [ + payload["cylinders"], + payload["displacement"], + payload["horsepower"], + payload["weight"], + payload["acceleration"], + ] + result = model.predict([model_input]).item() + + return {"prediction": result, "model": {"version": model_version}} diff --git a/test/live-reloading/python/mpg-estimator/requirements.txt b/test/live-reloading/python/mpg-estimator/requirements.txt new file mode 100644 index 0000000000..cbcad6b321 --- /dev/null +++ b/test/live-reloading/python/mpg-estimator/requirements.txt @@ -0,0 +1,4 @@ +mlflow +pandas +numpy +scikit-learn==0.21.3 diff --git a/test/live-reloading/python/mpg-estimator/sample.json b/test/live-reloading/python/mpg-estimator/sample.json new file mode 100644 index 0000000000..2dbbca46dd --- /dev/null +++ b/test/live-reloading/python/mpg-estimator/sample.json @@ -0,0 +1,7 @@ +{ + "cylinders": 4, + "displacement": 135, + "horsepower": 84, + "weight": 2490, + "acceleration": 15.7 +} diff --git a/test/live-reloading/tensorflow/README.md b/test/live-reloading/tensorflow/README.md new file mode 100644 index 0000000000..2444484b77 --- /dev/null +++ b/test/live-reloading/tensorflow/README.md @@ -0,0 +1,11 @@ +## Live-reloading model APIs + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +The model live-reloading feature is automatically enabled 1 for the TensorFlow predictors. This means that any TensorFLow examples found in the [examples](../..) directory will already have this running. + +The live-reloading is a feature that reloads models at run-time from (a) specified S3 bucket(s) in the `cortex.yaml` config of each API. Models are added/removed from the API when the said models are added/removed from the S3 bucket(s) or reloaded when the models are edited. More on this in the [docs](insert-link). + +--- + +*1: The live-reloading feature for the TensorFlow predictor is disabled when Inferentia resources (`compute.inf`) are added to the API and `processes_per_replica` > 1.* diff --git a/test/model-caching/onnx/multi-model-classifier/README.md b/test/model-caching/onnx/multi-model-classifier/README.md new file mode 100644 index 0000000000..bf5fc906cb --- /dev/null +++ b/test/model-caching/onnx/multi-model-classifier/README.md @@ -0,0 +1,77 @@ +# Multi-Model Classifier API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example deploys ResNet50, MobileNet and ShuffleNet models in one API. Query parameters are used for selecting the model and the version. + +Since model caching is enabled, there can only be 2 models loaded into memory - loading a 3rd one will lead to the removal of the least recently used one. 
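To make the eviction rule concrete, here is a minimal, self-contained sketch of a least-recently-used policy with a capacity of 2 (purely illustrative; it is not Cortex's internal implementation, and the model names and `loader` are placeholders):

```python
from collections import OrderedDict


class LRUModelCache:
    """Keeps at most `capacity` models in memory and evicts the least recently used one."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.models = OrderedDict()  # model name -> loaded model object

    def get(self, name, loader):
        if name in self.models:
            self.models.move_to_end(name)  # mark as most recently used
            return self.models[name]
        if len(self.models) >= self.capacity:
            evicted, _ = self.models.popitem(last=False)  # drop the least recently used model
            print(f"evicting {evicted} from memory")
        self.models[name] = loader(name)
        return self.models[name]


cache = LRUModelCache(capacity=2)
cache.get("resnet50", lambda name: f"<{name} session>")
cache.get("mobilenet", lambda name: f"<{name} session>")
cache.get("shufflenet", lambda name: f"<{name} session>")  # prints "evicting resnet50 from memory"
```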
To witness the adding/removal process of models, check the logs of the API by running `cortex logs multi-model-classifier` once the API is up. + +The example can be run on both CPU and on GPU hardware. + +## Sample Prediction + +Deploy the model by running: + +```bash +cortex deploy +``` + +And wait for it to become live by tracking its status with `cortex get --watch`. + +Once the API has been successfully deployed, export the API's endpoint for convenience. You can get the API's endpoint by running `cortex get multi-model-classifier`. + +```bash +export ENDPOINT=your-api-endpoint +``` + +When making a prediction with [sample.json](sample.json), the following image will be used: + +![cat](https://i.imgur.com/213xcvs.jpg) + +### ResNet50 Classifier + +Make a request to the ResNet50 model: + +```bash +curl "${ENDPOINT}?model=resnet50" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"label": "tabby", "model": {"name": "resnet50", "version": "latest"}} +``` + +### MobileNet Classifier + +Make a request to the MobileNet model: + +```bash +curl "${ENDPOINT}?model=mobilenet" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"label": "tabby", "model": {"name": "mobilenet", "version": "latest"}} +``` + +### ShuffleNet Classifier + +At this point, there are 2 models loaded into memory (as specified by `cache_size`). Loading `ShuffleNet` as well will lead to the removal of the least recently used model - in this case, it will be the ResNet50 model that will get evicted. Since the `disk_cache_size` is set to 3, no model will be removed from disk. + +Make a request to the ShuffleNet model: + +```bash +curl "${ENDPOINT}?model=shufflenet" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"label": "Egyptian_cat", "model": {"name": "shufflenet", "version": "latest"}} +``` + +--- + +Now, inspect `cortex get multi-model-classifier` to see when and which models were removed in this process of making requests to different versions of the same model. diff --git a/test/model-caching/onnx/multi-model-classifier/cortex.yaml b/test/model-caching/onnx/multi-model-classifier/cortex.yaml new file mode 100644 index 0000000000..f074721fd3 --- /dev/null +++ b/test/model-caching/onnx/multi-model-classifier/cortex.yaml @@ -0,0 +1,22 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: multi-model-classifier + kind: RealtimeAPI + predictor: + type: onnx + path: predictor.py + models: + paths: + - name: resnet50 + model_path: s3://cortex-examples/onnx/resnet50/ + - name: mobilenet + model_path: s3://cortex-examples/onnx/mobilenet/ + - name: shufflenet + model_path: s3://cortex-examples/onnx/shufflenet/ + cache_size: 2 + disk_cache_size: 3 + config: + image-classifier-classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + image-resize: 224 + compute: + mem: 2G diff --git a/test/model-caching/onnx/multi-model-classifier/predictor.py b/test/model-caching/onnx/multi-model-classifier/predictor.py new file mode 100644 index 0000000000..6ab949a24c --- /dev/null +++ b/test/model-caching/onnx/multi-model-classifier/predictor.py @@ -0,0 +1,99 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import numpy as np +import cv2, requests +from scipy.special import softmax + + +def get_url_image(url_image): + """ + Get numpy image from URL image. + """ + resp = requests.get(url_image, stream=True).raw + image = np.asarray(bytearray(resp.read()), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + return image + + +def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA): + """ + Resize a numpy image. + """ + dim = None + (h, w) = image.shape[:2] + + if width is None and height is None: + return image + + if width is None: + # calculate the ratio of the height and construct the dimensions + r = height / float(h) + dim = (int(w * r), height) + else: + # calculate the ratio of the width and construct the dimensions + r = width / float(w) + dim = (width, int(h * r)) + + resized = cv2.resize(image, dim, interpolation=inter) + + return resized + + +def preprocess(img_data): + """ + Normalize input for inference. + """ + # move pixel color dimension to position 0 + img = np.moveaxis(img_data, 2, 0) + + mean_vec = np.array([0.485, 0.456, 0.406]) + stddev_vec = np.array([0.229, 0.224, 0.225]) + norm_img_data = np.zeros(img.shape).astype("float32") + for i in range(img.shape[0]): + # for each pixel in each channel, divide the value by 255 to get value between [0, 1] and then normalize + norm_img_data[i, :, :] = (img[i, :, :] / 255 - mean_vec[i]) / stddev_vec[i] + + # extend to batch size of 1 + norm_img_data = norm_img_data[np.newaxis, ...] + return norm_img_data + + +def postprocess(results): + """ + Eliminates all dimensions of size 1, softmaxes the input and then returns the index of the element with the highest value. 
+ """ + squeezed = np.squeeze(results) + maxed = softmax(squeezed) + result = np.argmax(maxed) + return result + + +class ONNXPredictor: + def __init__(self, onnx_client, config): + # onnx client + self.client = onnx_client + + # for image classifiers + classes = requests.get(config["image-classifier-classes"]).json() + self.image_classes = [classes[str(k)][1] for k in range(len(classes))] + self.resize_value = config["image-resize"] + + def predict(self, payload, query_params): + # get request params + model_name = query_params["model"] + model_version = query_params.get("version", "latest") + img_url = payload["url"] + + # process the input + img = get_url_image(img_url) + img = image_resize(img, height=self.resize_value) + img = preprocess(img) + + # predict + results = self.client.predict(img, model_name, model_version)[0] + + # interpret result + result = postprocess(results) + predicted_label = self.image_classes[result] + + return {"label": predicted_label, "model": {"name": model_name, "version": model_version}} diff --git a/test/model-caching/onnx/multi-model-classifier/requirements.txt b/test/model-caching/onnx/multi-model-classifier/requirements.txt new file mode 100644 index 0000000000..212d089934 --- /dev/null +++ b/test/model-caching/onnx/multi-model-classifier/requirements.txt @@ -0,0 +1,2 @@ +opencv-python==4.2.0.34 +scipy==1.4.1 diff --git a/test/model-caching/onnx/multi-model-classifier/sample.json b/test/model-caching/onnx/multi-model-classifier/sample.json new file mode 100644 index 0000000000..4ee3aa45df --- /dev/null +++ b/test/model-caching/onnx/multi-model-classifier/sample.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/213xcvs.jpg" +} diff --git a/test/model-caching/python/mpg-estimator/README.md b/test/model-caching/python/mpg-estimator/README.md new file mode 100644 index 0000000000..e120ac8204 --- /dev/null +++ b/test/model-caching/python/mpg-estimator/README.md @@ -0,0 +1,75 @@ +# MPG Estimator API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example deploys an MPG estimator model of multiple versions in one API. Query parameters are used for selecting the model and the version. + +Since model caching is enabled, there can only be 2 models loaded into memory (counting the versioned models as well) - loading a 3rd one will lead to the removal of the least recently used one. To witness the adding/removal process of models, check the logs of the API by running `cortex logs mpg-estimator` once the API is up. + +The example can be run on both CPU and on GPU hardware. + +## Sample Prediction + +Deploy the model by running: + +```bash +cortex deploy +``` + +And wait for it to become live by tracking its status with `cortex get --watch`. + +Once the API has been successfully deployed, export the API's endpoint for convenience. You can get the API's endpoint by running `cortex get mpg-estimator`. + +```bash +export ENDPOINT=your-api-endpoint +``` + +### Version 1 + +Make a request version `1` of the `mpg-estimator` model: + +```bash +curl "${ENDPOINT}?model=resnet50&version=1" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"prediction": 26.929889872154185, "model": {"name": "mpg-estimator", "version": "1"}} +``` + +### Version 2 + +At this point, there is one model loaded into memory (as specified by `cache_size`). 
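For reference, the predictor added below selects the model with the `model` query parameter and the version with the optional `version` query parameter (defaulting to `latest`), so a request for a specific version takes roughly this shape:

```bash
# illustrative request shape; substitute the version you want to load
curl "${ENDPOINT}?model=mpg-estimator&version=2" -X POST -H "Content-Type: application/json" -d @sample.json
```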
Loading another versioned model as well will lead to the removal of the least recently used model - in this case, it will be version 1 that will get evicted. Since the `disk_cache_size` is set to 2, no model will be removed from disk. + +Make a request version `2` of the `mpg-estimator` model: + +```bash +curl "${ENDPOINT}?model=mobilenet" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"prediction": 26.929889872154185, "model": {"name": "mpg-estimator", "version": "1"}} +``` + +### Version 3 + +With the following request, version 2 of the model will have to be evicted from the memory. Since `disk_cache_size` is set to 2, this time, version 1 of the model will get removed from the disk. + +Make a request version `3` of the `mpg-estimator` model: + +```bash +curl "${ENDPOINT}?model=shufflenet" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"prediction": 26.929889872154185, "model": {"name": "mpg-estimator", "version": "1"}} +``` + +--- + +Now, inspect `cortex get mpg-estimator` to see when and which models were removed in this process of making requests to different versions of the same model. The same algorithm is applied to different models as well, not just for the versions of a specific model. diff --git a/test/model-caching/python/mpg-estimator/cortex.yaml b/test/model-caching/python/mpg-estimator/cortex.yaml new file mode 100644 index 0000000000..1d26879aaa --- /dev/null +++ b/test/model-caching/python/mpg-estimator/cortex.yaml @@ -0,0 +1,13 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: mpg-estimator + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + models: + paths: + - name: mpg-estimator + model_path: s3://cortex-examples/sklearn/mpg-estimator/linreg/ + cache_size: 1 + disk_cache_size: 2 diff --git a/test/model-caching/python/mpg-estimator/predictor.py b/test/model-caching/python/mpg-estimator/predictor.py new file mode 100644 index 0000000000..84aa206f41 --- /dev/null +++ b/test/model-caching/python/mpg-estimator/predictor.py @@ -0,0 +1,28 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import mlflow.sklearn +import numpy as np + + +class PythonPredictor: + def __init__(self, config, python_client): + self.client = python_client + + def load_model(self, model_path): + return mlflow.sklearn.load_model(model_path) + + def predict(self, payload, query_params): + model_name = query_params["model"] + model_version = query_params.get("version", "latest") + + model = self.client.get_model(model_name, model_version) + model_input = [ + payload["cylinders"], + payload["displacement"], + payload["horsepower"], + payload["weight"], + payload["acceleration"], + ] + result = model.predict([model_input]).item() + + return {"prediction": result, "model": {"name": model_name, "version": model_version}} diff --git a/test/model-caching/python/mpg-estimator/requirements.txt b/test/model-caching/python/mpg-estimator/requirements.txt new file mode 100644 index 0000000000..cbcad6b321 --- /dev/null +++ b/test/model-caching/python/mpg-estimator/requirements.txt @@ -0,0 +1,4 @@ +mlflow +pandas +numpy +scikit-learn==0.21.3 diff --git a/test/model-caching/python/mpg-estimator/sample.json b/test/model-caching/python/mpg-estimator/sample.json new file mode 100644 index 0000000000..2dbbca46dd --- /dev/null +++ b/test/model-caching/python/mpg-estimator/sample.json @@ -0,0 +1,7 @@ +{ + "cylinders": 4, + "displacement": 135, + "horsepower": 84, + "weight": 2490, + "acceleration": 15.7 +} diff --git a/test/model-caching/tensorflow/multi-model-classifier/README.md b/test/model-caching/tensorflow/multi-model-classifier/README.md new file mode 100644 index 0000000000..9fd921884b --- /dev/null +++ b/test/model-caching/tensorflow/multi-model-classifier/README.md @@ -0,0 +1,77 @@ +# Multi-Model Classifier API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example deploys Iris, ResNet50 and Inception models in one API. Query parameters are used for selecting the model. + +Since model caching is enabled, there can only be 2 models loaded into memory - loading a 3rd one will lead to the removal of the least recently used one. To witness the adding/removal process of models, check the logs of the API by running `cortex logs multi-model-classifier` once the API is up. + +The example can be run on both CPU and on GPU hardware. + +## Sample Prediction + +Deploy the model by running: + +```bash +cortex deploy +``` + +And wait for it to become live by tracking its status with `cortex get --watch`. + +Once the API has been successfully deployed, export the APIs endpoint. You can get the API's endpoint by running `cortex get multi-model-classifier`. 
+ +```bash +export ENDPOINT=your-api-endpoint +``` + +When making a prediction with [sample-image.json](sample-image.json), the following image will be used: + +![sports car](https://i.imgur.com/zovGIKD.png) + +### ResNet50 Classifier + +Make a request to the ResNet50 model: + +```bash +curl "${ENDPOINT}?model=resnet50" -X POST -H "Content-Type: application/json" -d @sample-image.json +``` + +The expected response is: + +```json +{"label": "sports_car"} +``` + +### Inception Classifier + +Make a request to the Inception model: + +```bash +curl "${ENDPOINT}?model=inception" -X POST -H "Content-Type: application/json" -d @sample-image.json +``` + +The expected response is: + +```json +{"label": "sports_car"} +``` + +### Iris Classifier + +At this point, there are 2 models loaded into memory (as specified by `cache_size`). Loading the `iris` classifier will lead to the removal of the least recently used model - in this case, it will be the ResNet50 model that will get evicted. Since the `disk_cache_size` is set to 3, no model will be removed from disk. + +Make a request to the Iris model: + +```bash +curl "${ENDPOINT}?model=iris" -X POST -H "Content-Type: application/json" -d @sample-iris.json +``` + +The expected response is: + +```json +{"label": "setosa"} +``` + +--- + +Now, inspect `cortex get multi-model-classifier` to see when and which models were removed in this process of making requests to different versions of the same model. diff --git a/test/model-caching/tensorflow/multi-model-classifier/cortex.yaml b/test/model-caching/tensorflow/multi-model-classifier/cortex.yaml new file mode 100644 index 0000000000..4a165d177d --- /dev/null +++ b/test/model-caching/tensorflow/multi-model-classifier/cortex.yaml @@ -0,0 +1,32 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: multi-model-classifier + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + models: + paths: + - name: inception + model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ + - name: iris + model_path: s3://cortex-examples/tensorflow/iris-classifier/nn/ + - name: resnet50 + model_path: s3://cortex-examples/tensorflow/resnet50/ + cache_size: 2 + disk_cache_size: 3 + config: + models: + iris: + labels: ["setosa", "versicolor", "virginica"] + resnet50: + input_shape: [224, 224] + input_key: input + output_key: output + inception: + input_shape: [224, 224] + input_key: images + output_key: classes + image-classifier-classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + compute: + mem: 2G diff --git a/test/model-caching/tensorflow/multi-model-classifier/predictor.py b/test/model-caching/tensorflow/multi-model-classifier/predictor.py new file mode 100644 index 0000000000..d0914b8411 --- /dev/null +++ b/test/model-caching/tensorflow/multi-model-classifier/predictor.py @@ -0,0 +1,63 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import requests +import numpy as np +import cv2 + + +def get_url_image(url_image): + """ + Get numpy image from URL image. 
+ """ + resp = requests.get(url_image, stream=True).raw + image = np.asarray(bytearray(resp.read()), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + return image + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config): + self.client = tensorflow_client + + # for image classifiers + classes = requests.get(config["image-classifier-classes"]).json() + self.image_classes = [classes[str(k)][1] for k in range(len(classes))] + + # assign "models"' key value to self.config for ease of use + self.config = config["models"] + + # for iris classifier + self.iris_labels = self.config["iris"]["labels"] + + def predict(self, payload, query_params): + model_name = query_params["model"] + model_version = query_params.get("version", "latest") + predicted_label = None + + if model_name == "iris": + prediction = self.client.predict(payload["input"], model_name, model_version) + predicted_class_id = int(prediction["class_ids"][0]) + predicted_label = self.iris_labels[predicted_class_id] + + elif model_name in ["resnet50", "inception"]: + predicted_label = self.predict_image_classifier(model_name, payload["url"]) + + return {"label": predicted_label, "model": {"model": model_name, "version": model_version}} + + def predict_image_classifier(self, model, img_url): + img = get_url_image(img_url) + img = cv2.resize( + img, tuple(self.config[model]["input_shape"]), interpolation=cv2.INTER_NEAREST + ) + if model == "inception": + img = img.astype("float32") / 255 + img = {self.config[model]["input_key"]: img[np.newaxis, ...]} + + results = self.client.predict(img, model)[self.config[model]["output_key"]] + result = np.argmax(results) + if model == "inception": + result -= 1 + predicted_label = self.image_classes[result] + + return predicted_label diff --git a/test/model-caching/tensorflow/multi-model-classifier/requirements.txt b/test/model-caching/tensorflow/multi-model-classifier/requirements.txt new file mode 100644 index 0000000000..7e2fba5e6c --- /dev/null +++ b/test/model-caching/tensorflow/multi-model-classifier/requirements.txt @@ -0,0 +1 @@ +Pillow diff --git a/test/model-caching/tensorflow/multi-model-classifier/sample-image.json b/test/model-caching/tensorflow/multi-model-classifier/sample-image.json new file mode 100644 index 0000000000..95200916c7 --- /dev/null +++ b/test/model-caching/tensorflow/multi-model-classifier/sample-image.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/zovGIKD.png" +} diff --git a/test/model-caching/tensorflow/multi-model-classifier/sample-iris.json b/test/model-caching/tensorflow/multi-model-classifier/sample-iris.json new file mode 100644 index 0000000000..67c03827f2 --- /dev/null +++ b/test/model-caching/tensorflow/multi-model-classifier/sample-iris.json @@ -0,0 +1,8 @@ +{ + "input": { + "sepal_length": 5.2, + "sepal_width": 3.6, + "petal_length": 1.4, + "petal_width": 0.3 + } +} diff --git a/test/onnx/iris-classifier/README.md b/test/onnx/iris-classifier/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/onnx/iris-classifier/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. 
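For a quick smoke test once the API is up, a request built from the files added below (`cortex.yaml`, `predictor.py`, and `sample.json`) would look roughly like this, assuming the endpoint reported by `cortex get iris-classifier` has been exported as `ENDPOINT`:

```bash
# illustrative request; the bundled sample should be classified as "setosa"
curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d @sample.json
```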
diff --git a/test/onnx/iris-classifier/cortex.yaml b/test/onnx/iris-classifier/cortex.yaml new file mode 100644 index 0000000000..00b8a61112 --- /dev/null +++ b/test/onnx/iris-classifier/cortex.yaml @@ -0,0 +1,10 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: iris-classifier + kind: RealtimeAPI + predictor: + type: onnx + path: predictor.py + model_path: s3://cortex-examples/onnx/iris-classifier/ + monitoring: + model_type: classification diff --git a/test/onnx/iris-classifier/predictor.py b/test/onnx/iris-classifier/predictor.py new file mode 100644 index 0000000000..b135129e14 --- /dev/null +++ b/test/onnx/iris-classifier/predictor.py @@ -0,0 +1,20 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +labels = ["setosa", "versicolor", "virginica"] + + +class ONNXPredictor: + def __init__(self, onnx_client, config): + self.client = onnx_client + + def predict(self, payload): + model_input = [ + payload["sepal_length"], + payload["sepal_width"], + payload["petal_length"], + payload["petal_width"], + ] + + prediction = self.client.predict(model_input) + predicted_class_id = prediction[0][0] + return labels[predicted_class_id] diff --git a/test/onnx/iris-classifier/sample.json b/test/onnx/iris-classifier/sample.json new file mode 100644 index 0000000000..252c666b3a --- /dev/null +++ b/test/onnx/iris-classifier/sample.json @@ -0,0 +1,6 @@ +{ + "sepal_length": 5.2, + "sepal_width": 3.6, + "petal_length": 1.4, + "petal_width": 0.3 +} diff --git a/test/onnx/iris-classifier/xgboost.ipynb b/test/onnx/iris-classifier/xgboost.ipynb new file mode 100644 index 0000000000..d4e1497360 --- /dev/null +++ b/test/onnx/iris-classifier/xgboost.ipynb @@ -0,0 +1,244 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "iris_xgboost.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "IiTxCwB7t6Ef", + "colab_type": "text" + }, + "source": [ + "# Training an Iris classifier using XGBoost\n", + "\n", + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", + "\n", + "In this notebook, we'll show how to train a classifier trained on the [iris data set](https://archive.ics.uci.edu/ml/datasets/iris) using XGBoost." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "j6QdLAUpuW7r", + "colab_type": "text" + }, + "source": [ + "## Install Dependencies\n", + "First, we'll install our dependencies:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BQE5z_kHj9jV", + "colab_type": "code", + "colab": {} + }, + "source": [ + "pip install xgboost==0.90 scikit-learn==0.21.* onnxmltools==1.5.* boto3==1.*" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yEVK-sLnumqn", + "colab_type": "text" + }, + "source": [ + "## Load the data\n", + "We can use scikit-learn to load the Iris dataset:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "tx9Xw0x0lfbl", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from sklearn.datasets import load_iris\n", + "from sklearn.model_selection import train_test_split\n", + "\n", + "iris = load_iris()\n", + "X, y = iris.data, iris.target\n", + "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "obGdgMm3urb2", + "colab_type": "text" + }, + "source": [ + "## Train the model\n", + "We'll use XGBoost's [`XGBClassifier`](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.XGBClassifier) to train the model:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jjYp8TaflhW0", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import xgboost as xgb\n", + "\n", + "xgb_model = xgb.XGBClassifier()\n", + "xgb_model = xgb_model.fit(X_train, y_train)\n", + "\n", + "print(\"Test data accuracy of the xgb classifier is {:.2f}\".format(xgb_model.score(X_test, y_test))) # Accuracy should be > 90%" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hdwu-wzJvJLb", + "colab_type": "text" + }, + "source": [ + "## Export the model\n", + "Now we can export the model in the ONNX format:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "AVgs2mkdllRn", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from onnxmltools.convert import convert_xgboost\n", + "from onnxconverter_common.data_types import FloatTensorType\n", + "\n", + "onnx_model = convert_xgboost(xgb_model, initial_types=[(\"input\", FloatTensorType([1, 4]))])\n", + "\n", + "with open(\"gbtree.onnx\", \"wb\") as f:\n", + " f.write(onnx_model.SerializeToString())" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ipVlP4yPxFxw", + "colab_type": "text" + }, + "source": [ + "## Upload the model to AWS\n", + "\n", + "Cortex loads models from AWS, so we need to upload the exported model." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3IqsfyylxLhy", + "colab_type": "text" + }, + "source": [ + "Set these variables to configure your AWS credentials and model upload path:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "lc9LBH1uHT_h", + "colab_type": "code", + "cellView": "form", + "colab": {} + }, + "source": [ + "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", + "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", + "S3_UPLOAD_PATH = \"s3://my-bucket/iris-classifier/gbtree.onnx\" #@param {type:\"string\"}\n", + "\n", + "import sys\n", + "import re\n", + "\n", + "if AWS_ACCESS_KEY_ID == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", + "\n", + "elif AWS_SECRET_ACCESS_KEY == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", + "\n", + "else:\n", + " try:\n", + " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", + " except:\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NXeuZsaQxUc8", + "colab_type": "text" + }, + "source": [ + "Upload the model to S3:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "YLmnWTEVsu55", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import boto3\n", + "\n", + "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", + "print(\"Uploading {} ...\".format(S3_UPLOAD_PATH), end = '')\n", + "s3.upload_file(\"gbtree.onnx\", bucket, key)\n", + "print(\" ✓\")" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aR-mmcUzyCV3", + "colab_type": "text" + }, + "source": [ + "\n", + "That's it! See the [example](https://github.com/cortexlabs/cortex/tree/master/examples/onnx/iris-classifier) for how to deploy the model as an API." + ] + } + ] +} diff --git a/test/onnx/multi-model-classifier/README.md b/test/onnx/multi-model-classifier/README.md new file mode 100644 index 0000000000..45a001378a --- /dev/null +++ b/test/onnx/multi-model-classifier/README.md @@ -0,0 +1,69 @@ +# Multi-Model Classifier API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example deploys ResNet50, MobileNet and ShuffleNet models in one API. Query parameters are used for selecting the model. + +The example can be run on both CPU and on GPU hardware. + +## Sample Prediction + +Deploy the model by running: + +```bash +cortex deploy +``` + +And wait for it to become live by tracking its status with `cortex get --watch`. + +Once the API has been successfully deployed, export the API's endpoint for convenience. You can get the API's endpoint by running `cortex get multi-model-classifier`. 
+ +```bash +export ENDPOINT=your-api-endpoint +``` + +When making a prediction with [sample.json](sample.json), the following image will be used: + +![cat](https://i.imgur.com/213xcvs.jpg) + +### ResNet50 Classifier + +Make a request to the ResNet50 model: + +```bash +curl "${ENDPOINT}?model=resnet50" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"label": "tabby"} +``` + +### MobileNet Classifier + +Make a request to the MobileNet model: + +```bash +curl "${ENDPOINT}?model=mobilenet" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"label": "tabby"} +``` + +### ShuffleNet Classifier + +Make a request to the ShuffleNet model: + +```bash +curl "${ENDPOINT}?model=shufflenet" -X POST -H "Content-Type: application/json" -d @sample.json +``` + +The expected response is: + +```json +{"label": "Egyptian_cat"} +``` diff --git a/test/onnx/multi-model-classifier/cortex.yaml b/test/onnx/multi-model-classifier/cortex.yaml new file mode 100644 index 0000000000..63efb00ebb --- /dev/null +++ b/test/onnx/multi-model-classifier/cortex.yaml @@ -0,0 +1,20 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: multi-model-classifier + kind: RealtimeAPI + predictor: + type: onnx + path: predictor.py + models: + paths: + - name: resnet50 + model_path: s3://cortex-examples/onnx/resnet50/ + - name: mobilenet + model_path: s3://cortex-examples/onnx/mobilenet/ + - name: shufflenet + model_path: s3://cortex-examples/onnx/shufflenet/ + config: + image-classifier-classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + image-resize: 224 + compute: + mem: 2G diff --git a/test/onnx/multi-model-classifier/predictor.py b/test/onnx/multi-model-classifier/predictor.py new file mode 100644 index 0000000000..a057bc9724 --- /dev/null +++ b/test/onnx/multi-model-classifier/predictor.py @@ -0,0 +1,98 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import numpy as np +import cv2, requests +from scipy.special import softmax + + +def get_url_image(url_image): + """ + Get numpy image from URL image. + """ + resp = requests.get(url_image, stream=True).raw + image = np.asarray(bytearray(resp.read()), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + return image + + +def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA): + """ + Resize a numpy image. + """ + dim = None + (h, w) = image.shape[:2] + + if width is None and height is None: + return image + + if width is None: + # calculate the ratio of the height and construct the dimensions + r = height / float(h) + dim = (int(w * r), height) + else: + # calculate the ratio of the width and construct the dimensions + r = width / float(w) + dim = (width, int(h * r)) + + resized = cv2.resize(image, dim, interpolation=inter) + + return resized + + +def preprocess(img_data): + """ + Normalize input for inference. 
+ """ + # move pixel color dimension to position 0 + img = np.moveaxis(img_data, 2, 0) + + mean_vec = np.array([0.485, 0.456, 0.406]) + stddev_vec = np.array([0.229, 0.224, 0.225]) + norm_img_data = np.zeros(img.shape).astype("float32") + for i in range(img.shape[0]): + # for each pixel in each channel, divide the value by 255 to get value between [0, 1] and then normalize + norm_img_data[i, :, :] = (img[i, :, :] / 255 - mean_vec[i]) / stddev_vec[i] + + # extend to batch size of 1 + norm_img_data = norm_img_data[np.newaxis, ...] + return norm_img_data + + +def postprocess(results): + """ + Eliminates all dimensions of size 1, softmaxes the input and then returns the index of the element with the highest value. + """ + squeezed = np.squeeze(results) + maxed = softmax(squeezed) + result = np.argmax(maxed) + return result + + +class ONNXPredictor: + def __init__(self, onnx_client, config): + # onnx client + self.client = onnx_client + + # for image classifiers + classes = requests.get(config["image-classifier-classes"]).json() + self.image_classes = [classes[str(k)][1] for k in range(len(classes))] + self.resize_value = config["image-resize"] + + def predict(self, payload, query_params): + # get request params + model_name = query_params["model"] + img_url = payload["url"] + + # process the input + img = get_url_image(img_url) + img = image_resize(img, height=self.resize_value) + img = preprocess(img) + + # predict + results = self.client.predict(img, model_name)[0] + + # interpret result + result = postprocess(results) + predicted_label = self.image_classes[result] + + return {"label": predicted_label} diff --git a/test/onnx/multi-model-classifier/requirements.txt b/test/onnx/multi-model-classifier/requirements.txt new file mode 100644 index 0000000000..212d089934 --- /dev/null +++ b/test/onnx/multi-model-classifier/requirements.txt @@ -0,0 +1,2 @@ +opencv-python==4.2.0.34 +scipy==1.4.1 diff --git a/test/onnx/multi-model-classifier/sample.json b/test/onnx/multi-model-classifier/sample.json new file mode 100644 index 0000000000..4ee3aa45df --- /dev/null +++ b/test/onnx/multi-model-classifier/sample.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/213xcvs.jpg" +} diff --git a/test/onnx/yolov5-youtube/README.md b/test/onnx/yolov5-youtube/README.md new file mode 100644 index 0000000000..f7822449bb --- /dev/null +++ b/test/onnx/yolov5-youtube/README.md @@ -0,0 +1,61 @@ +# YOLOv5 Detection model + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example deploys a detection model trained using [ultralytics' yolo repo](https://github.com/ultralytics/yolov5) using ONNX. +We'll use the `yolov5s` model as an example here. +In can be used to run inference on youtube videos and returns the annotated video with bounding boxes. + +The example can be run on both CPU and on GPU hardware. + +## Sample Prediction + +Deploy the model by running: + +```bash +cortex deploy +``` + +And wait for it to become live by tracking its status with `cortex get --watch`. + +Once the API has been successfully deployed, export the API's endpoint for convenience. You can get the API's endpoint by running `cortex get yolov5-youtube`. + +```bash +export ENDPOINT=your-api-endpoint +``` + +When making a prediction with [sample.json](sample.json), [this](https://www.youtube.com/watch?v=aUdKzb4LGJI) youtube video will be used. 
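A request can be made either with `curl` (shown next) or from Python; a minimal Python sketch, assuming `ENDPOINT` has been exported as above and the `requests` package is installed:

```python
import os

import requests

# assumes the ENDPOINT environment variable was exported as shown above
endpoint = os.environ["ENDPOINT"]

# same payload as sample.json
payload = {"url": "https://www.youtube.com/watch?v=aUdKzb4LGJI"}

# the API responds with the annotated mp4, so write the response body to disk
response = requests.post(endpoint, json=payload)
response.raise_for_status()
with open("video.mp4", "wb") as f:
    f.write(response.content)
```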
+ +To make a request to the model: + +```bash +curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d @sample.json --output video.mp4 +``` + +After a few seconds, `curl` will save the resulting video `video.mp4` in the current working directory. The following is a sample of what should be exported: + +![yolov5](https://user-images.githubusercontent.com/26958764/86545098-e0dce900-bf34-11ea-83a7-8fd544afa11c.gif) + + +## Exporting ONNX + +To export a custom model from the repo, use the [`model/export.py`](https://github.com/ultralytics/yolov5/blob/master/models/export.py) script. +The only change we need to make is to change the line + +```bash +model.model[-1].export = True # set Detect() layer export=True +``` + +to + +```bash +model.model[-1].export = False +``` + +Originally, the ultralytics repo does not export postprocessing steps of the model, e.g. the conversion from the raw CNN outputs to bounding boxes. +With newer ONNX versions, these can be exported as part of the model making the deployment much easier. + +With this modified script, the ONNX graph used for this example has been exported using +```bash +python models/export.py --weights weights/yolov5s.pt --img 416 --batch 1 +``` diff --git a/test/onnx/yolov5-youtube/conda-packages.txt b/test/onnx/yolov5-youtube/conda-packages.txt new file mode 100644 index 0000000000..131fce12b5 --- /dev/null +++ b/test/onnx/yolov5-youtube/conda-packages.txt @@ -0,0 +1,3 @@ +conda-forge::ffmpeg=4.2.3 +conda-forge::youtube-dl +conda-forge::matplotlib diff --git a/test/onnx/yolov5-youtube/cortex.yaml b/test/onnx/yolov5-youtube/cortex.yaml new file mode 100644 index 0000000000..80d0393308 --- /dev/null +++ b/test/onnx/yolov5-youtube/cortex.yaml @@ -0,0 +1,13 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: yolov5-youtube + kind: RealtimeAPI + predictor: + type: onnx + path: predictor.py + model_path: s3://cortex-examples/onnx/yolov5-youtube/ + config: + iou_threshold: 0.5 + confidence_threshold: 0.6 + compute: + gpu: 1 # this is optional, since the api can also run on cpu diff --git a/test/onnx/yolov5-youtube/labels.json b/test/onnx/yolov5-youtube/labels.json new file mode 100644 index 0000000000..c86f2f812a --- /dev/null +++ b/test/onnx/yolov5-youtube/labels.json @@ -0,0 +1,82 @@ +[ + "person", + "bicycle", + "car", + "motorcycle", + "airplane", + "bus", + "train", + "truck", + "boat", + "traffic light", + "fire hydrant", + "stop sign", + "parking meter", + "bench", + "bird", + "cat", + "dog", + "horse", + "sheep", + "cow", + "elephant", + "bear", + "zebra", + "giraffe", + "backpack", + "umbrella", + "handbag", + "tie", + "suitcase", + "frisbee", + "skis", + "snowboard", + "sports ball", + "kite", + "baseball bat", + "baseball glove", + "skateboard", + "surfboard", + "tennis racket", + "bottle", + "wine glass", + "cup", + "fork", + "knife", + "spoon", + "bowl", + "banana", + "apple", + "sandwich", + "orange", + "broccoli", + "carrot", + "hot dog", + "pizza", + "donut", + "cake", + "chair", + "couch", + "potted plant", + "bed", + "dining table", + "toilet", + "tv", + "laptop", + "mouse", + "remote", + "keyboard", + "cell phone", + "microwave", + "oven", + "toaster", + "sink", + "refrigerator", + "book", + "clock", + "vase", + "scissors", + "teddy bear", + "hair drier", + "toothbrush" +] diff --git a/test/onnx/yolov5-youtube/predictor.py b/test/onnx/yolov5-youtube/predictor.py new file mode 100644 index 0000000000..b99d29d911 --- /dev/null +++ b/test/onnx/yolov5-youtube/predictor.py @@ -0,0 +1,65 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import json +import os +import io +import uuid +import utils + +import numpy as np +from matplotlib import pyplot as plt + +from starlette.responses import StreamingResponse + + +class ONNXPredictor: + def __init__(self, onnx_client, config): + self.client = onnx_client + # Get the input shape from the ONNX runtime + (signature,) = onnx_client.get_model()["input_signatures"].values() + _, _, height, width = signature["shape"] + self.input_size = (width, height) + self.config = config + with open("labels.json") as buf: + self.labels = json.load(buf) + color_map = plt.cm.tab20(np.linspace(0, 20, len(self.labels))) + self.color_map = [tuple(map(int, colors)) for colors in 255 * color_map] + + def postprocess(self, output): + boxes, obj_score, class_scores = np.split(output[0], [4, 5], axis=1) + boxes = utils.boxes_yolo_to_xyxy(boxes) + + # get the class-prediction & class confidences + class_id = class_scores.argmax(axis=1) + cls_score = class_scores[np.arange(len(class_scores)), class_id] + + confidence = obj_score.squeeze(axis=1) * cls_score + sel = confidence > self.config["confidence_threshold"] + boxes, class_id, confidence = boxes[sel], class_id[sel], confidence[sel] + sel = utils.nms(boxes, confidence, self.config["iou_threshold"]) + boxes, class_id, confidence = boxes[sel], class_id[sel], confidence[sel] + return boxes, class_id, confidence + + def predict(self, payload): + # download YT video + in_path = utils.download_from_youtube(payload["url"], self.input_size[1]) + out_path = f"{uuid.uuid1()}.mp4" + + # run predictions + with utils.FrameWriter(out_path, size=self.input_size) as writer: + for frame in utils.frame_reader(in_path, size=self.input_size): + x = (frame.astype(np.float32) / 255).transpose(2, 0, 1) + # 4 output tensors, the last three are intermediate values and + # not necessary for detection + output, *_ = self.client.predict(x[None]) + boxes, class_ids, confidence = self.postprocess(output) + utils.overlay_boxes(frame, boxes, class_ids, self.labels, self.color_map) + writer.write(frame) + + with open(out_path, "rb") as f: + output_buf = io.BytesIO(f.read()) + + os.remove(in_path) + os.remove(out_path) + + return StreamingResponse(output_buf, media_type="video/mp4") diff --git a/test/onnx/yolov5-youtube/requirements.txt b/test/onnx/yolov5-youtube/requirements.txt new file mode 100644 index 0000000000..2c779ca7f1 --- /dev/null +++ b/test/onnx/yolov5-youtube/requirements.txt @@ -0,0 +1,3 @@ +ffmpeg-python +aiofiles +opencv-python-headless diff --git a/test/onnx/yolov5-youtube/sample.json b/test/onnx/yolov5-youtube/sample.json new file mode 100644 index 0000000000..8421278f58 --- /dev/null +++ b/test/onnx/yolov5-youtube/sample.json @@ -0,0 +1,3 @@ +{ + "url": "https://www.youtube.com/watch?v=aUdKzb4LGJI" +} diff --git a/test/onnx/yolov5-youtube/utils.py b/test/onnx/yolov5-youtube/utils.py new file mode 100644 index 0000000000..c9bbeb73fe --- /dev/null +++ b/test/onnx/yolov5-youtube/utils.py @@ -0,0 +1,130 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)
+
+import youtube_dl
+import ffmpeg
+import numpy as np
+import cv2
+import uuid
+
+from pathlib import Path
+from typing import Iterable, Tuple
+
+
+def download_from_youtube(url: str, min_height: int) -> Path:
+    target = f"{uuid.uuid1()}.mp4"
+    ydl_opts = {
+        "outtmpl": target,
+        "format": f"worstvideo[vcodec=vp9][height>={min_height}]",
+    }
+    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
+        ydl.download([url])
+    # we need to glob in case youtube-dl adds a suffix
+    (path,) = Path().absolute().glob(f"{target}*")
+    return path
+
+
+def frame_reader(path: Path, size: Tuple[int, int]) -> Iterable[np.ndarray]:
+    width, height = size
+    # letterbox frames to fixed size
+    process = (
+        ffmpeg.input(path)
+        .filter("scale", size=f"{width}:{height}", force_original_aspect_ratio="decrease")
+        # Negative values for x and y center the padded video
+        .filter("pad", height=height, width=width, x=-1, y=-1)
+        .output("pipe:", format="rawvideo", pix_fmt="rgb24")
+        .run_async(pipe_stdout=True)
+    )
+
+    while True:
+        in_bytes = process.stdout.read(height * width * 3)
+        if not in_bytes:
+            process.wait()
+            break
+        frame = np.frombuffer(in_bytes, np.uint8).reshape([height, width, 3])
+        yield frame
+
+
+class FrameWriter:
+    def __init__(self, path: Path, size: Tuple[int, int]):
+        width, height = size
+        self.process = (
+            ffmpeg.input("pipe:", format="rawvideo", pix_fmt="rgb24", s=f"{width}x{height}")
+            .output(path, pix_fmt="yuv420p")
+            .overwrite_output()
+            .run_async(pipe_stdin=True)
+        )
+
+    def write(self, frame: np.ndarray):
+        self.process.stdin.write(frame.astype(np.uint8).tobytes())
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.__del__()
+
+    def __del__(self):
+        self.process.stdin.close()
+        self.process.wait()
+
+
+def nms(dets: np.ndarray, scores: np.ndarray, thresh: float) -> np.ndarray:
+    x1 = dets[:, 0]
+    y1 = dets[:, 1]
+    x2 = dets[:, 2]
+    y2 = dets[:, 3]
+
+    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
+    order = scores.argsort()[::-1]  # process boxes in descending order of confidence score
+
+    keep = []
+    while order.size > 0:
+        i = order[0]  # pick the remaining box with the highest score
+        keep.append(i)
+        xx1 = np.maximum(x1[i], x1[order[1:]])
+        yy1 = np.maximum(y1[i], y1[order[1:]])
+        xx2 = np.minimum(x2[i], x2[order[1:]])
+        yy2 = np.minimum(y2[i], y2[order[1:]])
+
+        w = np.maximum(0.0, xx2 - xx1 + 1)  # maximum width
+        h = np.maximum(0.0, yy2 - yy1 + 1)  # maximum height
+        inter = w * h
+        ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+        inds = np.where(ovr <= thresh)[0]
+        order = order[inds + 1]
+
+    return np.array(keep).astype(int)
+
+
+def boxes_yolo_to_xyxy(boxes: np.ndarray):
+    boxes[:, 0] -= boxes[:, 2] / 2
+    boxes[:, 1] -= boxes[:, 3] / 2
+    boxes[:, 2] = boxes[:, 2] + boxes[:, 0]
+    boxes[:, 3] = boxes[:, 3] + boxes[:, 1]
+    return boxes
+
+
+def overlay_boxes(frame, boxes, class_ids, label_map, color_map, line_thickness=None):
+    tl = (
+        line_thickness or round(0.0005 * (frame.shape[0] + frame.shape[1]) / 2) + 1
+    )  # line/font thickness
+
+    for class_id, (x1, y1, x2, y2) in zip(class_ids, boxes.astype(int)):
+        color = color_map[class_id]
+        label = label_map[class_id]
+        cv2.rectangle(frame, (x1, y1), (x2, y2), color, tl, cv2.LINE_AA)
+        tf = max(tl - 1, 1)  # font thickness
+        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+        x3, y3 = x1 + t_size[0], y1 - t_size[1] - 3
+        cv2.rectangle(frame, (x1, y1), (x3, y3), color, -1, cv2.LINE_AA)  # filled
+        cv2.putText(
+            frame,
+            label,
+
(x1, y1 - 2), + 0, + tl / 3, + [225, 255, 255], + thickness=tf, + lineType=cv2.LINE_AA, + ) diff --git a/test/pytorch/answer-generator/README.md b/test/pytorch/answer-generator/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/answer-generator/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/answer-generator/cortex.yaml b/test/pytorch/answer-generator/cortex.yaml new file mode 100644 index 0000000000..b336f257dd --- /dev/null +++ b/test/pytorch/answer-generator/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: answer-generator + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + gpu: 1 + mem: 5G diff --git a/test/pytorch/answer-generator/generator.py b/test/pytorch/answer-generator/generator.py new file mode 100644 index 0000000000..4a9aba613e --- /dev/null +++ b/test/pytorch/answer-generator/generator.py @@ -0,0 +1,44 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +# This file includes code which was modified from https://colab.research.google.com/drive/1KTLqiAOdKM_3RnBWfqgrvOQLqumUyOdA + +import torch +import torch.nn.functional as F + + +END_OF_TEXT = 50256 + + +def generate(model, conditioned_tokens, device): + generated_tokens = [] + while True: + result = recalc(model, conditioned_tokens, generated_tokens, device) + if result == END_OF_TEXT: + return generated_tokens[:-1] + + +def recalc(model, conditioned_tokens, generated_tokens, device): + indexed_tokens = conditioned_tokens + generated_tokens + tokens_tensor = torch.tensor([indexed_tokens]) + tokens_tensor = tokens_tensor.to(device) + with torch.no_grad(): + outputs = model(tokens_tensor) + predictions = outputs[0] + logits = predictions[0, -1, :] + filtered_logits = top_p_filtering(logits) + probabilities = F.softmax(filtered_logits, dim=-1) + next_token = torch.multinomial(probabilities, 1) + generated_tokens.append(next_token.item()) + return next_token.item() + + +def top_p_filtering(logits, top_p=0.9, filter_value=-float("Inf")): + assert logits.dim() == 1 + sorted_logits, sorted_indices = torch.sort(logits, descending=True) + cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) + sorted_indices_to_remove = cumulative_probs > top_p + sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() + sorted_indices_to_remove[..., 0] = 0 + indices_to_remove = sorted_indices[sorted_indices_to_remove] + logits[indices_to_remove] = filter_value + return logits diff --git a/test/pytorch/answer-generator/predictor.py b/test/pytorch/answer-generator/predictor.py new file mode 100644 index 0000000000..38c6622bf3 --- /dev/null +++ b/test/pytorch/answer-generator/predictor.py @@ -0,0 +1,36 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import wget +import torch +from transformers import GPT2Tokenizer, GPT2LMHeadModel, GPT2Config +import generator + + +class PythonPredictor: + def __init__(self, config): + medium_config = GPT2Config(n_embd=1024, n_layer=24, n_head=16) + model = GPT2LMHeadModel(medium_config) + wget.download( + "https://convaisharables.blob.core.windows.net/lsp/multiref/medium_ft.pkl", + "/tmp/medium_ft.pkl", + ) + + weights = torch.load("/tmp/medium_ft.pkl") + weights["lm_head.weight"] = weights["lm_head.decoder.weight"] + weights.pop("lm_head.decoder.weight", None) + + model.load_state_dict(weights) + + device = "cuda" if torch.cuda.is_available() else "cpu" + print(f"using device: {device}") + model.to(device) + model.eval() + + self.device = device + self.model = model + self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + + def predict(self, payload): + conditioned_tokens = self.tokenizer.encode(payload["text"]) + [generator.END_OF_TEXT] + prediction = generator.generate(self.model, conditioned_tokens, self.device) + return self.tokenizer.decode(prediction) diff --git a/test/pytorch/answer-generator/requirements.txt b/test/pytorch/answer-generator/requirements.txt new file mode 100644 index 0000000000..effba0ef1b --- /dev/null +++ b/test/pytorch/answer-generator/requirements.txt @@ -0,0 +1,3 @@ +torch +transformers==2.3.* +wget==3.* diff --git a/test/pytorch/answer-generator/sample.json b/test/pytorch/answer-generator/sample.json new file mode 100644 index 0000000000..aa91c9d2eb --- /dev/null +++ b/test/pytorch/answer-generator/sample.json @@ -0,0 +1,3 @@ +{ + "text": "What is machine learning?" +} diff --git a/test/pytorch/image-classifier-alexnet/README.md b/test/pytorch/image-classifier-alexnet/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/image-classifier-alexnet/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/image-classifier-alexnet/cortex.yaml b/test/pytorch/image-classifier-alexnet/cortex.yaml new file mode 100644 index 0000000000..74c463c0b0 --- /dev/null +++ b/test/pytorch/image-classifier-alexnet/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-alexnet + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + gpu: 1 + mem: 4G diff --git a/test/pytorch/image-classifier-alexnet/predictor.py b/test/pytorch/image-classifier-alexnet/predictor.py new file mode 100644 index 0000000000..a739ddbb8a --- /dev/null +++ b/test/pytorch/image-classifier-alexnet/predictor.py @@ -0,0 +1,39 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import requests +import torch +import torchvision +from torchvision import transforms +from PIL import Image +from io import BytesIO + + +class PythonPredictor: + def __init__(self, config): + device = "cuda" if torch.cuda.is_available() else "cpu" + print(f"using device: {device}") + + model = torchvision.models.alexnet(pretrained=True).to(device) + model.eval() + # https://github.com/pytorch/examples/blob/447974f6337543d4de6b888e244a964d3c9b71f6/imagenet/main.py#L198-L199 + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + + self.preprocess = transforms.Compose( + [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] + ) + self.labels = requests.get( + "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" + ).text.split("\n")[1:] + self.model = model + self.device = device + + def predict(self, payload): + image = requests.get(payload["url"]).content + img_pil = Image.open(BytesIO(image)) + img_tensor = self.preprocess(img_pil) + img_tensor.unsqueeze_(0) + img_tensor = img_tensor.to(self.device) + with torch.no_grad(): + prediction = self.model(img_tensor) + _, index = prediction[0].max(0) + return self.labels[index] diff --git a/test/pytorch/image-classifier-alexnet/requirements.txt b/test/pytorch/image-classifier-alexnet/requirements.txt new file mode 100644 index 0000000000..ac988bdf84 --- /dev/null +++ b/test/pytorch/image-classifier-alexnet/requirements.txt @@ -0,0 +1,2 @@ +torch +torchvision diff --git a/test/pytorch/image-classifier-alexnet/sample.json b/test/pytorch/image-classifier-alexnet/sample.json new file mode 100644 index 0000000000..eb72ddb869 --- /dev/null +++ b/test/pytorch/image-classifier-alexnet/sample.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/PzXprwl.jpg" +} diff --git a/test/pytorch/image-classifier-resnet50/README.md b/test/pytorch/image-classifier-resnet50/README.md new file mode 100644 index 0000000000..f13020d874 --- /dev/null +++ b/test/pytorch/image-classifier-resnet50/README.md @@ -0,0 +1,59 @@ +# Image Classifier with ResNet50 + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example implements an image recognition system using ResNet50, which allows for the recognition of up to 1000 classes. + +## Deploying + +There are 3 Cortex APIs available in this example: + +1. [cortex.yaml](cortex.yaml) - can be used with any instances. +1. [cortex_inf.yaml](cortex_inf.yaml) - to be used with `inf1` instances. +1. [cortex_gpu.yaml](cortex_gpu.yaml) - to be used with GPU instances. + +To deploy an API, run: + +```bash +cortex deploy +``` + +E.g. + +```bash +cortex deploy cortex_gpu.yaml +``` + +## Verifying your API + +Check that your API is live by running `cortex get image-classifier-resnet50`, and copy the example `curl` command that's shown. After the API is live, run the `curl` command, e.g. + +```bash +$ curl -X POST -H "Content-Type: application/json" -d @sample.json + +["tabby", "Egyptian_cat", "tiger_cat", "tiger", "plastic_bag"] +``` + +The following image is embedded in [sample.json](sample.json): + +![image](https://i.imgur.com/213xcvs.jpg) + +## Exporting SavedModels + +This example deploys models that we have built and uploaded to a public S3 bucket. 
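+These are the same objects that [predictor.py](predictor.py) downloads at startup using an anonymous S3 client. If you just want to pull one of them locally for inspection, the following is a minimal sketch; the bucket, key prefix, and unsigned-client pattern are taken directly from the configuration files and predictor in this example:
+
+```python
+import boto3
+from botocore import UNSIGNED
+from botocore.client import Config
+
+# anonymous client, since the cortex-examples bucket is public (same pattern as predictor.py)
+s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED))
+
+# download the CPU/GPU model weights (resnet50.pt) referenced by cortex.yaml into the working directory
+s3.download_file(
+    "cortex-examples", "pytorch/image-classifier-resnet50/resnet50.pt", "resnet50.pt"
+)
+```
+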
If you want to build the models yourself, follow these instructions. + +Run the following command to install the dependencies required for the [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook: + +```bash +pip install --extra-index-url=https://pip.repos.neuron.amazonaws.com \ + neuron-cc==1.0.9410.0+6008239556 \ + torch-neuron==1.0.825.0 +``` + +Also, `torchvision` has to be installed, but without any dependencies: + +```bash +pip install torchvision==0.4.2 --no-deps +``` + +The [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook will generate 2 torch models. One is saved as `resnet50.pt` which can be run on GPU or CPU, and another is saved as `resnet50_neuron.pt`, which can only be run on `inf1` instances. diff --git a/test/pytorch/image-classifier-resnet50/cortex.yaml b/test/pytorch/image-classifier-resnet50/cortex.yaml new file mode 100644 index 0000000000..d6c1cb64c9 --- /dev/null +++ b/test/pytorch/image-classifier-resnet50/cortex.yaml @@ -0,0 +1,15 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + config: + model_path: s3://cortex-examples/pytorch/image-classifier-resnet50 + model_name: resnet50.pt + device: cpu + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + compute: + cpu: 1 diff --git a/test/pytorch/image-classifier-resnet50/cortex_gpu.yaml b/test/pytorch/image-classifier-resnet50/cortex_gpu.yaml new file mode 100644 index 0000000000..7f06603504 --- /dev/null +++ b/test/pytorch/image-classifier-resnet50/cortex_gpu.yaml @@ -0,0 +1,16 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + config: + model_path: s3://cortex-examples/pytorch/image-classifier-resnet50 + model_name: resnet50.pt + device: gpu + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + compute: + gpu: 1 + cpu: 1 diff --git a/test/pytorch/image-classifier-resnet50/cortex_inf.yaml b/test/pytorch/image-classifier-resnet50/cortex_inf.yaml new file mode 100644 index 0000000000..55ce4ff793 --- /dev/null +++ b/test/pytorch/image-classifier-resnet50/cortex_inf.yaml @@ -0,0 +1,16 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + config: + model_path: s3://cortex-examples/pytorch/image-classifier-resnet50 + model_name: resnet50_neuron.pt + device: inf + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + compute: + inf: 1 + cpu: 1 diff --git a/test/pytorch/image-classifier-resnet50/generate_resnet50_models.ipynb b/test/pytorch/image-classifier-resnet50/generate_resnet50_models.ipynb new file mode 100644 index 0000000000..e4e1343d85 --- /dev/null +++ b/test/pytorch/image-classifier-resnet50/generate_resnet50_models.ipynb @@ -0,0 +1,121 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Generate Resnet50 Models\n", + "\n", + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "import numpy as np\n", + "import os\n", + "import torch_neuron\n", + "from torchvision import models" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Load Resnet50 model" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "model = models.resnet50(pretrained=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Compile model for Inferentia. Should have worked with 1 NeuronCores, but it appears that setting it to a minimum of 2 is required." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:Neuron:compiling module ResNet with neuron-cc\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Compiler args type is value is ['--num-neuroncores', '2']\n" + ] + } + ], + "source": [ + "model.eval()\n", + "batch_size = 1\n", + "image = torch.zeros([batch_size, 3, 224, 224], dtype=torch.float32)\n", + "model_neuron = torch.neuron.trace(model, example_inputs=[image], compiler_args=[\"--num-neuroncores\", \"2\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Save both models to disk" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "model_neuron.save(\"resnet50_neuron.pt\")\n", + "torch.save(model.state_dict(), \"resnet50.pt\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/test/pytorch/image-classifier-resnet50/predictor.py b/test/pytorch/image-classifier-resnet50/predictor.py new file mode 100644 index 0000000000..8059c4078c --- /dev/null +++ b/test/pytorch/image-classifier-resnet50/predictor.py @@ -0,0 +1,93 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import os +import torch +import cv2 +import numpy as np +import requests +import re +import boto3 +from botocore import UNSIGNED +from botocore.client import Config +from torchvision import models, transforms, datasets + + +def get_url_image(url_image): + """ + Get numpy image from URL image. 
+ """ + resp = requests.get(url_image, stream=True).raw + image = np.asarray(bytearray(resp.read()), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + return image + + +class PythonPredictor: + def __init__(self, config): + # load classes + classes = requests.get(config["classes"]).json() + self.idx2label = [classes[str(k)][1] for k in range(len(classes))] + + # create s3 client + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + # download the model + model_path = config["model_path"] + model_name = config["model_name"] + bucket, key = re.match("s3://(.+?)/(.+)", model_path).groups() + s3.download_file(bucket, os.path.join(key, model_name), model_name) + + # load the model + self.device = None + if config["device"] == "gpu": + self.device = torch.device("cuda") + self.model = models.resnet50() + self.model.load_state_dict(torch.load(model_name, map_location="cuda:0")) + self.model.eval() + self.model = self.model.to(self.device) + elif config["device"] == "cpu": + self.model = models.resnet50() + self.model.load_state_dict(torch.load(model_name)) + self.model.eval() + elif config["device"] == "inf": + import torch_neuron + + self.model = torch.jit.load(model_name) + else: + raise RuntimeError("invalid predictor: config: must be cpu, gpu, or inf") + + # save normalization transform for later use + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) + self.transform = transforms.Compose( + [ + transforms.ToPILImage(), + transforms.Resize(config["input_shape"]), + transforms.ToTensor(), + normalize, + ] + ) + + def predict(self, payload): + # preprocess image + image = get_url_image(payload["url"]) + image = self.transform(image) + image = torch.tensor(image.numpy()[np.newaxis, ...]) + + # predict + if self.device: + results = self.model(image.to(self.device)) + else: + results = self.model(image) + + # Get the top 5 results + top5_idx = results[0].sort()[1][-5:] + + # Lookup and print the top 5 labels + top5_labels = [self.idx2label[idx] for idx in top5_idx] + top5_labels = top5_labels[::-1] + + return top5_labels diff --git a/test/pytorch/image-classifier-resnet50/sample.json b/test/pytorch/image-classifier-resnet50/sample.json new file mode 100644 index 0000000000..4ee3aa45df --- /dev/null +++ b/test/pytorch/image-classifier-resnet50/sample.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/213xcvs.jpg" +} diff --git a/test/pytorch/iris-classifier/README.md b/test/pytorch/iris-classifier/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/iris-classifier/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/iris-classifier/cortex.yaml b/test/pytorch/iris-classifier/cortex.yaml new file mode 100644 index 0000000000..a8b590882d --- /dev/null +++ b/test/pytorch/iris-classifier/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: iris-classifier + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + config: + model: s3://cortex-examples/pytorch/iris-classifier/weights.pth + monitoring: + model_type: classification diff --git a/test/pytorch/iris-classifier/model.py b/test/pytorch/iris-classifier/model.py new file mode 100644 index 0000000000..fe29ff7b6d --- /dev/null +++ b/test/pytorch/iris-classifier/model.py @@ -0,0 +1,59 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score + + +class IrisNet(nn.Module): + def __init__(self): + super(IrisNet, self).__init__() + self.fc1 = nn.Linear(4, 100) + self.fc2 = nn.Linear(100, 100) + self.fc3 = nn.Linear(100, 3) + self.softmax = nn.Softmax(dim=1) + + def forward(self, X): + X = F.relu(self.fc1(X)) + X = self.fc2(X) + X = self.fc3(X) + X = self.softmax(X) + return X + + +if __name__ == "__main__": + iris = load_iris() + X, y = iris.data, iris.target + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42) + + train_X = Variable(torch.Tensor(X_train).float()) + test_X = Variable(torch.Tensor(X_test).float()) + train_y = Variable(torch.Tensor(y_train).long()) + test_y = Variable(torch.Tensor(y_test).long()) + + model = IrisNet() + + criterion = nn.CrossEntropyLoss() + + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + + for epoch in range(1000): + optimizer.zero_grad() + out = model(train_X) + loss = criterion(out, train_y) + loss.backward() + optimizer.step() + + if epoch % 100 == 0: + print("number of epoch {} loss {}".format(epoch, loss)) + + predict_out = model(test_X) + _, predict_y = torch.max(predict_out, 1) + + print("prediction accuracy {}".format(accuracy_score(test_y.data, predict_y.data))) + + torch.save(model.state_dict(), "weights.pth") diff --git a/test/pytorch/iris-classifier/predictor.py b/test/pytorch/iris-classifier/predictor.py new file mode 100644 index 0000000000..71994bb9ae --- /dev/null +++ b/test/pytorch/iris-classifier/predictor.py @@ -0,0 +1,50 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import re +import torch +import os +import boto3 +from botocore import UNSIGNED +from botocore.client import Config +from model import IrisNet + +labels = ["setosa", "versicolor", "virginica"] + + +class PythonPredictor: + def __init__(self, config): + # download the model + bucket, key = re.match("s3://(.+?)/(.+)", config["model"]).groups() + + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + s3.download_file(bucket, key, "/tmp/model.pth") + + # initialize the model + model = IrisNet() + model.load_state_dict(torch.load("/tmp/model.pth")) + model.eval() + + self.model = model + + def predict(self, payload): + # Convert the request to a tensor and pass it into the model + input_tensor = torch.FloatTensor( + [ + [ + payload["sepal_length"], + payload["sepal_width"], + payload["petal_length"], + payload["petal_width"], + ] + ] + ) + + # Run the prediction + output = self.model(input_tensor) + + # Translate the model output to the corresponding label string + return labels[torch.argmax(output[0])] diff --git a/test/pytorch/iris-classifier/requirements.txt b/test/pytorch/iris-classifier/requirements.txt new file mode 100644 index 0000000000..f2f30b7ef9 --- /dev/null +++ b/test/pytorch/iris-classifier/requirements.txt @@ -0,0 +1,2 @@ +torch +scikit-learn diff --git a/test/pytorch/iris-classifier/sample.json b/test/pytorch/iris-classifier/sample.json new file mode 100644 index 0000000000..0bc6836266 --- /dev/null +++ b/test/pytorch/iris-classifier/sample.json @@ -0,0 +1,6 @@ +{ + "sepal_length": 2.2, + "sepal_width": 3.6, + "petal_length": 1.4, + "petal_width": 3.3 +} diff --git a/test/pytorch/language-identifier/README.md b/test/pytorch/language-identifier/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/language-identifier/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/language-identifier/cortex.yaml b/test/pytorch/language-identifier/cortex.yaml new file mode 100644 index 0000000000..e8243a58fa --- /dev/null +++ b/test/pytorch/language-identifier/cortex.yaml @@ -0,0 +1,9 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: language-identifier + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + monitoring: + model_type: classification diff --git a/test/pytorch/language-identifier/predictor.py b/test/pytorch/language-identifier/predictor.py new file mode 100644 index 0000000000..e59ebe5012 --- /dev/null +++ b/test/pytorch/language-identifier/predictor.py @@ -0,0 +1,18 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import wget +import fasttext + + +class PythonPredictor: + def __init__(self, config): + wget.download( + "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin", "/tmp/model" + ) + + self.model = fasttext.load_model("/tmp/model") + + def predict(self, payload): + prediction = self.model.predict(payload["text"]) + language = prediction[0][0][-2:] + return language diff --git a/test/pytorch/language-identifier/requirements.txt b/test/pytorch/language-identifier/requirements.txt new file mode 100644 index 0000000000..a342ff2914 --- /dev/null +++ b/test/pytorch/language-identifier/requirements.txt @@ -0,0 +1,2 @@ +wget==3.* +fasttext==0.9.* diff --git a/test/pytorch/language-identifier/sample.json b/test/pytorch/language-identifier/sample.json new file mode 100644 index 0000000000..225c357392 --- /dev/null +++ b/test/pytorch/language-identifier/sample.json @@ -0,0 +1,3 @@ +{ + "text": "build machine learning apis" +} diff --git a/test/pytorch/multi-model-text-analyzer/README.md b/test/pytorch/multi-model-text-analyzer/README.md new file mode 100644 index 0000000000..0fbca390cd --- /dev/null +++ b/test/pytorch/multi-model-text-analyzer/README.md @@ -0,0 +1,51 @@ +# Multi-Model Analyzer API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example deploys a sentiment analyzer and a text summarizer in one API. Query parameters are used for selecting the model. + +The example can be run on both CPU and on GPU hardware. + +## Sample Prediction + +Deploy the model by running: + +```bash +cortex deploy +``` + +And wait for it to become live by tracking its status with `cortex get --watch`. + +Once the API has been successfully deployed, export the APIs endpoint. You can get the API's endpoint by running `cortex get text-analyzer`. + +```bash +export ENDPOINT=your-api-endpoint +``` + +### Sentiment Analyzer Classifier + +Make a request to the sentiment analyzer model: + +```bash +curl "${ENDPOINT}?model=sentiment" -X POST -H "Content-Type: application/json" -d @sample-sentiment.json +``` + +The expected response is: + +```json +{"label": "POSITIVE", "score": 0.9998506903648376} +``` + +### Text Summarizer + +Make a request to the text summarizer model: + +```bash +curl "${ENDPOINT}?model=summarizer" -X POST -H "Content-Type: application/json" -d @sample-summarizer.json +``` + +The expected response is: + +```text +Machine learning is the study of algorithms and statistical models that computer systems use to perform a specific task. It is seen as a subset of artificial intelligence. Machine learning algorithms are used in a wide variety of applications, such as email filtering and computer vision. In its application across business problems, machine learning is also referred to as predictive analytics. +``` diff --git a/test/pytorch/multi-model-text-analyzer/cortex.yaml b/test/pytorch/multi-model-text-analyzer/cortex.yaml new file mode 100644 index 0000000000..b2ece6bab9 --- /dev/null +++ b/test/pytorch/multi-model-text-analyzer/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: multi-model-text-analyzer + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + gpu: 1 + mem: 6G diff --git a/test/pytorch/multi-model-text-analyzer/predictor.py b/test/pytorch/multi-model-text-analyzer/predictor.py new file mode 100644 index 0000000000..03a8b03fbb --- /dev/null +++ b/test/pytorch/multi-model-text-analyzer/predictor.py @@ -0,0 +1,25 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +from transformers import pipeline +from starlette.responses import JSONResponse + + +class PythonPredictor: + def __init__(self, config): + device = 0 if torch.cuda.is_available() else -1 + print(f"using device: {'cuda' if device == 0 else 'cpu'}") + + self.analyzer = pipeline(task="sentiment-analysis", device=device) + self.summarizer = pipeline(task="summarization", device=device) + + def predict(self, query_params, payload): + model_name = query_params.get("model") + + if model_name == "sentiment": + return self.analyzer(payload["text"])[0] + elif model_name == "summarizer": + summary = self.summarizer(payload["text"]) + return summary[0]["summary_text"] + else: + return JSONResponse({"error": f"unknown model: {model_name}"}, status_code=400) diff --git a/test/pytorch/multi-model-text-analyzer/requirements.txt b/test/pytorch/multi-model-text-analyzer/requirements.txt new file mode 100644 index 0000000000..3f565d80e4 --- /dev/null +++ b/test/pytorch/multi-model-text-analyzer/requirements.txt @@ -0,0 +1,2 @@ +torch +transformers==2.9.* diff --git a/test/pytorch/multi-model-text-analyzer/sample-sentiment.json b/test/pytorch/multi-model-text-analyzer/sample-sentiment.json new file mode 100644 index 0000000000..de3a18a92a --- /dev/null +++ b/test/pytorch/multi-model-text-analyzer/sample-sentiment.json @@ -0,0 +1,3 @@ +{ + "text": "best day ever" +} diff --git a/test/pytorch/multi-model-text-analyzer/sample-summarizer.json b/test/pytorch/multi-model-text-analyzer/sample-summarizer.json new file mode 100644 index 0000000000..b19a1406d4 --- /dev/null +++ b/test/pytorch/multi-model-text-analyzer/sample-summarizer.json @@ -0,0 +1,3 @@ +{ + "text": "Machine learning (ML) is the scientific study of algorithms and statistical models that computer systems use to perform a specific task without using explicit instructions, relying on patterns and inference instead. It is seen as a subset of artificial intelligence. Machine learning algorithms build a mathematical model based on sample data, known as training data, in order to make predictions or decisions without being explicitly programmed to perform the task. Machine learning algorithms are used in a wide variety of applications, such as email filtering and computer vision, where it is difficult or infeasible to develop a conventional algorithm for effectively performing the task. Machine learning is closely related to computational statistics, which focuses on making predictions using computers. The study of mathematical optimization delivers methods, theory and application domains to the field of machine learning. Data mining is a field of study within machine learning, and focuses on exploratory data analysis through unsupervised learning. 
In its application across business problems, machine learning is also referred to as predictive analytics." +} diff --git a/test/pytorch/object-detector/README.md b/test/pytorch/object-detector/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/object-detector/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/object-detector/coco_labels.txt b/test/pytorch/object-detector/coco_labels.txt new file mode 100644 index 0000000000..8d950d95da --- /dev/null +++ b/test/pytorch/object-detector/coco_labels.txt @@ -0,0 +1,91 @@ +__background__ +person +bicycle +car +motorcycle +airplane +bus +train +truck +boat +traffic light +fire hydrant +N/A +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +N/A +backpack +umbrella +N/A +N/A +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +N/A +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +couch +potted plant +bed +N/A +dining table +N/A +N/A +toilet +N/A +tv +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +N/A +book +clock +vase +scissors +teddy bear +hair drier +toothbrush diff --git a/test/pytorch/object-detector/cortex.yaml b/test/pytorch/object-detector/cortex.yaml new file mode 100644 index 0000000000..9b06d29e9e --- /dev/null +++ b/test/pytorch/object-detector/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: object-detector + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + gpu: 1 + mem: 4G diff --git a/test/pytorch/object-detector/predictor.py b/test/pytorch/object-detector/predictor.py new file mode 100644 index 0000000000..52aa593774 --- /dev/null +++ b/test/pytorch/object-detector/predictor.py @@ -0,0 +1,49 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +from io import BytesIO + +import requests +import torch +from PIL import Image +from torchvision import models +from torchvision import transforms + + +class PythonPredictor: + def __init__(self, config): + self.device = "cuda" if torch.cuda.is_available() else "cpu" + print(f"using device: {self.device}") + + model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True).to(self.device) + model.eval() + + self.preprocess = transforms.Compose([transforms.ToTensor()]) + + with open("/mnt/project/coco_labels.txt") as f: + self.coco_labels = f.read().splitlines() + + self.model = model + + def predict(self, payload): + threshold = float(payload["threshold"]) + image = requests.get(payload["url"]).content + img_pil = Image.open(BytesIO(image)) + img_tensor = self.preprocess(img_pil).to(self.device) + img_tensor.unsqueeze_(0) + + with torch.no_grad(): + pred = self.model(img_tensor) + + predicted_class = [self.coco_labels[i] for i in pred[0]["labels"].cpu().tolist()] + predicted_boxes = [ + [(i[0], i[1]), (i[2], i[3])] for i in pred[0]["boxes"].detach().cpu().tolist() + ] + predicted_score = pred[0]["scores"].detach().cpu().tolist() + predicted_t = [predicted_score.index(x) for x in predicted_score if x > threshold] + if len(predicted_t) == 0: + return [], [] + + predicted_t = predicted_t[-1] + predicted_boxes = predicted_boxes[: predicted_t + 1] + predicted_class = predicted_class[: predicted_t + 1] + return predicted_boxes, predicted_class diff --git a/test/pytorch/object-detector/requirements.txt b/test/pytorch/object-detector/requirements.txt new file mode 100644 index 0000000000..ac988bdf84 --- /dev/null +++ b/test/pytorch/object-detector/requirements.txt @@ -0,0 +1,2 @@ +torch +torchvision diff --git a/test/pytorch/object-detector/sample.json b/test/pytorch/object-detector/sample.json new file mode 100644 index 0000000000..5005f13bad --- /dev/null +++ b/test/pytorch/object-detector/sample.json @@ -0,0 +1,4 @@ +{ + "url": "https://i.imgur.com/PzXprwl.jpg", + "threshold": "0.8" +} diff --git a/test/pytorch/question-generator/cortex.yaml b/test/pytorch/question-generator/cortex.yaml new file mode 100644 index 0000000000..a944303edb --- /dev/null +++ b/test/pytorch/question-generator/cortex.yaml @@ -0,0 +1,10 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: question-generator + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + mem: 6G diff --git a/test/pytorch/question-generator/dependencies.sh b/test/pytorch/question-generator/dependencies.sh new file mode 100644 index 0000000000..5040da2342 --- /dev/null +++ b/test/pytorch/question-generator/dependencies.sh @@ -0,0 +1,4 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +# torchvision isn’t required for this example, and pip was throwing warnings with it installed +pip uninstall torchvision -y diff --git a/test/pytorch/question-generator/predictor.py b/test/pytorch/question-generator/predictor.py new file mode 100644 index 0000000000..0b7692890c --- /dev/null +++ b/test/pytorch/question-generator/predictor.py @@ -0,0 +1,36 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +from transformers import AutoModelWithLMHead, AutoTokenizer +import spacy +import subprocess +import json + + +class PythonPredictor: + def __init__(self, config): + subprocess.call("python -m spacy download en_core_web_sm".split(" ")) + import en_core_web_sm + + self.tokenizer = AutoTokenizer.from_pretrained( + "mrm8488/t5-base-finetuned-question-generation-ap" + ) + self.model = AutoModelWithLMHead.from_pretrained( + "mrm8488/t5-base-finetuned-question-generation-ap" + ) + self.nlp = en_core_web_sm.load() + + def predict(self, payload): + context = payload["context"] + answer = payload["answer"] + max_length = int(payload.get("max_length", 64)) + + input_text = "answer: {} context: {} ".format(answer, context) + features = self.tokenizer([input_text], return_tensors="pt") + + output = self.model.generate( + input_ids=features["input_ids"], + attention_mask=features["attention_mask"], + max_length=max_length, + ) + + return {"result": self.tokenizer.decode(output[0])} diff --git a/test/pytorch/question-generator/requirements.txt b/test/pytorch/question-generator/requirements.txt new file mode 100644 index 0000000000..d7b5db27a0 --- /dev/null +++ b/test/pytorch/question-generator/requirements.txt @@ -0,0 +1,4 @@ +spacy==2.1.8 +-e git+https://github.com/huggingface/transformers.git#egg=transformers +--find-links https://download.pytorch.org/whl/torch_stable.html +torch==1.6.0+cpu diff --git a/test/pytorch/question-generator/sample.json b/test/pytorch/question-generator/sample.json new file mode 100644 index 0000000000..88c9fb0c92 --- /dev/null +++ b/test/pytorch/question-generator/sample.json @@ -0,0 +1,4 @@ +{ + "context": "Sarah works as a software engineer in London", + "answer": "London" +} diff --git a/test/pytorch/reading-comprehender/README.md b/test/pytorch/reading-comprehender/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/reading-comprehender/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/reading-comprehender/cortex.yaml b/test/pytorch/reading-comprehender/cortex.yaml new file mode 100644 index 0000000000..ba89862c78 --- /dev/null +++ b/test/pytorch/reading-comprehender/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: reading-comprehender + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + gpu: 1 + mem: 4G diff --git a/test/pytorch/reading-comprehender/predictor.py b/test/pytorch/reading-comprehender/predictor.py new file mode 100644 index 0000000000..7b86ac4770 --- /dev/null +++ b/test/pytorch/reading-comprehender/predictor.py @@ -0,0 +1,25 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +from allennlp.predictors.predictor import Predictor as AllenNLPPredictor + + +class PythonPredictor: + def __init__(self, config): + self.device = "cuda" if torch.cuda.is_available() else "cpu" + print(f"using device: {self.device}") + + cuda_device = -1 + if self.device == "cuda": + cuda_device = 0 + + self.predictor = AllenNLPPredictor.from_path( + "https://storage.googleapis.com/allennlp-public-models/bidaf-elmo-model-2018.11.30-charpad.tar.gz", + cuda_device=cuda_device, + ) + + def predict(self, payload): + prediction = self.predictor.predict( + passage=payload["passage"], question=payload["question"] + ) + return prediction["best_span_str"] diff --git a/test/pytorch/reading-comprehender/requirements.txt b/test/pytorch/reading-comprehender/requirements.txt new file mode 100644 index 0000000000..13dd5fbdba --- /dev/null +++ b/test/pytorch/reading-comprehender/requirements.txt @@ -0,0 +1 @@ +allennlp==0.9.* diff --git a/test/pytorch/reading-comprehender/sample.json b/test/pytorch/reading-comprehender/sample.json new file mode 100644 index 0000000000..14f60455bc --- /dev/null +++ b/test/pytorch/reading-comprehender/sample.json @@ -0,0 +1,4 @@ +{ + "passage": "Cortex Labs is building machine learning infrastructure for deploying models in production", + "question": "What does Cortex Labs do?" +} diff --git a/test/pytorch/search-completer/README.md b/test/pytorch/search-completer/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/search-completer/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/search-completer/cortex.yaml b/test/pytorch/search-completer/cortex.yaml new file mode 100644 index 0000000000..cd73458149 --- /dev/null +++ b/test/pytorch/search-completer/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: search-completer + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + gpu: 1 + mem: 4G diff --git a/test/pytorch/search-completer/predictor.py b/test/pytorch/search-completer/predictor.py new file mode 100644 index 0000000000..58d03ccc2c --- /dev/null +++ b/test/pytorch/search-completer/predictor.py @@ -0,0 +1,20 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +import regex +import tqdm + + +class PythonPredictor: + def __init__(self, config): + roberta = torch.hub.load("pytorch/fairseq", "roberta.large", force_reload=True) + roberta.eval() + device = "cuda" if torch.cuda.is_available() else "cpu" + print(f"using device: {device}") + roberta.to(device) + + self.model = roberta + + def predict(self, payload): + predictions = self.model.fill_mask(payload["text"] + " ", topk=5) + return [prediction[0] for prediction in predictions] diff --git a/test/pytorch/search-completer/requirements.txt b/test/pytorch/search-completer/requirements.txt new file mode 100644 index 0000000000..16b9215d31 --- /dev/null +++ b/test/pytorch/search-completer/requirements.txt @@ -0,0 +1,5 @@ +torch +regex +tqdm +dataclasses +hydra-core diff --git a/test/pytorch/search-completer/sample.json b/test/pytorch/search-completer/sample.json new file mode 100644 index 0000000000..dfd2a2f433 --- /dev/null +++ b/test/pytorch/search-completer/sample.json @@ -0,0 +1,3 @@ +{ + "text": "machine learning is" +} diff --git a/test/pytorch/sentiment-analyzer/README.md b/test/pytorch/sentiment-analyzer/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/pytorch/sentiment-analyzer/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/pytorch/sentiment-analyzer/cortex.yaml b/test/pytorch/sentiment-analyzer/cortex.yaml new file mode 100644 index 0000000000..1ed6c45bbf --- /dev/null +++ b/test/pytorch/sentiment-analyzer/cortex.yaml @@ -0,0 +1,10 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: sentiment-analyzer + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + # gpu: 1 # this is optional, since the api can also run on cpu diff --git a/test/pytorch/sentiment-analyzer/predictor.py b/test/pytorch/sentiment-analyzer/predictor.py new file mode 100644 index 0000000000..03b796d199 --- /dev/null +++ b/test/pytorch/sentiment-analyzer/predictor.py @@ -0,0 +1,15 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +from transformers import pipeline + + +class PythonPredictor: + def __init__(self, config): + device = 0 if torch.cuda.is_available() else -1 + print(f"using device: {'cuda' if device == 0 else 'cpu'}") + + self.analyzer = pipeline(task="sentiment-analysis", device=device) + + def predict(self, payload): + return self.analyzer(payload["text"])[0] diff --git a/test/pytorch/sentiment-analyzer/requirements.txt b/test/pytorch/sentiment-analyzer/requirements.txt new file mode 100644 index 0000000000..3f565d80e4 --- /dev/null +++ b/test/pytorch/sentiment-analyzer/requirements.txt @@ -0,0 +1,2 @@ +torch +transformers==2.9.* diff --git a/test/pytorch/sentiment-analyzer/sample.json b/test/pytorch/sentiment-analyzer/sample.json new file mode 100644 index 0000000000..7622d16ae0 --- /dev/null +++ b/test/pytorch/sentiment-analyzer/sample.json @@ -0,0 +1,3 @@ +{ + "text": "best day ever" +} diff --git a/test/pytorch/text-generator/README.md b/test/pytorch/text-generator/README.md new file mode 100644 index 0000000000..f99417e3b4 --- /dev/null +++ b/test/pytorch/text-generator/README.md @@ -0,0 +1,192 @@ +# Deploy machine learning models to production + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example shows how to deploy a realtime text generation API using a GPT-2 model from Hugging Face's transformers library. + +## Implement your Predictor + +1. Create a Python file named `predictor.py`. +2. Define a Predictor class with a constructor that loads and initializes the model. +3. Add a predict function that will accept a payload and return the generated text. + +```python +# predictor.py + +import torch +from transformers import GPT2Tokenizer, GPT2LMHeadModel + + +class PythonPredictor: + def __init__(self, config): + self.device = "cuda" if torch.cuda.is_available() else "cpu" + self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + self.model = GPT2LMHeadModel.from_pretrained("gpt2").to(self.device) + + def predict(self, payload): + input_length = len(payload["text"].split()) + tokens = self.tokenizer.encode(payload["text"], return_tensors="pt").to(self.device) + prediction = self.model.generate(tokens, max_length=input_length + 20, do_sample=True) + return self.tokenizer.decode(prediction[0]) +``` + +## Specify Python dependencies + +Create a `requirements.txt` file to specify the dependencies needed by `predictor.py`. Cortex will automatically install them into your runtime once you deploy: + +```python +# requirements.txt + +torch +transformers==3.0.* +``` + +## Deploy your model locally + +You can create APIs from any Python runtime that has access to Docker (e.g. 
the Python shell or a Jupyter notebook):
+
+```python
+import cortex
+
+cx_local = cortex.client("local")
+
+api_spec = {
+    "name": "text-generator",
+    "kind": "RealtimeAPI",
+    "predictor": {
+        "type": "python",
+        "path": "predictor.py"
+    }
+}
+
+cx_local.deploy(api_spec, project_dir=".", wait=True)
+```
+
+## Consume your API
+
+```python
+import requests
+
+endpoint = cx_local.get_api("text-generator")["endpoint"]
+payload = {"text": "hello world"}
+print(requests.post(endpoint, payload).text)
+```
+
+## Manage your APIs using the CLI
+
+Monitor the status of your API using `cortex get`:
+
+```bash
+$ cortex get --watch
+
+env     realtime api     status     last update   avg request   2XX
+local   text-generator   updating   8s            -             -
+```
+
+Show additional information for your API (e.g. its endpoint) using `cortex get <api name>`:
+
+```bash
+$ cortex get text-generator
+
+status   last update   avg request   2XX
+live     1m            -             -
+
+endpoint: http://localhost:8889
+```
+
+You can also stream logs from your API:
+
+```bash
+$ cortex logs text-generator
+
+...
+```
+
+## Deploy your model to AWS
+
+Cortex can automatically provision infrastructure on your AWS account and deploy your models as production-ready web services:
+
+```bash
+$ cortex cluster up
+```
+
+This creates a Cortex cluster in your AWS account, which will take approximately 15 minutes. After your cluster is created, you can deploy to your cluster by using the same code and configuration as before:
+
+```python
+import cortex
+
+cx_aws = cortex.client("aws")
+
+api_spec = {
+    "name": "text-generator",
+    "kind": "RealtimeAPI",
+    "predictor": {
+        "type": "python",
+        "path": "predictor.py"
+    }
+}
+
+cx_aws.deploy(api_spec, project_dir=".")
+```
+
+Monitor the status of your APIs from your CLI using `cortex get`:
+
+```bash
+$ cortex get --watch
+
+env     realtime api     status   up-to-date   requested   last update   avg request   2XX
+aws     text-generator   live     1            1           1m            -             -
+local   text-generator   live     1            1           17m           3.1285 s      1
+```
+
+The output above indicates that one replica of your API was requested and is available to serve predictions. Cortex will automatically launch more replicas if the load increases and will spin down replicas if there is unused capacity.
+
+Show additional information for your API (e.g. its endpoint) using `cortex get <api name>`:
+
+```bash
+$ cortex get text-generator --env aws
+
+status   up-to-date   requested   last update   avg request   2XX
+live     1            1           1m            -             -
+
+endpoint: https://***.execute-api.us-west-2.amazonaws.com/text-generator
+```
+
+## Run on GPUs
+
+If your Cortex cluster is using GPU instances (configured during cluster creation) or if you are running locally with an NVIDIA GPU, you can run your text generator API on GPUs. Add the `compute` field to your API configuration and re-deploy:
+
+```python
+api_spec = {
+    "name": "text-generator",
+    "kind": "RealtimeAPI",
+    "predictor": {
+        "type": "python",
+        "path": "predictor.py"
+    },
+    "compute": {
+        "gpu": 1
+    }
+}
+
+cx_aws.deploy(api_spec, project_dir=".")
+```
+
+As your new API is initializing, the old API will continue to respond to prediction requests. Once the API's status becomes "live" (with one up-to-date replica), traffic will be routed to the updated version.
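+For example, you can confirm that the API keeps serving predictions while the rollout is in progress. This is a minimal, illustrative sketch that simply reuses the endpoint lookup and `requests` call from the sections above:
+
+```python
+import requests
+
+# the endpoint stays the same while the new replica rolls out
+endpoint = cx_aws.get_api("text-generator")["endpoint"]
+payload = {"text": "hello world"}
+
+print(requests.post(endpoint, payload).text)
+```
+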
You can track the status of your API using `cortex get`: + +```bash +$ cortex get --env aws --watch + +realtime api status up-to-date stale requested last update avg request 2XX +text-generator updating 0 1 1 29s - - +``` + +## Cleanup + +Deleting APIs will free up cluster resources and allow Cortex to scale down to the minimum number of instances you specified during cluster creation: + +```python +cx_local.delete_api("text-generator") + +cx_aws.delete_api("text-generator") +``` diff --git a/test/pytorch/text-generator/deploy.ipynb b/test/pytorch/text-generator/deploy.ipynb new file mode 100644 index 0000000000..5ffbce9caa --- /dev/null +++ b/test/pytorch/text-generator/deploy.ipynb @@ -0,0 +1,80 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", + "\n", + "This example needs to run on a machine that supports Docker to deploy Cortex APIs locally (Colab users can still deploy to remote Cortex clusters)", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip3 install cortex\n", + "!pip3 install requests" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import cortex\n", + "\n", + "cx = cortex.client(\"local\")\n", + "\n", + "api_spec = {\n", + " \"name\": \"text-generator\",\n", + " \"kind\": \"RealtimeAPI\",\n", + " \"predictor\": {\n", + " \"type\": \"python\",\n", + " \"path\": \"predictor.py\"\n", + " }\n", + "}\n", + "\n", + "cx.deploy(api_spec, project_dir=\".\", wait=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import requests\n", + "\n", + "endpoint = cx.get_api(\"text-generator\")[\"endpoint\"]\n", + "payload = {\"text\": \"hello world\"}\n", + "print(requests.post(endpoint, payload).text)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/test/pytorch/text-generator/predictor.py b/test/pytorch/text-generator/predictor.py new file mode 100644 index 0000000000..b14d8abcc7 --- /dev/null +++ b/test/pytorch/text-generator/predictor.py @@ -0,0 +1,17 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +from transformers import GPT2Tokenizer, GPT2LMHeadModel + + +class PythonPredictor: + def __init__(self, config): + self.device = "cuda" if torch.cuda.is_available() else "cpu" + self.tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + self.model = GPT2LMHeadModel.from_pretrained("gpt2").to(self.device) + + def predict(self, payload): + input_length = len(payload["text"].split()) + tokens = self.tokenizer.encode(payload["text"], return_tensors="pt").to(self.device) + prediction = self.model.generate(tokens, max_length=input_length + 20, do_sample=True) + return self.tokenizer.decode(prediction[0]) diff --git a/test/pytorch/text-generator/requirements.txt b/test/pytorch/text-generator/requirements.txt new file mode 100644 index 0000000000..1447500abe --- /dev/null +++ b/test/pytorch/text-generator/requirements.txt @@ -0,0 +1,2 @@ +torch +transformers==3.0.* diff --git a/test/pytorch/text-summarizer/README.md b/test/pytorch/text-summarizer/README.md new file mode 100644 index 0000000000..4323c6e133 --- /dev/null +++ b/test/pytorch/text-summarizer/README.md @@ -0,0 +1,5 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. + +Please refer [here](https://sshleifer.github.io/blog_v2/jupyter/2020/03/12/bart.html) to learn more about BART. diff --git a/test/pytorch/text-summarizer/cortex.yaml b/test/pytorch/text-summarizer/cortex.yaml new file mode 100644 index 0000000000..9f7b620ca9 --- /dev/null +++ b/test/pytorch/text-summarizer/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: text-summarizer + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + gpu: 1 # this is optional, since the api can also run on cpu + mem: 6G diff --git a/test/pytorch/text-summarizer/predictor.py b/test/pytorch/text-summarizer/predictor.py new file mode 100644 index 0000000000..05652afd17 --- /dev/null +++ b/test/pytorch/text-summarizer/predictor.py @@ -0,0 +1,18 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +from transformers import pipeline + + +class PythonPredictor: + def __init__(self, config): + device = 0 if torch.cuda.is_available() else -1 + print(f"using device: {'cuda' if device == 0 else 'cpu'}") + + self.summarizer = pipeline(task="summarization", device=device) + + def predict(self, payload): + summary = self.summarizer( + payload["text"], num_beams=4, length_penalty=2.0, max_length=142, no_repeat_ngram_size=3 + ) + return summary[0]["summary_text"] diff --git a/test/pytorch/text-summarizer/requirements.txt b/test/pytorch/text-summarizer/requirements.txt new file mode 100644 index 0000000000..5afceb377e --- /dev/null +++ b/test/pytorch/text-summarizer/requirements.txt @@ -0,0 +1,2 @@ +transformers==2.9.* +torch diff --git a/test/pytorch/text-summarizer/sample.json b/test/pytorch/text-summarizer/sample.json new file mode 100644 index 0000000000..e54b77f18c --- /dev/null +++ b/test/pytorch/text-summarizer/sample.json @@ -0,0 +1,3 @@ +{ + "text": "Machine learning (ML) is the scientific study of algorithms and statistical models that computer systems use to perform a specific task without using explicit instructions, relying on patterns and inference instead. It is seen as a subset of artificial intelligence. Machine learning algorithms build a mathematical model based on sample data, known as training data, in order to make predictions or decisions without being explicitly programmed to perform the task. Machine learning algorithms are used in a wide variety of applications, such as email filtering and computer vision, where it is difficult or infeasible to develop a conventional algorithm for effectively performing the task. Machine learning is closely related to computational statistics, which focuses on making predictions using computers. The study of mathematical optimization delivers methods, theory and application domains to the field of machine learning. Data mining is a field of study within machine learning, and focuses on exploratory data analysis through unsupervised learning. In its application across business problems, machine learning is also referred to as predictive analytics." +} diff --git a/test/sklearn/iris-classifier/README.md b/test/sklearn/iris-classifier/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/sklearn/iris-classifier/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/sklearn/iris-classifier/cortex.yaml b/test/sklearn/iris-classifier/cortex.yaml new file mode 100644 index 0000000000..1f05c85eca --- /dev/null +++ b/test/sklearn/iris-classifier/cortex.yaml @@ -0,0 +1,15 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: iris-classifier + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + config: + bucket: cortex-examples + key: sklearn/iris-classifier/model.pkl + monitoring: + model_type: classification + compute: + cpu: 0.2 + mem: 200M diff --git a/test/sklearn/iris-classifier/predictor.py b/test/sklearn/iris-classifier/predictor.py new file mode 100644 index 0000000000..46edab0ad2 --- /dev/null +++ b/test/sklearn/iris-classifier/predictor.py @@ -0,0 +1,31 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import os +import boto3 +from botocore import UNSIGNED +from botocore.client import Config +import pickle + +labels = ["setosa", "versicolor", "virginica"] + + +class PythonPredictor: + def __init__(self, config): + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + s3.download_file(config["bucket"], config["key"], "/tmp/model.pkl") + self.model = pickle.load(open("/tmp/model.pkl", "rb")) + + def predict(self, payload): + measurements = [ + payload["sepal_length"], + payload["sepal_width"], + payload["petal_length"], + payload["petal_width"], + ] + + label_id = self.model.predict([measurements])[0] + return labels[label_id] diff --git a/test/sklearn/iris-classifier/requirements.txt b/test/sklearn/iris-classifier/requirements.txt new file mode 100644 index 0000000000..bbc213cf3e --- /dev/null +++ b/test/sklearn/iris-classifier/requirements.txt @@ -0,0 +1,2 @@ +boto3 +scikit-learn==0.21.3 diff --git a/test/sklearn/iris-classifier/sample.json b/test/sklearn/iris-classifier/sample.json new file mode 100644 index 0000000000..9e792863cd --- /dev/null +++ b/test/sklearn/iris-classifier/sample.json @@ -0,0 +1,6 @@ +{ + "sepal_length": 5.2, + "sepal_width": 3.6, + "petal_length": 1.5, + "petal_width": 0.3 +} diff --git a/test/sklearn/iris-classifier/trainer.py b/test/sklearn/iris-classifier/trainer.py new file mode 100644 index 0000000000..db1b047938 --- /dev/null +++ b/test/sklearn/iris-classifier/trainer.py @@ -0,0 +1,25 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import boto3 +import pickle + +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.linear_model import LogisticRegression + +# Train the model + +iris = load_iris() +data, labels = iris.data, iris.target +training_data, test_data, training_labels, test_labels = train_test_split(data, labels) + +model = LogisticRegression(solver="lbfgs", multi_class="multinomial") +model.fit(training_data, training_labels) +accuracy = model.score(test_data, test_labels) +print("accuracy: {:.2f}".format(accuracy)) + +# Upload the model + +pickle.dump(model, open("model.pkl", "wb")) +s3 = boto3.client("s3") +s3.upload_file("model.pkl", "cortex-examples", "sklearn/iris-classifier/model.pkl") diff --git a/test/sklearn/mpg-estimator/README.md b/test/sklearn/mpg-estimator/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/sklearn/mpg-estimator/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/sklearn/mpg-estimator/cortex.yaml b/test/sklearn/mpg-estimator/cortex.yaml new file mode 100644 index 0000000000..e6ffc969ee --- /dev/null +++ b/test/sklearn/mpg-estimator/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: mpg-estimator + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + config: + model: s3://cortex-examples/sklearn/mpg-estimator/linreg/ + monitoring: + model_type: regression diff --git a/test/sklearn/mpg-estimator/predictor.py b/test/sklearn/mpg-estimator/predictor.py new file mode 100644 index 0000000000..bb1c2ed19a --- /dev/null +++ b/test/sklearn/mpg-estimator/predictor.py @@ -0,0 +1,41 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import boto3 +from botocore import UNSIGNED +from botocore.client import Config +import mlflow.sklearn +import numpy as np +import re +import os + + +class PythonPredictor: + def __init__(self, config): + model_path = "/tmp/model" + os.makedirs(model_path, exist_ok=True) + + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + # download mlflow model folder from S3 + bucket, prefix = re.match("s3://(.+?)/(.+)", config["model"]).groups() + response = s3.list_objects_v2(Bucket=bucket, Prefix=prefix) + for s3_obj in response["Contents"]: + obj_key = s3_obj["Key"] + s3.download_file(bucket, obj_key, os.path.join(model_path, os.path.basename(obj_key))) + + self.model = mlflow.sklearn.load_model(model_path) + + def predict(self, payload): + model_input = [ + payload["cylinders"], + payload["displacement"], + payload["horsepower"], + payload["weight"], + payload["acceleration"], + ] + + result = self.model.predict([model_input]) + return np.asscalar(result) diff --git a/test/sklearn/mpg-estimator/requirements.txt b/test/sklearn/mpg-estimator/requirements.txt new file mode 100644 index 0000000000..cbcad6b321 --- /dev/null +++ b/test/sklearn/mpg-estimator/requirements.txt @@ -0,0 +1,4 @@ +mlflow +pandas +numpy +scikit-learn==0.21.3 diff --git a/test/sklearn/mpg-estimator/sample.json b/test/sklearn/mpg-estimator/sample.json new file mode 100644 index 0000000000..2dbbca46dd --- /dev/null +++ b/test/sklearn/mpg-estimator/sample.json @@ -0,0 +1,7 @@ +{ + "cylinders": 4, + "displacement": 135, + "horsepower": 84, + "weight": 2490, + "acceleration": 15.7 +} diff --git a/test/sklearn/mpg-estimator/trainer.py b/test/sklearn/mpg-estimator/trainer.py new file mode 100644 index 0000000000..f17b7d9c05 --- /dev/null +++ b/test/sklearn/mpg-estimator/trainer.py @@ -0,0 +1,25 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import mlflow.sklearn +import pandas as pd +import numpy as np +from sklearn.linear_model import LinearRegression +from sklearn.model_selection import train_test_split + + +df = pd.read_csv( + "https://www.uio.no/studier/emner/sv/oekonomi/ECON4150/v16/statacourse/datafiles/auto.csv" +) +df = df.replace("?", np.nan) +df = df.dropna() +df = df.drop(["name", "origin", "year"], axis=1) # drop categorical variables for simplicity +data = df.drop("mpg", axis=1) +labels = df[["mpg"]] + +training_data, test_data, training_labels, test_labels = train_test_split(data, labels) +model = LinearRegression() +model.fit(training_data, training_labels) +accuracy = model.score(test_data, test_labels) +print("accuracy: {:.2f}".format(accuracy)) + +mlflow.sklearn.save_model(model, "linreg") diff --git a/test/spacy/entity-recognizer/README.md b/test/spacy/entity-recognizer/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/spacy/entity-recognizer/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/spacy/entity-recognizer/cortex.yaml b/test/spacy/entity-recognizer/cortex.yaml new file mode 100644 index 0000000000..cc4dbbba38 --- /dev/null +++ b/test/spacy/entity-recognizer/cortex.yaml @@ -0,0 +1,10 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: entity-recognizer + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + mem: 1G diff --git a/test/spacy/entity-recognizer/predictor.py b/test/spacy/entity-recognizer/predictor.py new file mode 100644 index 0000000000..9d42a9de4c --- /dev/null +++ b/test/spacy/entity-recognizer/predictor.py @@ -0,0 +1,22 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import spacy +import subprocess + + +class PythonPredictor: + """ + Class to perform NER (named entity recognition) + """ + + def __init__(self, config): + subprocess.call("python -m spacy download en_core_web_md".split(" ")) + import en_core_web_md + + self.nlp = en_core_web_md.load() + + def predict(self, payload): + doc = self.nlp(payload["text"]) + proc = lambda ent: {"label": ent.label_, "start": ent.start, "end": ent.end} + out = {ent.text: proc(ent) for ent in doc.ents} + return out diff --git a/test/spacy/entity-recognizer/requirements.txt b/test/spacy/entity-recognizer/requirements.txt new file mode 100644 index 0000000000..568e4fc634 --- /dev/null +++ b/test/spacy/entity-recognizer/requirements.txt @@ -0,0 +1 @@ +spacy diff --git a/test/spacy/entity-recognizer/sample.json b/test/spacy/entity-recognizer/sample.json new file mode 100644 index 0000000000..ae0f0f4120 --- /dev/null +++ b/test/spacy/entity-recognizer/sample.json @@ -0,0 +1,3 @@ +{ + "text": "Lilium, a Munich-based startup that is designing and building vertical take-off and landing (VTOL) aircraft with speeds of up to 100 km/h that it plans eventually to run in its own taxi fleet, has closed a funding round of over $240 million — money that it plans to use to keep developing its aircraft, and to start building manufacturing facilities to produce more of them, for an expected launch date of 2025." +} diff --git a/test/tensorflow/image-classifier-inception/README.md b/test/tensorflow/image-classifier-inception/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/tensorflow/image-classifier-inception/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. 
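The Python Predictor examples above all follow the same minimal interface: a constructor that receives `config`, and a `predict` method that receives the request payload. As a rough way to exercise one of them outside of Cortex, the sketch below runs the spaCy entity recognizer shown above locally; it is not part of the example, and it assumes that example's `predictor.py` and `sample.json` are in the working directory and that `spacy` is installed:

```python
# local_test.py (hypothetical helper, not part of the example)
import json

from predictor import PythonPredictor  # the class defined in the spaCy example above

predictor = PythonPredictor(config={})  # this predictor does not read anything from config

with open("sample.json") as f:
    payload = json.load(f)

# prints a dict mapping each detected entity to its label and token span
print(predictor.predict(payload))
```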
diff --git a/test/tensorflow/image-classifier-inception/cortex.yaml b/test/tensorflow/image-classifier-inception/cortex.yaml new file mode 100644 index 0000000000..e5177788ba --- /dev/null +++ b/test/tensorflow/image-classifier-inception/cortex.yaml @@ -0,0 +1,13 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-inception + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ + monitoring: + model_type: classification + compute: + cpu: 1 + gpu: 1 diff --git a/test/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml b/test/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml new file mode 100644 index 0000000000..919870651c --- /dev/null +++ b/test/tensorflow/image-classifier-inception/cortex_server_side_batching.yaml @@ -0,0 +1,17 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-inception + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ + server_side_batching: + max_batch_size: 2 + batch_interval: 0.2s + threads_per_process: 2 + monitoring: + model_type: classification + compute: + cpu: 1 + gpu: 1 diff --git a/test/tensorflow/image-classifier-inception/inception.ipynb b/test/tensorflow/image-classifier-inception/inception.ipynb new file mode 100644 index 0000000000..46956e0e48 --- /dev/null +++ b/test/tensorflow/image-classifier-inception/inception.ipynb @@ -0,0 +1,211 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "inception.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "n8CwINQcEBKz", + "colab_type": "text" + }, + "source": [ + "# Exporting ImageNet Inception\n", + "\n", + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", + "\n", + "In this notebook, we'll show how to export the [pre-trained Imagenet Inception model](https://tfhub.dev/google/imagenet/inception_v3/classification/3) for serving." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3221z3P69fgf", + "colab_type": "text" + }, + "source": [ + "First, we'll install the required packages:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "_SdQpq7g9LiI", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install tensorflow==1.14.* tensorflow-hub==0.6.* boto3==1.*" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "I-k0gUpxDGkU", + "colab_type": "text" + }, + "source": [ + "Next, we'll download the model from TensorFlow Hub and export it for serving:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "z6QLCzB4BKMe", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import time\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def\n", + "\n", + "export_dir = \"export/\" + str(time.time()).split('.')[0]\n", + "builder = tf.saved_model.builder.SavedModelBuilder(export_dir)\n", + "\n", + "with tf.Session(graph=tf.Graph()) as sess:\n", + " module = hub.Module(\"https://tfhub.dev/google/imagenet/inception_v3/classification/3\")\n", + "\n", + " input_params = module.get_input_info_dict()\n", + " image_input = tf.placeholder(\n", + " name=\"images\", dtype=input_params[\"images\"].dtype, shape=input_params[\"images\"].get_shape()\n", + " )\n", + " \n", + " sess.run([tf.global_variables_initializer(), tf.tables_initializer()])\n", + "\n", + " classes = module(image_input)\n", + " signature = predict_signature_def(inputs={\"images\": image_input}, outputs={\"classes\": classes})\n", + "\n", + " builder.add_meta_graph_and_variables(\n", + " sess, [\"serve\"], signature_def_map={\"predict\": signature}, strip_default_attrs=True\n", + " )\n", + "\n", + "builder.save()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aGtJiyEnBgwl", + "colab_type": "text" + }, + "source": [ + "## Upload the model to AWS\n", + "\n", + "Cortex loads models from AWS, so we need to upload the exported model." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fTkjvSKBBmUB", + "colab_type": "text" + }, + "source": [ + "Set these variables to configure your AWS credentials and model upload path:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4xcDWxqCBPre", + "colab_type": "code", + "cellView": "form", + "colab": {} + }, + "source": [ + "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", + "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", + "S3_UPLOAD_PATH = \"s3://my-bucket/image-classifier/inception\" #@param {type:\"string\"}\n", + "\n", + "import sys\n", + "import re\n", + "\n", + "if AWS_ACCESS_KEY_ID == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", + "\n", + "elif AWS_SECRET_ACCESS_KEY == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", + "\n", + "else:\n", + " try:\n", + " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", + " except:\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "czZkjb1IBr-f", + "colab_type": "text" + }, + "source": [ + "Upload the model to S3:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "M0b0IbyaBsim", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import os\n", + "import boto3\n", + "\n", + "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", + "\n", + "for dirpath, _, filenames in os.walk(\"export\"):\n", + " for filename in filenames:\n", + " filepath = os.path.join(dirpath, filename)\n", + " filekey = os.path.join(key, filepath[len(\"export/\"):])\n", + " print(\"Uploading s3://{}/{}...\".format(bucket, filekey), end = '')\n", + " s3.upload_file(filepath, bucket, filekey)\n", + " print(\" ✓\")\n", + "\n", + "print(\"\\nUploaded model export directory to \" + S3_UPLOAD_PATH)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pZQWoeZbE7Wc", + "colab_type": "text" + }, + "source": [ + "\n", + "That's it! See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/image-classifier-inception) for how to deploy the model as an API." + ] + } + ] +} diff --git a/test/tensorflow/image-classifier-inception/predictor.py b/test/tensorflow/image-classifier-inception/predictor.py new file mode 100644 index 0000000000..c2afb63c0c --- /dev/null +++ b/test/tensorflow/image-classifier-inception/predictor.py @@ -0,0 +1,21 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import requests +import numpy as np +from PIL import Image +from io import BytesIO + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config): + self.client = tensorflow_client + self.labels = requests.get( + "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" + ).text.split("\n") + + def predict(self, payload): + image = requests.get(payload["url"]).content + decoded_image = np.asarray(Image.open(BytesIO(image)), dtype=np.float32) / 255 + model_input = {"images": np.expand_dims(decoded_image, axis=0)} + prediction = self.client.predict(model_input) + return self.labels[np.argmax(prediction["classes"])] diff --git a/test/tensorflow/image-classifier-inception/requirements.txt b/test/tensorflow/image-classifier-inception/requirements.txt new file mode 100644 index 0000000000..7e2fba5e6c --- /dev/null +++ b/test/tensorflow/image-classifier-inception/requirements.txt @@ -0,0 +1 @@ +Pillow diff --git a/test/tensorflow/image-classifier-inception/sample.json b/test/tensorflow/image-classifier-inception/sample.json new file mode 100644 index 0000000000..667652007a --- /dev/null +++ b/test/tensorflow/image-classifier-inception/sample.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/PzXprwl.jpg" +} diff --git a/test/tensorflow/image-classifier-resnet50/README.md b/test/tensorflow/image-classifier-resnet50/README.md new file mode 100644 index 0000000000..7a52dadbb8 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/README.md @@ -0,0 +1,90 @@ +# Image Classifier with ResNet50 + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example implements an image recognition system using ResNet50, which allows for the recognition of up to 1000 classes. + +## Deploying + +There are 4 Cortex APIs available in this example: + +1. [cortex.yaml](cortex.yaml) - can be used with any instances. +1. [cortex_inf.yaml](cortex_inf.yaml) - to be used with `inf1` instances. +1. [cortex_gpu.yaml](cortex_gpu.yaml) - to be used with GPU instances. +1. [cortex_gpu_server_side_batching.yaml](cortex_gpu_server_side_batching.yaml) - to be used with GPU instances. Deployed with `max_batch_size` > 1. The exported model and the TensorFlow Predictor do not need to be modified to support server-side batching. + +To deploy an API, run: + +```bash +cortex deploy +``` + +E.g. + +```bash +cortex deploy cortex_inf.yaml +``` + +## Verifying your API + +Check that your API is live by running `cortex get image-classifier-resnet50`, and copy the example `curl` command that's shown. After the API is live, run the `curl` command, e.g. + +```bash +$ curl -X POST -H "Content-Type: application/json" -d @sample.json + +["tabby", "Egyptian_cat", "tiger_cat", "tiger", "plastic_bag"] +``` + +The following image is embedded in [sample.json](sample.json): + +![image](https://i.imgur.com/213xcvs.jpg) + +## Throughput test + +Before [throughput_test.py](../../utils/throughput_test.py) is run, 2 environment variables have to be exported: + +```bash +export ENDPOINT= # you can find this with `cortex get image-classifier-resnet50` +export PAYLOAD=https://i.imgur.com/213xcvs.jpg # this is the cat image shown in the previous step +``` + +Then, deploy each API one at a time and check the results: + +1. 
Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 2` with the [cortex.yaml](cortex.yaml) API running on a `c5.xlarge` instance will get **~16.2 inferences/sec** with an average latency of **200 ms**.
+1. Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 48` with the [cortex_inf.yaml](cortex_inf.yaml) API running on an `inf1.2xlarge` instance will get **~510 inferences/sec** with an average latency of **80 ms**.
+1. Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 24` with the [cortex_gpu.yaml](cortex_gpu.yaml) API running on a `g4dn.xlarge` instance will get **~125 inferences/sec** with an average latency of **85 ms**. Optimizing the model with TensorRT to use FP16 on TF-serving only seems to achieve a 10% performance improvement - one thing to consider is that the TensorRT engines hadn't been built beforehand, so this might have affected the results negatively.
+1. Running `python ../../utils/throughput_test.py -i 30 -p 4 -t 60` with the [cortex_gpu_server_side_batching.yaml](cortex_gpu_server_side_batching.yaml) API running on a `g4dn.xlarge` instance will get **~186 inferences/sec** with an average latency of **500 ms**. This achieves a 49% higher throughput than the [cortex_gpu.yaml](cortex_gpu.yaml) API, at the expense of increased latency.
+
+As an alternative to [throughput_test.py](../../utils/throughput_test.py), the GNU `ab` utility can also be used to benchmark the API. This has the advantage that it's not as taxing on your local machine, but the disadvantage that it doesn't implement a cooldown period. You can run `ab` like this:
+
+```bash
+# for making octet-stream requests, which is the default for the throughput_test script
+ab -n <number-of-requests> -c <concurrency-level> -p sample.bin -T 'application/octet-stream' -rks 120 $ENDPOINT
+
+# for making json requests, which will have lower performance because the API has to download the image every time
+ab -n <number-of-requests> -c <concurrency-level> -p sample.json -T 'application/json' -rks 120 $ENDPOINT
+```
+
+*Note: `inf1.xlarge` isn't used because the major bottleneck with `inf` instances for this example is the CPU, and `inf1.2xlarge` has twice as many CPU cores for the same number of Inferentia ASICs (which is 1), which translates to almost double the throughput.*
+
+## Exporting SavedModels
+
+This example deploys models that we have built and uploaded to a public S3 bucket. If you want to build the models yourself, follow these instructions.
+
+Run the following command to install the dependencies required for the [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook:
+
+```bash
+pip install --extra-index-url=https://pip.repos.neuron.amazonaws.com \
+    neuron-cc==1.0.9410.0+6008239556 \
+    tensorflow-neuron==1.15.0.1.0.1333.0
+```
+
+The [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook generates 2 SavedModels: one saved in the `resnet50` directory, which can run on GPU or CPU, and another in the `resnet50_neuron` directory, which can only run on `inf1` instances. For server-side batching on `inf1` instances, a different compilation of the model is required. To compile the ResNet50 model for a batch size of 5, run `run_all` from [this directory](https://github.com/aws/aws-neuron-sdk/tree/master/src/examples/tensorflow/keras_resnet50).
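For reference, the heart of that notebook condenses to a few lines. This is only a sketch of the export-and-compile flow; see [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) for the full, tested version:

```python
# condensed sketch of generate_resnet50_models.ipynb: export a Keras ResNet50 as a
# SavedModel, then compile it for Inferentia with tensorflow-neuron
import tensorflow as tf
import tensorflow.neuron as tfn
import tensorflow.compat.v1.keras as keras
from tensorflow.keras.applications.resnet50 import ResNet50

model_dir = "resnet50"
compiled_model_dir = "resnet50_neuron"

keras.backend.set_learning_phase(0)
keras.backend.set_image_data_format("channels_last")
model = ResNet50(weights="imagenet")

# export the CPU/GPU SavedModel
tf.saved_model.simple_save(
    session=keras.backend.get_session(),
    export_dir=model_dir,
    inputs={"input": model.inputs[0]},
    outputs={"output": model.outputs[0]},
)

# compile it for a single Neuron core (batch size 1), for use on inf1 instances
batch_size = 1
tfn.saved_model.compile(model_dir, compiled_model_dir, batch_size)
```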
+ +If you'd also like to build the TensorRT version of the GPU model, run the following command in a new Python environment to install the pip dependencies required for the [generate_gpu_resnet50_model.ipynb](generate_gpu_resnet50_model.ipynb) notebook: + +```bash +pip install tensorflow==2.0.0 +``` + +TensorRT also has to be installed to export the SavedModel. Follow the instructions on [Nvidia TensorRT Documentation](https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html#installing-debian) to download and install TensorRT on your local machine (this will require ~5GB of space, and you will have to create an Nvidia account). This notebook also requires that the SavedModel generated with the [generate_resnet50_models.ipynb](generate_resnet50_models.ipynb) notebook exists in the `resnet50` directory. The TensorRT SavedModel will be exported to the `resnet50_gpu` directory. You can then replace the existing SavedModel with the TensorRT-optimized version in [cortex_gpu.yaml](cortex_gpu.yaml) - it's a drop-in replacement that doesn't require any other dependencies on the Cortex side. By default, the API config in [cortex_gpu.yaml](cortex_gpu.yaml) uses the non-TensorRT-optimized version due to simplicity. diff --git a/test/tensorflow/image-classifier-resnet50/cortex.yaml b/test/tensorflow/image-classifier-resnet50/cortex.yaml new file mode 100644 index 0000000000..afbe5a8394 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/cortex.yaml @@ -0,0 +1,18 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/resnet50/ + processes_per_replica: 4 + threads_per_process: 16 + config: + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + input_key: input + output_key: output + compute: + cpu: 3 + mem: 4G diff --git a/test/tensorflow/image-classifier-resnet50/cortex_gpu.yaml b/test/tensorflow/image-classifier-resnet50/cortex_gpu.yaml new file mode 100644 index 0000000000..f86b85e414 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/cortex_gpu.yaml @@ -0,0 +1,19 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/resnet50/ + processes_per_replica: 4 + threads_per_process: 24 + config: + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + input_key: input + output_key: output + compute: + gpu: 1 + cpu: 3 + mem: 4G diff --git a/test/tensorflow/image-classifier-resnet50/cortex_gpu_server_side_batching.yaml b/test/tensorflow/image-classifier-resnet50/cortex_gpu_server_side_batching.yaml new file mode 100644 index 0000000000..61604346d0 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/cortex_gpu_server_side_batching.yaml @@ -0,0 +1,22 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/resnet50/ + server_side_batching: + max_batch_size: 32 + batch_interval: 0.1s + processes_per_replica: 4 + threads_per_process: 192 + config: + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + input_key: input + output_key: output + compute: + gpu: 1 + cpu: 3 + mem: 4G diff --git a/test/tensorflow/image-classifier-resnet50/cortex_inf.yaml b/test/tensorflow/image-classifier-resnet50/cortex_inf.yaml new file mode 100644 index 0000000000..13f999e1b5 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/cortex_inf.yaml @@ -0,0 +1,21 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/resnet50_neuron/ + processes_per_replica: 4 + threads_per_process: 256 + config: + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + input_key: input + output_key: output + compute: + inf: 1 + cpu: 3 + mem: 4G + autoscaling: + max_replica_concurrency: 16384 diff --git a/test/tensorflow/image-classifier-resnet50/cortex_inf_server_side_batching.yaml b/test/tensorflow/image-classifier-resnet50/cortex_inf_server_side_batching.yaml new file mode 100644 index 0000000000..2b33961e95 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/cortex_inf_server_side_batching.yaml @@ -0,0 +1,24 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: image-classifier-resnet50 + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/resnet50_neuron_batch_size_5/ + server_side_batching: + max_batch_size: 5 + batch_interval: 0.1s + processes_per_replica: 4 + threads_per_process: 260 + config: + classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + input_shape: [224, 224] + input_key: input_1:0 + output_key: probs/Softmax:0 + compute: + inf: 1 + cpu: 3 + mem: 4G + autoscaling: + max_replica_concurrency: 16384 diff --git a/test/tensorflow/image-classifier-resnet50/generate_gpu_resnet50_model.ipynb b/test/tensorflow/image-classifier-resnet50/generate_gpu_resnet50_model.ipynb new file mode 100644 index 0000000000..ca78235b4d --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/generate_gpu_resnet50_model.ipynb @@ -0,0 +1,131 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Generate GPU Resnet50 Model\n", + "\n", + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow as tf\n", + "from tensorflow.python.compiler.tensorrt import trt_convert as trt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "input_model_dir = \"resnet50\"\n", + "output_model_dir = \"resnet50_gpu\"" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "conversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS\n", + "conversion_params = conversion_params._replace(\n", + " max_workspace_size_bytes=(1<<30))\n", + "conversion_params = conversion_params._replace(precision_mode=\"FP16\")\n", + "conversion_params = conversion_params._replace(\n", + " maximum_cached_engines=100)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Linked TensorRT version: (0, 0, 0)\n", + "INFO:tensorflow:Loaded TensorRT version: (0, 0, 0)\n", + "INFO:tensorflow:Running against TensorRT version 0.0.0\n" + ] + } + ], + "source": [ + "converter = trt.TrtGraphConverterV2(\n", + " input_saved_model_dir=input_model_dir,\n", + " conversion_params=conversion_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/robert/.miniconda3/envs/py36-tf/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1781: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "If using Keras pass *_constraint arguments to layers.\n", + "WARNING:tensorflow:Issue encountered when serializing variables.\n", + "Type is unsupported, or the types of the items don't match field type in CollectionDef. Note this is a warning and probably safe to ignore.\n", + "to_proto not supported in EAGER mode.\n", + "WARNING:tensorflow:Issue encountered when serializing trainable_variables.\n", + "Type is unsupported, or the types of the items don't match field type in CollectionDef. 
Note this is a warning and probably safe to ignore.\n", + "to_proto not supported in EAGER mode.\n" + ] + } + ], + "source": [ + "converter.convert()" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Assets written to: resnet50_gpu/assets\n" + ] + } + ], + "source": [ + "converter.save(output_model_dir)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/test/tensorflow/image-classifier-resnet50/generate_resnet50_models.ipynb b/test/tensorflow/image-classifier-resnet50/generate_resnet50_models.ipynb new file mode 100644 index 0000000000..11eaf5a316 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/generate_resnet50_models.ipynb @@ -0,0 +1,178 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Generate Resnet50 Models\n", + "\n", + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import time\n", + "import shutil\n", + "import tensorflow as tf\n", + "import tensorflow.neuron as tfn\n", + "import tensorflow.compat.v1.keras as keras\n", + "from tensorflow.keras.applications.resnet50 import ResNet50" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Prepare export directories for compile/non-compiled versions of the model." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "model_dir = \"resnet50\"\n", + "compiled_model_dir = model_dir + \"_neuron\"\n", + "shutil.rmtree(model_dir, ignore_errors=True)\n", + "shutil.rmtree(compiled_model_dir, ignore_errors=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Instantiate a Keras ResNet50 model." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /home/robert/.miniconda3/envs/py36-neuron/lib/python3.6/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "If using Keras pass *_constraint arguments to layers.\n" + ] + } + ], + "source": [ + "keras.backend.set_learning_phase(0)\n", + "keras.backend.set_image_data_format('channels_last')\n", + "model = ResNet50(weights='imagenet')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Export the model as SavedModel." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From :5: simple_save (from tensorflow.python.saved_model.simple_save) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.simple_save.\n", + "WARNING:tensorflow:From /home/robert/.miniconda3/envs/py36-neuron/lib/python3.6/site-packages/tensorflow_core/python/saved_model/signature_def_utils_impl.py:201: build_tensor_info (from tensorflow.python.saved_model.utils_impl) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "This function will only be available through the v1 compatibility library as tf.compat.v1.saved_model.utils.build_tensor_info or tf.compat.v1.saved_model.build_tensor_info.\n", + "INFO:tensorflow:Assets added to graph.\n", + "INFO:tensorflow:No assets to write.\n", + "INFO:tensorflow:SavedModel written to: resnet50/saved_model.pb\n" + ] + } + ], + "source": [ + "tf.saved_model.simple_save(\n", + " session = keras.backend.get_session(),\n", + " export_dir = model_dir,\n", + " inputs = {'input': model.inputs[0]},\n", + " outputs = {'output': model.outputs[0]})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "And then compile it for Inferentia to be used on only one Neuron core. `--static-weights` option is used to cache all weights onto the neuron core's memory." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Restoring parameters from resnet50/variables/variables\n", + "INFO:tensorflow:Froze 320 variables.\n", + "INFO:tensorflow:Converted 320 variables to const ops.\n", + "INFO:tensorflow:fusing subgraph neuron_op_d6f098c01c780733 with neuron-cc\n", + "INFO:tensorflow:Number of operations in TensorFlow session: 4638\n", + "INFO:tensorflow:Number of operations after tf.neuron optimizations: 556\n", + "INFO:tensorflow:Number of operations placed on Neuron runtime: 554\n", + "INFO:tensorflow:No assets to save.\n", + "INFO:tensorflow:No assets to write.\n", + "INFO:tensorflow:SavedModel written to: resnet50_neuron/saved_model.pb\n", + "INFO:tensorflow:Successfully converted resnet50 to resnet50_neuron\n" + ] + }, + { + "data": { + "text/plain": [ + "{'OnNeuronRatio': 0.9964028776978417}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "compiler_args = ['--static-weights', '--num-neuroncores', '1']\n", + "batch_size = 1\n", + "tfn.saved_model.compile(model_dir, compiled_model_dir, batch_size)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/test/tensorflow/image-classifier-resnet50/predictor.py b/test/tensorflow/image-classifier-resnet50/predictor.py new file mode 100644 index 0000000000..98828723cc --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/predictor.py @@ -0,0 +1,63 @@ +# WARNING: you are on the master 
branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import os +import cv2 +import numpy as np +import requests +import imageio +import json +import base64 + + +def read_image(payload): + """ + Read JPG image from {"url": "https://..."} or from a bytes object. + """ + if isinstance(payload, bytes): + jpg_as_np = np.frombuffer(payload, dtype=np.uint8) + img = cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR) + elif isinstance(payload, dict) and "url" in payload.keys(): + img = imageio.imread(payload["url"]) + else: + return None + return img + + +def prepare_image(image, input_shape, input_key): + """ + Prepares an image for the TFS client. + """ + img = cv2.resize(image, input_shape, interpolation=cv2.INTER_NEAREST) + img = {input_key: img[np.newaxis, ...]} + return img + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config): + self.client = tensorflow_client + + # load classes + classes = requests.get(config["classes"]).json() + self.idx2label = [classes[str(k)][1] for k in range(len(classes))] + + self.input_shape = tuple(config["input_shape"]) + self.input_key = str(config["input_key"]) + self.output_key = str(config["output_key"]) + + def predict(self, payload): + # preprocess image + img = read_image(payload) + if img is None: + return None + img = prepare_image(img, self.input_shape, self.input_key) + + # predict + results = self.client.predict(img)[self.output_key] + results = np.argsort(results) + + # Lookup and print the top 5 labels + top5_idx = results[-5:] + top5_labels = [self.idx2label[idx] for idx in top5_idx] + top5_labels = top5_labels[::-1] + + return top5_labels diff --git a/test/tensorflow/image-classifier-resnet50/requirements.txt b/test/tensorflow/image-classifier-resnet50/requirements.txt new file mode 100644 index 0000000000..66340adf33 --- /dev/null +++ b/test/tensorflow/image-classifier-resnet50/requirements.txt @@ -0,0 +1 @@ +imageio==2.9.* diff --git a/test/tensorflow/image-classifier-resnet50/sample.bin b/test/tensorflow/image-classifier-resnet50/sample.bin new file mode 100644 index 0000000000000000000000000000000000000000..921abf24a5c99cd3c1d1cd12d00f134a16391a77 GIT binary patch literal 8680 zcmbVxYg`le*6v`cQB&uZa{9BbO)$v8W{2LJ+NxL z*0Y{xjkDd^g}nahij^x67Z(I^fq#heF%p4{b73CL^El@58jE=z|JrM;*CvdgFyYk~ zd(y-S>`Cki6DCfcILVcHz)w>qyG~*L!@SAgFC90Y#TxI*p1^)}$^Yodc?)r$h;$)c zESEQsaqccGcNga!1cP&qe|0=K?B5TUaje(IPhi6_Cc_04ufy?KEI9OdI5AwE0e?qc zb00r#Zs76>9xF zgAYTNghqTExgzS5m7lIz8~YV+-TDpjUw^Z8Tf+7oDSJ}C-z!WLWgo~nDE{$K?!S)w zeDv7yUrrRBDf#v6xzh6&F8)@2t>Sv+4aLp6`a2Dcs=L28J^bU();4YXqYj}*2L)_9WE}rJ!#(JFU_7?cDK0Bm>;Bj)53)IDzg8-!1n*IBKz;a{%>3z$P|_f z6p!VO;K*>(f-^M{>YxQ@auMcB9_hI|J5TOecjRR24cSbeirq2PN9JhTfQ@s=;N6k? 
z%)Ihq(O27!#ozlmCURxZ%!Q|>uO8o28aW+_iR5dB!;|=0{OWKha?j4I!FG&XzLKJ6 z?+-&&+(k~L?E9Nnb{itKVQ2|yxb@75#A~lOeiq^#;}w+^sxT@h0f4}L% zQ~c9YUH!S&79eMG7r5Lcp5QOF?^@)1C-T##UwepQFNI%DVHg22>(oDG<5QSgI}!e>I?31Wj}l;uP;4Wm3+hZDIwg(pq_Oyfi9^btyxD(AwoX84PVzeMp1KyAiUqT;k4a<~$m~v+?aFRDYVT*HD-f@yNZR$2$6M(G&4uueKmUL_St2 z&8w4g3pt7|C*qU9C%zT#r57&j`Py+d^{uM3c*Ape}$xV@=HZ zVcs)wN6!DLp+7el@PDDT3Gx4^yyrg+G49hRW}J!+R`-W#12)F7A)DQbJWaB6c4DDc z<(s5GSLQf~U&qU+NGI}!Npwzpx{?x4bnH+Ua6R4=+gJ2)LNr68t-dL|O)2zz(JcH_ zrqXdnEjBJ0IJAMIkS)>%0{YUm*}Wn!?PABlrMxS&W<$54_^?$z!*SY)=%cy_GR_;z z!>{}6QzF`LoqQ{&I3bhmxQOd-V1r)Jgx_DKLx?*PLqS=b#QtRu{#f~2TfRkk%ZNQ< z&yt6bmmCU1J{qOUdl%D*Os_Q;=-korut6CmNKvT?*O;<8d9iqCnP z=lgux9qMrMXjeu`gmy)E;&6m+W|kAtvB}t96Ea&Zva_;;!tPxEm&BAl!hBLVJL*-r z10`-eX`y>oP{PGMynEQX=`oRrr!;c9OWcK)NcOC_+|o$)W<}(-mSx^Qf90F7I{El} zRoc_ET7*&2L?zzimZ+9hX=Nl1oRz~fN37HOw>6D%c7~0X+ww@MBgY^iV>(%?zv8-X2!n%qk=TeuTj=^2T=E9AfCU9fr@m9vI%a=BGX9@bte z#V;f`Mv5YEzAc*Oi&mMHG0eRH%=zu4Z;urntCAG(%#Lj2%yWCeQ7hy(g)| zUw=$4Pkj2`obB_T`_(M(c~pMxkFf3bB|VuR(tl~F&H4apeqzVVR3;R2l+Bn`EpU_x z{K<%Qnp)4E&_;2kJ)T||K$a$Ib`o1nNkB8N?SZsTB5Zgb{wyH(q&zU(C%w6pPd|Z4 zpie8-woA)8Z5)Mti4)n}LkzLy2QodWU0Ph9Y?kXwse}H#l6I8O5TN5;MbycRsX)1c z_cCDqtvNj|cfsj#8El{K>Aof|;-A>4CmYPNZ)+(ayf_M9jHRsqCRkO7l;o zM8nhIX4!GAwS@M3I7U~L+UmnPt!bQ@U8m5yZND{^LGe0svp;K5?+?Pgh&%e4@;e(a zY7Per?X?J`c*R$hPGoYDfc`6qJmN(DY=8-5Pm!{vgy!0iykjvW>T%&q2{ifVcyEAy zVGSVWaUn)+P>yD6W6hVk2)H7LaJ4cV*$53UhkP9{bct<7YMm2#q`bo??lmlIMj9I$Ut1ZWsS;vhQd&&gW=F8bscpg zzciMu%M5{+P|24Q;^}`_d%QVEW$C*>cbMhtG{cMI=GvE}dysD$zC(Fh?%N8l7ugU0 z*;ijLx)(JC5HSdycX$ICt~ZesBFGM zH$^c3B)N+)sae_*;`S12rw>v0Cta^i2xqwKKZ z*O{^A&YEyJ84-rluh+kbqUPQlsWiqtw&6)_$NNQoBT>)S54DB|^xw}jtI5d6or5K` z`}ZT%rN5NAS%T%Gtr!`peLPs|xQML|^Pt_g6NO{(;u3(Q!+HOYoH^Qiq{O9@V*r=NL z+HZ6R{+kC?u?HCcPdYZbnQJYbYb3T~OER_0FiF5H^FVodQ&pr=M${$C`$9PigPV1% z#PV<2@1~lH4#yjn;do-eBLVFP9dJ^}QGU~eKU~gD_W%_tMjPtn*nJtg_#Fz z;p=Y+<1R65QD-)DB@Qve%kg`udlf?kWnF|%y1zwMUQD+XEdu6iB=n~&o*53a#E#w? 
z^P+*G59}S_-6za3(LM4k~X$Aw0kFQCfhD;!6yYBDyx zFWZjUx72<+g5JD_GvaHb)co(BxP!GTEvkku&|3z4kX;htD8YL|{3F!dv-Fde@W|DW zt?|N4u_gNY4ktndxb*sKg2IzF-iVG~WsNdTdNHOggp}XrIFVDUwP7p!w-etScAVb# zu3U0tub}NFHkGt|;Lr(X^-5=Z-I${l=;Zm5Yty2QajUi;dC1KU(7!Y6h^2q$9F&9+ zd)jD4SGfuLz?GXlC_nLRTb1M7J<*JRg-?y6?_q!8Bdu#`6*qgRG>N;vU|vxDsM*1JX>N}L;HEF9Fr@Cq^20)m-|T(fNH4h=a^&o zbomJl=&?E!8toE@$CXZ`7lzC8QyIaa&Ci=;+FK$HDBLN=VgntI4FwW|XDWm6{4pKf zPXBdTz0AP{--TJeJm05$Ehv0nhk zb{o)$gj|K|TYFNUq@yIyiA=;P9w$T|cHbt?k7H{;BzEr09n2Tp8Z+8vQhwgpY5Aw- znFY8mqy<{r;HIxmw25JcXzBTxi%hHbO@62S9xy<{7keYDvP%)9X5ZG@H0^g87YoFC zAO>#d*)(ZKDPc<+*_K5w_D%WWTvdKbgNjdklYc-lr!|V7eDzdpUtaKwC(MW)KKSbJ zc$N3_I1-Z)-)WDM1_Mma36iX_msJi0TpzlOSlMYlypJ$k8^Aki_dsIpjr18gVBV$; zS8f;@bsI1(?#ackUaB9)3pf=|5{T*ct>xkIH|V;{;VJN$Mo&dC*%@((ndE-wPosyN z$o3=_10zQ0ybz41;z*t>9o42v3t)h^1!oIH#O>u3PjRUGr=<1^Qddmv^1C(mT%=e( zBm^O1=7u?ugXhT$z?d-obu!|PVO<;kY*UF&V2Y6m4WObdx8++%gWVTChl&mgH9L{n z)a!71Oee6tbr3JJ=5MK4oG0d6RCUVS!tOozD67X>-&Y#~ z@@u4L#Qg*vQa{EkAPxF$1y=Q_i}c|+EjK6;Xe~M@jQ6%I^G}w2GCf~dVviC3In@EI z%rt?=gWdT6uCQcBSux3}HbNgAFc=Oh+G-0)o*9oIdBwndeMdifHyw4HbRs=VMq39u zN+2l*4Ez=fE#~=K*4WN#gKsmytGij!l`%z#MS#I%Q>zw)4ZVtWA{PYjm|sC+6CL7< z+EeT)5m(eXB1RxI?5+*bSeu5L>ra81g&(9a9l8&O$bi`KtJeFxSQ)Xo@92qs4(3oe z4n38altZ~MgVi~4)~E<_edlzS<2uHt56a8&uQ#DbR3gi!4gT|6N7gMOD-nv zrrfr+CJ+@XAYp+w;jA~bVN+$_%XqKS5CL7{C;_54-zW79LziK5=vK@V+VuhUvHS#) zpk|Or{pdt2er>_IV~MsdW*Q#iDg;aDcB!lN!DL8!&V?>wY-XY+L_N=pGYG?=9U%)U z-~^s!O~mhVUmBcnLo``T?c5u!bm(bV9lomX~uVEwD{?hu$P7F&Kw1dxP2P1-Cc>Q*|$3`!Dz zp!X2p?7zQiUxa2D&l>Yh6BGnLNYS5r)#gvg3t)ojF00|DvEhqdD7pMv=n$GddopkMu^1@}OO8uB@kEHJzyrh8@)BHk()u7pCx8$^ zke}Ft_v1MkvPeoZ?fEXdS*K zXV;j~G|N$0%uF~Tx5iGpZa2#zEA$ijzcNFZSDNLtpVSOs{g!B03PQ?+2*svZ7ZYxB z6sOSOoMQS>fNZ2{38Qqc|MeX8@35iT@q3;k@s zBmR^5t=x%112EH}hV((MyU?&PR0S2dRIeK-z$qu5adfS&_^pT~HU%^M3b zm^&Omfk)F|4Z7*o!eLsNwhT5Nwr)7)w9w%KkKZhfveX9C^nsIv)Y+_y?yKkF16`%@hV8GWaN(Zfw2RbGS zcD|6b>>i>V%(vt7SU4R-IgzV~D}H{gf{8KWm5Ib?zoSoxoo$%S#4buoAS`cadPi&~ z7zmXs#14gES+Z2~fxj@V`HFHs zUU}SfL7pCCF36J64+XDV|M_(8!w8oCB}#o^*LDKxW~v`n-RHqi$jj#!oCuD#W8~I> z+@}*jK4Fb)x_d4^s^AUn9v~NLmzi+fUnei8lFh#ejr6k2Mc%kBE?JiOpmX*x_dp_+ zleye9ZJs?4dym*GG(t1-FcuZ>C@ayHrMo$lLE+58%?NvhqZSn;q<}LKaAT}fvSAH03DhU`J`mx3;=_pZQmJski-^+yt*j4GkX284x*u;a|e^0|cE>zQ#E?n70 zFCe_g?`tzit$7k)aW8Zr0Q^VDZ&s919VV~BB!9|*e| zkEY-61sc0iKsy)|%NxM}he?eJMjt0&kO#(Pul%zRZ+#BL94z zwyhdAu;4b1wkAL}KLOTV;ol59x9fK84H(KlazoPBGTj{Lkd$cH>M`FoXayg0Vt;fF zQMN+S1)D1mNiIx?9M65cexrRZ^}6GvmEro(#h!hD3;Ut2e%?5Bul5ZYc57^a;XtJ&49?oHPn<<1gB^h5 z=)gHX1D*4cNfb57sDvUVs7V=ql}d)JL5MjW)@UDJ=%WmXn+I{js)YHVk~kia=~uw7 zM)-Iy;z`v;X?dv%HK}3p!E2{w#7CVTM(1joz4+AKRr#n2(nH+ceS_#I1q{vsncFR1 zN0b-cNL9l=u4wB%F)ZXechnS9sPzgnjJuh6gtK5(RC}G#G+ighm|uCQVLSH&H_;r` zZv=%0fJfyQxd7JN?^6&+pc;1etsHQaW;wVI3XLF6l9&l)X(kOLh@Yu|q%{jITB zcd*r(@nQXj+ETp$BU9>18{$81mH+r%Kw8DHmi-$A zfLFLlRE}##^_k;VQWg) zKS)zIMCZaTAq;TqDra;e6Lr2@4Qq?4fStu`!tM+Lg26OG8UU=nb3-f)jTK;z3w<+Y z6_W;-E@i{vC^TF{+P2Lap<$DT0i(x*U2QrhKOkJ8*MPnb)YsqSmN}6%o;wqzS1}dZ zv543iZ|6YqNI$$F47PV;Q&x-EM?suk#Z0q6i`kh5Hy4ogG~C4FpaFU>*@lCa;C*y)DWKKB3&HKP za3u2~QFqPEbGULj)Ws3X9~)0-N^}EF(4!=n`F6*S5o&&CKYdiH9XbxgA>uqN<86c# zKi^?fTxrGn7ixxn55i3yy~;AYd547h;gZ;*Qn?LtvR()*^Dlynr<%w?P-}|E4dSUbzXj zHn3&i=qTYb+aM7CuzC|*DP*R%;S^ev*1gRL@6!cf_QZ97paU4+BDOV3q9E_&Tic*3 z=!haz{xE5DD)@L1(HeHcO(Qiex{H9V>nmj>ddc-<>;j9a`uiCf!9ZQ$G+^YdW&?HA zzmxtBj4p0IYD)nYCCH)#V6Cst^%1&dJ~)-NfPU;mLYRox5s$CAv=W;cFCu32sNI+H zht2--zKbxG|4QB9iKJ^`DT2eTV<`9?y*z;^>nup@R6^j);jMs$$yJZ7@@0mw<9vs> nI+?jSC^F{xUjzvL0|#PdBqF?9&IK8 90%" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"Hdwu-wzJvJLb", + "colab_type": "text" + }, + "source": [ + "## Export the model\n", + "Now we can export the model using [`Estimator.export_saved_model`](https://www.tensorflow.org/versions/r1.14/api_docs/python/tf/estimator/Estimator#export_saved_model):" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "AVgs2mkdllRn", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def json_serving_input_fn():\n", + " placeholders = {}\n", + " features = {}\n", + " for feature_name in feature_names:\n", + " placeholders[feature_name] = tf.placeholder(shape=[None], dtype=tf.float64, name=feature_name)\n", + " features[feature_name] = tf.expand_dims(placeholders[feature_name], -1)\n", + " \n", + " return tf.estimator.export.ServingInputReceiver(features, receiver_tensors=placeholders)\n", + "\n", + "\n", + "classifier.export_saved_model(\"export\", json_serving_input_fn)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ipVlP4yPxFxw", + "colab_type": "text" + }, + "source": [ + "## Upload the model to AWS\n", + "\n", + "Cortex loads models from AWS, so we need to upload the exported model." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3IqsfyylxLhy", + "colab_type": "text" + }, + "source": [ + "Set these variables to configure your AWS credentials and model upload path:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "lc9LBH1uHT_h", + "colab_type": "code", + "cellView": "form", + "colab": {} + }, + "source": [ + "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", + "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", + "S3_UPLOAD_PATH = \"s3://my-bucket/iris-classifier/tensorflow\" #@param {type:\"string\"}\n", + "\n", + "import sys\n", + "import re\n", + "\n", + "if AWS_ACCESS_KEY_ID == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", + "\n", + "elif AWS_SECRET_ACCESS_KEY == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", + "\n", + "else:\n", + " try:\n", + " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", + " except:\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NXeuZsaQxUc8", + "colab_type": "text" + }, + "source": [ + "Upload the model to S3:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "YLmnWTEVsu55", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import os\n", + "import boto3\n", + "\n", + "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", + "\n", + "for dirpath, _, filenames in os.walk(\"export\"):\n", + " for filename in filenames:\n", + " filepath = os.path.join(dirpath, filename)\n", + " filekey = os.path.join(key, filepath[len(\"export/\"):])\n", + " print(\"Uploading s3://{}/{}...\".format(bucket, filekey), end = '')\n", + " s3.upload_file(filepath, bucket, filekey)\n", + " print(\" ✓\")", + "\n", + "print(\"\\nUploaded model export directory to \" + S3_UPLOAD_PATH)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aR-mmcUzyCV3", + "colab_type": "text" + }, + "source": [ + "\n", + "That's it! 
See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/iris-classifier) for how to deploy the model as an API." + ] + } + ] +} diff --git a/test/tensorflow/license-plate-reader/README.md b/test/tensorflow/license-plate-reader/README.md new file mode 100644 index 0000000000..009286a4e1 --- /dev/null +++ b/test/tensorflow/license-plate-reader/README.md @@ -0,0 +1,175 @@ +# Real-Time License Plate Identification System + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This project implements a license plate identification system. On resource-constrained systems, running inference may prove too computationally expensive. One solution is to run the ML models in the cloud and have the local (embedded) system act as a client of these services. + +![Demo GIF](https://i.imgur.com/jgkJB59.gif) + +*Figure 1 - GIF taken from this real-time [video](https://www.youtube.com/watch?v=gsYEZtecXlA) recording of predictions* + +![Raspberry Pi client with 4G access and onboard GPS that connects to cortex's APIs for inference](https://i.imgur.com/MvDAXWU.jpg) + +*Figure 2 - Raspberry Pi-powered client with 4G access and onboard GPS that connects to cortex's APIs for inference. More on that [here](https://github.com/RobertLucian/cortex-license-plate-reader-client).* + +In our example, we assume we have a dashcam mounted on a car and we want to detect and recognize all license plates in the video stream in real time. We can use an embedded computer system to record the video, then stream and infer frame-by-frame using a web service, reassemble the stream with the license plate annotations, and finally display the annotated stream on a screen. The web service in our case is a set of two web APIs deployed using cortex. + +## Used Models + +The identification of license plates is done in three steps: + +1. Detecting the bounding box of each license plate using the *YOLOv3* model. +1. Detecting the region of each word inside each bounding box using a pretrained *CRAFT* text detector. +1. Recognizing the text inside the previously detected boxes using a pretrained *CRNN* model. + +Out of these three models (*YOLOv3*, *CRAFT* and *CRNN*), only *YOLOv3* has been fine-tuned with a rather small dataset to work better on license plates. This dataset can be found [here](https://github.com/RobertLucian/license-plate-dataset). This *YOLOv3* model has in turn been trained using [this](https://github.com/experiencor/keras-yolo3) GitHub project. To get more details about our fine-tuned model, check the project's description page. + +The other two models, *CRAFT* and *CRNN*, can be found in [keras-ocr](https://github.com/faustomorales/keras-ocr). + +## Deployment - Lite Version + +A lite version of the deployment is available with `cortex_lite.yaml`. The lite version accepts an image as input and returns an image with the recognized license plates overlaid on top. A single GPU is required for this deployment (e.g. a `g4dn.xlarge` instance). + +Once the cortex cluster is created, run + +```bash +cortex deploy cortex_lite.yaml +``` + +And monitor the API with + +```bash +cortex get --watch +``` + +To run an inference on the lite version, the only tools you need are `curl`, `sed` and `base64`. The API expects a URL pointing to the image on which the inference is run.
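+
+If you'd rather not use shell tools, the same request can be made from Python. The snippet below is just an illustrative sketch (it is not part of the example); it assumes the endpoint exported below and mirrors the `curl` pipeline shown further down: a JSON payload of the form `{"url": ...}` and a base64-encoded annotated JPEG as the response.
+
+```python
+# Illustrative sketch -- equivalent to the curl | sed | base64 pipeline below.
+import base64
+import os
+
+import requests
+
+endpoint = os.environ["ENDPOINT"]  # the lite API's endpoint (see `cortex get`)
+image_url = "https://i.imgur.com/r8xdI7P.png"
+
+# the API accepts {"url": ...} and responds with the annotated image as a base64 string
+resp = requests.post(endpoint, json={"url": image_url})
+resp.raise_for_status()
+
+with open("prediction.jpg", "wb") as f:
+    f.write(base64.b64decode(resp.json()))
+```
+
+Either way, the prediction itself runs entirely on the server.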
This includes the detection of license plates with *YOLOv3* and the recognition part with *CRAFT* + *CRNN* models. + +Export the endpoint & the image's URL by running + +```bash +export ENDPOINT=your-api-endpoint +export IMAGE_URL=https://i.imgur.com/r8xdI7P.png +``` + +Then run the following piped commands + +```bash +curl "${ENDPOINT}" -X POST -H "Content-Type: application/json" -d '{"url":"'${IMAGE_URL}'"}' | +sed 's/"//g' | +base64 -d > prediction.jpg +``` + +The resulting image is the same as the one in [Verifying the Deployed APIs](#verifying-the-deployed-apis). + +For another prediction, let's use a generic image from the web. Export [this image's URL link](https://i.imgur.com/mYuvMOs.jpg) and re-run the prediction. This is what we get. + +![annotated sample image](https://i.imgur.com/tg1PE1E.jpg) + +*The above prediction has the bounding boxes colored differently to distinguish them from the cars' red bodies* + +## Deployment - Full Version + +The recommended number of instances to run this smoothly on a video stream is about 12 GPU instances (2 GPU instances for *YOLOv3* and 10 for *CRNN* + *CRAFT*). `cortex_full.yaml` is already set up to use these 12 instances. Note: this is the optimal number of instances when using the `g4dn.xlarge` instance type. For the client to work smoothly, the number of processes per replica can be adjusted, especially for `p3` or `g4` instances, where the GPU has a lot of compute capacity. + +If you don't have access to this many GPU-equipped instances, you could just lower the number and expect dropped frames. It will still prove the point, albeit at a much lower framerate and with higher latency. More on that [here](https://github.com/RobertLucian/cortex-license-plate-reader-client). + +Then after the cortex cluster is created, run + +```bash +cortex deploy cortex_full.yaml +``` + +And monitor the APIs with + +```bash +cortex get --watch +``` + +We can run the inference on a sample image to verify that both APIs are working as expected before we move on to running the client. Here is an example image: + +![sample image](https://i.imgur.com/r8xdI7P.png) + +On your local machine run: + +``` +pip install requests click opencv-contrib-python numpy +``` + +and run the following script with Python >= `3.6.x`. The application expects the argument to be a link to an image. The following link is for the above sample image. + + +```bash +export YOLOV3_ENDPOINT=api_endpoint_for_yolov3 +export CRNN_ENDPOINT=api_endpoint_for_crnn +python sample_inference.py "https://i.imgur.com/r8xdI7P.png" +``` + +If all goes well, then a prediction will be saved as a JPEG image to disk. By default, it's saved to `prediction.jpg`. Here is the output for the image above: + +![annotated sample image](https://i.imgur.com/JaD4A05.jpg) + +You can use `python sample_inference.py --help` to find out more. Keep in mind that any detected license plates with a confidence score lower than 80% are discarded. + +If this verification works, then we can move on and run the main client. + +### Running the Client + +Once the APIs are up and running, launch the streaming client by following the instructions at [robertlucian/cortex-license-plate-reader-client](https://github.com/RobertLucian/cortex-license-plate-reader-client). + +*Note: The client is kept in a separate repository to maintain the cortex project clean and focused. 
Keeping some of the projects that are more complex out of this repository can reduce the confusion.* + +## Customization/Optimization + +### Uploading the Model to S3 + +The only model to upload to an S3 bucket (for Cortex to deploy) is the *YOLOv3* model. The other two models are downloaded automatically upon deploying the service. + +If you would like to host the model from your own bucket, or if you want to fine tune the model for your needs, here's what you can do. + +#### Lite Version + +Download the *Keras* model: + +```bash +wget -O license_plate.h5 "https://www.dropbox.com/s/vsvgoyricooksyv/license_plate.h5?dl=0" +``` + +And then upload it to your bucket (also make sure [cortex_lite.yaml](cortex_lite.yaml) points to this bucket): + +```bash +BUCKET=my-bucket +YOLO3_PATH=examples/tensorflow/license-plate-reader/yolov3_keras +aws s3 cp license_plate.h5 "s3://$BUCKET/$YOLO3_PATH/model.h5" +``` + +#### Full Version + +Download the *SavedModel*: + +```bash +wget -O yolov3.zip "https://www.dropbox.com/sh/4ltffycnzfeul01/AAB7Xdmmi59w0EPOwhQ1nkvua/yolov3?dl=0" +``` + +Unzip it: + +```bash +unzip yolov3.zip -d yolov3 +``` + +And then upload it to your bucket (also make sure [cortex_full.yaml](cortex_full.yaml) points to this bucket): + +```bash +BUCKET=my-bucket +YOLO3_PATH=examples/tensorflow/license-plate-reader/yolov3_tf +aws s3 cp yolov3/ "s3://$BUCKET/$YOLO3_PATH" --recursive +``` + +### Configuring YOLOv3 Predictor + +The `yolov3` API predictor requires a [config.json](config.json) file to configure the input size of the image (dependent on the model's architecture), the anchor boxes, the object threshold, and the IoU threshold. All of these are already set appropriately so no other change is required. + +The configuration file's content is based on [this](https://github.com/experiencor/keras-yolo3/blob/bf37c87561caeccc4f1b879e313d4a3fec1b987e/zoo/config_license_plates.json#L2-L7). + +### Opportunities for performance improvements + +One way to reduce the inference time is to convert the models to use FP16/BFP16 (in mixed mode or not) and then choose the accelerator that gives the best performance in half precision mode - i.e. T4/V100. A speedup of an order of magnitude can be expected. diff --git a/test/tensorflow/license-plate-reader/config.json b/test/tensorflow/license-plate-reader/config.json new file mode 100644 index 0000000000..0ff64d0a98 --- /dev/null +++ b/test/tensorflow/license-plate-reader/config.json @@ -0,0 +1,8 @@ +{ + "labels": ["license-plate"], + "net_h" : 416, + "net_w" : 416, + "anchors" : [15,6, 18,8, 22,9, 27,11, 32,13, 41,17, 54,21, 66,27, 82,33], + "obj_thresh" : 0.8, + "nms_thresh" : 0.01 +} diff --git a/test/tensorflow/license-plate-reader/cortex_full.yaml b/test/tensorflow/license-plate-reader/cortex_full.yaml new file mode 100644 index 0000000000..f16f6ab934 --- /dev/null +++ b/test/tensorflow/license-plate-reader/cortex_full.yaml @@ -0,0 +1,35 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: yolov3 + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor_yolo.py + model_path: s3://cortex-examples/tensorflow/license-plate-reader/yolov3_tf/ + processes_per_replica: 4 + threads_per_process: 3 + signature_key: serving_default + config: + model_config: config.json + compute: + cpu: 1 + gpu: 1 + mem: 8G + autoscaling: + min_replicas: 2 + max_replicas: 2 + +- name: crnn + kind: RealtimeAPI + predictor: + type: python + path: predictor_crnn.py + processes_per_replica: 1 + threads_per_process: 1 + compute: + cpu: 1 + gpu: 1 + mem: 8G + autoscaling: + min_replicas: 10 + max_replicas: 10 diff --git a/test/tensorflow/license-plate-reader/cortex_lite.yaml b/test/tensorflow/license-plate-reader/cortex_lite.yaml new file mode 100644 index 0000000000..8e07cd8280 --- /dev/null +++ b/test/tensorflow/license-plate-reader/cortex_lite.yaml @@ -0,0 +1,14 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: license-plate-reader + kind: RealtimeAPI + predictor: + type: python + path: predictor_lite.py + config: + yolov3: s3://cortex-examples/tensorflow/license-plate-reader/yolov3_keras/model.h5 + yolov3_model_config: config.json + compute: + cpu: 1 + gpu: 1 + mem: 4G diff --git a/test/tensorflow/license-plate-reader/predictor_crnn.py b/test/tensorflow/license-plate-reader/predictor_crnn.py new file mode 100644 index 0000000000..aa543f45cf --- /dev/null +++ b/test/tensorflow/license-plate-reader/predictor_crnn.py @@ -0,0 +1,44 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import cv2 +import numpy as np +import keras_ocr +import base64 +import pickle +import tensorflow as tf + + +class PythonPredictor: + def __init__(self, config): + # limit memory usage on each process + for gpu in tf.config.list_physical_devices("GPU"): + tf.config.experimental.set_memory_growth(gpu, True) + + # keras-ocr will automatically download pretrained + # weights for the detector and recognizer. + self.pipeline = keras_ocr.pipeline.Pipeline() + + def predict(self, payload): + # preprocess the images w/ license plates (LPs) + imgs = payload["imgs"] + imgs = base64.b64decode(imgs.encode("utf-8")) + jpgs_as_np = pickle.loads(imgs) + images = [cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR) for jpg_as_np in jpgs_as_np] + + # run batch inference + try: + prediction_groups = self.pipeline.recognize(images) + except ValueError: + # exception can occur when the images are too small + prediction_groups = [] + + image_list = [] + for img_predictions in prediction_groups: + boxes_per_image = [] + for predictions in img_predictions: + boxes_per_image.append([predictions[0], predictions[1].tolist()]) + image_list.append(boxes_per_image) + + lps = {"license-plates": image_list} + + return lps diff --git a/test/tensorflow/license-plate-reader/predictor_lite.py b/test/tensorflow/license-plate-reader/predictor_lite.py new file mode 100644 index 0000000000..0a71b775fa --- /dev/null +++ b/test/tensorflow/license-plate-reader/predictor_lite.py @@ -0,0 +1,120 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import boto3, base64, cv2, re, os, requests, json +import keras_ocr + +from botocore import UNSIGNED +from botocore.client import Config +from tensorflow.keras.models import load_model +import utils.utils as utils +import utils.bbox as bbox_utils +import utils.preprocess as preprocess_utils + + +class PythonPredictor: + def __init__(self, config): + # download yolov3 model + bucket, key = re.match("s3://(.+?)/(.+)", config["yolov3"]).groups() + + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + model_path = "/tmp/model.h5" + s3.download_file(bucket, key, model_path) + + # load yolov3 model + self.yolov3_model = load_model(model_path) + + # get configuration for yolov3 model + with open(config["yolov3_model_config"]) as json_file: + data = json.load(json_file) + for key in data: + setattr(self, key, data[key]) + self.box_confidence_score = 0.8 + + # keras-ocr automatically downloads the pretrained + # weights for the detector and recognizer + self.recognition_model_pipeline = keras_ocr.pipeline.Pipeline() + + def predict(self, payload): + # download image + img_url = payload["url"] + image = preprocess_utils.get_url_image(img_url) + + # detect the bounding boxes + boxes = utils.get_yolo_boxes( + self.yolov3_model, + image, + self.net_h, + self.net_w, + self.anchors, + self.obj_thresh, + self.nms_thresh, + len(self.labels), + tensorflow_model=False, + ) + + # purge bounding boxes with a low confidence score + aux = [] + for b in boxes: + label = -1 + for i in range(len(b.classes)): + if b.classes[i] > self.box_confidence_score: + label = i + if label >= 0: + aux.append(b) + boxes = aux + del aux + + # if bounding boxes have been detected + dec_words = [] + if len(boxes) > 0: + # create set of images of the detected license plates + lps = [] + for b in boxes: + lp = image[b.ymin : b.ymax, b.xmin : b.xmax] + lps.append(lp) + + # run batch inference + try: + prediction_groups = self.recognition_model_pipeline.recognize(lps) + except ValueError: + # exception can occur when the images are too small + prediction_groups = [] + + # process pipeline output + image_list = [] + for img_predictions in prediction_groups: + boxes_per_image = [] + for predictions in img_predictions: + boxes_per_image.append([predictions[0], predictions[1].tolist()]) + image_list.append(boxes_per_image) + + # reorder text within detected LPs based on horizontal position + dec_lps = preprocess_utils.reorder_recognized_words(image_list) + for dec_lp in dec_lps: + dec_words.append([word[0] for word in dec_lp]) + + # if there are no recognized LPs, then don't draw them + if len(dec_words) == 0: + dec_words = [[] for i in range(len(boxes))] + + # draw predictions as overlays on the source image + draw_image = bbox_utils.draw_boxes( + image, + boxes, + overlay_text=dec_words, + labels=["LP"], + obj_thresh=self.box_confidence_score, + ) + + # image represented in bytes + byte_im = preprocess_utils.image_to_jpeg_bytes(draw_image) + + # encode image + image_enc = base64.b64encode(byte_im).decode("utf-8") + + # image with draw boxes overlayed + return image_enc diff --git a/test/tensorflow/license-plate-reader/predictor_yolo.py b/test/tensorflow/license-plate-reader/predictor_yolo.py new file mode 100644 index 0000000000..7648b66960 --- /dev/null +++ 
b/test/tensorflow/license-plate-reader/predictor_yolo.py @@ -0,0 +1,46 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import json +import base64 +import numpy as np +import cv2 +import pickle +import utils.utils as utils + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config): + self.client = tensorflow_client + + with open(config["model_config"]) as json_file: + data = json.load(json_file) + for key in data: + setattr(self, key, data[key]) + + def predict(self, payload): + # decode the payload + img = payload["img"] + img = base64.b64decode(img) + jpg_as_np = np.frombuffer(img, dtype=np.uint8) + image = cv2.imdecode(jpg_as_np, flags=cv2.IMREAD_COLOR) + + # detect the bounding boxes + boxes = utils.get_yolo_boxes( + self.client, + image, + self.net_h, + self.net_w, + self.anchors, + self.obj_thresh, + self.nms_thresh, + len(self.labels), + ) + + # package the response + response = {"boxes": []} + for box in boxes: + response["boxes"].append( + [box.xmin, box.ymin, box.xmax, box.ymax, float(box.c), box.classes.tolist()] + ) + + return response diff --git a/test/tensorflow/license-plate-reader/requirements.txt b/test/tensorflow/license-plate-reader/requirements.txt new file mode 100644 index 0000000000..0fb87fcf23 --- /dev/null +++ b/test/tensorflow/license-plate-reader/requirements.txt @@ -0,0 +1,5 @@ +keras-ocr==0.8.5 +keras==2.3.1 +tensorflow==2.3.0 +scipy==1.4.1 +numpy==1.18.* diff --git a/test/tensorflow/license-plate-reader/sample_inference.py b/test/tensorflow/license-plate-reader/sample_inference.py new file mode 100644 index 0000000000..11e217ec78 --- /dev/null +++ b/test/tensorflow/license-plate-reader/sample_inference.py @@ -0,0 +1,100 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import click, cv2, requests, pickle, base64, json +import numpy as np +import utils.bbox as bbox_utils +import utils.preprocess as preprocess_utils + + +@click.command( + help=( + "Identify license plates in a given image" + " while outsourcing the predictions using the REST API endpoints." + " Both API endpoints have to be exported as environment variables." 
+ ) +) +@click.argument("img_url_src", type=str) +@click.argument("yolov3_endpoint", envvar="YOLOV3_ENDPOINT") +@click.argument("crnn_endpoint", envvar="CRNN_ENDPOINT") +@click.option( + "--output", + "-o", + type=str, + default="prediction.jpg", + show_default=True, + help="File to save the prediction to.", +) +def main(img_url_src, yolov3_endpoint, crnn_endpoint, output): + + # get the image in bytes representation + image = preprocess_utils.get_url_image(img_url_src) + image_bytes = preprocess_utils.image_to_jpeg_bytes(image) + + # encode image + image_enc = base64.b64encode(image_bytes).decode("utf-8") + image_dump = json.dumps({"img": image_enc}) + + # make yolov3 api request + resp = requests.post( + yolov3_endpoint, data=image_dump, headers={"content-type": "application/json"} + ) + + # parse response + boxes_raw = resp.json()["boxes"] + boxes = [] + for b in boxes_raw: + box = bbox_utils.BoundBox(*b) + boxes.append(box) + + # purge bounding boxes with a low confidence score + confidence_score = 0.8 + aux = [] + for b in boxes: + label = -1 + for i in range(len(b.classes)): + if b.classes[i] > confidence_score: + label = i + if label >= 0: + aux.append(b) + boxes = aux + del aux + + dec_words = [] + if len(boxes) > 0: + # create set of images of the detected license plates + lps = [] + for b in boxes: + lp = image[b.ymin : b.ymax, b.xmin : b.xmax] + jpeg = preprocess_utils.image_to_jpeg_nparray(lp) + lps.append(jpeg) + + # encode the cropped license plates + lps = pickle.dumps(lps, protocol=0) + lps_enc = base64.b64encode(lps).decode("utf-8") + lps_dump = json.dumps({"imgs": lps_enc}) + + # make crnn api request + resp = requests.post( + crnn_endpoint, data=lps_dump, headers={"content-type": "application/json"} + ) + + # parse the response + dec_lps = resp.json()["license-plates"] + dec_lps = preprocess_utils.reorder_recognized_words(dec_lps) + for dec_lp in dec_lps: + dec_words.append([word[0] for word in dec_lp]) + + if len(dec_words) == 0: + dec_words = [[] for i in range(len(boxes))] + + # draw predictions as overlays on the source image + draw_image = bbox_utils.draw_boxes( + image, boxes, overlay_text=dec_words, labels=["LP"], obj_thresh=confidence_score + ) + + # and save it to disk + cv2.imwrite(output, draw_image) + + +if __name__ == "__main__": + main() diff --git a/test/tensorflow/license-plate-reader/utils/__init__.py b/test/tensorflow/license-plate-reader/utils/__init__.py new file mode 100644 index 0000000000..5f47d63e43 --- /dev/null +++ b/test/tensorflow/license-plate-reader/utils/__init__.py @@ -0,0 +1 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) diff --git a/test/tensorflow/license-plate-reader/utils/bbox.py b/test/tensorflow/license-plate-reader/utils/bbox.py new file mode 100644 index 0000000000..de9c7ef8c0 --- /dev/null +++ b/test/tensorflow/license-plate-reader/utils/bbox.py @@ -0,0 +1,111 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import numpy as np +import cv2 +from .colors import get_color + + +class BoundBox: + def __init__(self, xmin, ymin, xmax, ymax, c=None, classes=None): + self.xmin = xmin + self.ymin = ymin + self.xmax = xmax + self.ymax = ymax + + self.c = c + self.classes = classes + + self.label = -1 + self.score = -1 + + def get_label(self): + if self.label == -1: + self.label = np.argmax(self.classes) + + return self.label + + def get_score(self): + if self.score == -1: + self.score = self.classes[self.get_label()] + + return self.score + + +def _interval_overlap(interval_a, interval_b): + x1, x2 = interval_a + x3, x4 = interval_b + + if x3 < x1: + if x4 < x1: + return 0 + else: + return min(x2, x4) - x1 + else: + if x2 < x3: + return 0 + else: + return min(x2, x4) - x3 + + +def bbox_iou(box1, box2): + intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax]) + intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax]) + + intersect = intersect_w * intersect_h + + w1, h1 = box1.xmax - box1.xmin, box1.ymax - box1.ymin + w2, h2 = box2.xmax - box2.xmin, box2.ymax - box2.ymin + + union = w1 * h1 + w2 * h2 - intersect + + return float(intersect) / union + + +def draw_boxes(image, boxes, overlay_text, labels, obj_thresh, quiet=True): + for box, overlay in zip(boxes, overlay_text): + label_str = "" + label = -1 + + for i in range(len(labels)): + if box.classes[i] > obj_thresh: + if label_str != "": + label_str += ", " + label_str += labels[i] + " " + str(round(box.get_score() * 100, 2)) + "%" + label = i + if not quiet: + print(label_str) + + if label >= 0: + if len(overlay) > 0: + text = label_str + ": [" + " ".join(overlay) + "]" + else: + text = label_str + text = text.upper() + text_size = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, 1.1e-3 * image.shape[0], 5) + width, height = text_size[0][0], text_size[0][1] + region = np.array( + [ + [box.xmin - 3, box.ymin], + [box.xmin - 3, box.ymin - height - 26], + [box.xmin + width + 13, box.ymin - height - 26], + [box.xmin + width + 13, box.ymin], + ], + dtype="int32", + ) + + # cv2.rectangle(img=image, pt1=(box.xmin,box.ymin), pt2=(box.xmax,box.ymax), color=get_color(label), thickness=5) + rec = (box.xmin, box.ymin, box.xmax - box.xmin, box.ymax - box.ymin) + rec = tuple(int(i) for i in rec) + cv2.rectangle(img=image, rec=rec, color=get_color(label), thickness=3) + cv2.fillPoly(img=image, pts=[region], color=get_color(label)) + cv2.putText( + img=image, + text=text, + org=(box.xmin + 13, box.ymin - 13), + fontFace=cv2.FONT_HERSHEY_SIMPLEX, + fontScale=1e-3 * image.shape[0], + color=(0, 0, 0), + thickness=1, + ) + + return image diff --git a/test/tensorflow/license-plate-reader/utils/colors.py b/test/tensorflow/license-plate-reader/utils/colors.py new file mode 100644 index 0000000000..2902c4e5aa --- /dev/null +++ b/test/tensorflow/license-plate-reader/utils/colors.py @@ -0,0 +1,100 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + + +def get_color(label): + """Return a color from a set of predefined colors. Contains 80 colors in total. + code originally from https://github.com/fizyr/keras-retinanet/ + Args + label: The label to get the color for. + Returns + A list of three values representing a RGB color. 
+ """ + if label < len(colors): + return colors[label] + else: + print("Label {} has no color, returning default.".format(label)) + return (0, 255, 0) + + +colors = [ + [31, 0, 255], + [0, 159, 255], + [255, 95, 0], + [255, 19, 0], + [255, 0, 0], + [255, 38, 0], + [0, 255, 25], + [255, 0, 133], + [255, 172, 0], + [108, 0, 255], + [0, 82, 255], + [0, 255, 6], + [255, 0, 152], + [223, 0, 255], + [12, 0, 255], + [0, 255, 178], + [108, 255, 0], + [184, 0, 255], + [255, 0, 76], + [146, 255, 0], + [51, 0, 255], + [0, 197, 255], + [255, 248, 0], + [255, 0, 19], + [255, 0, 38], + [89, 255, 0], + [127, 255, 0], + [255, 153, 0], + [0, 255, 255], + [0, 255, 216], + [0, 255, 121], + [255, 0, 248], + [70, 0, 255], + [0, 255, 159], + [0, 216, 255], + [0, 6, 255], + [0, 63, 255], + [31, 255, 0], + [255, 57, 0], + [255, 0, 210], + [0, 255, 102], + [242, 255, 0], + [255, 191, 0], + [0, 255, 63], + [255, 0, 95], + [146, 0, 255], + [184, 255, 0], + [255, 114, 0], + [0, 255, 235], + [255, 229, 0], + [0, 178, 255], + [255, 0, 114], + [255, 0, 57], + [0, 140, 255], + [0, 121, 255], + [12, 255, 0], + [255, 210, 0], + [0, 255, 44], + [165, 255, 0], + [0, 25, 255], + [0, 255, 140], + [0, 101, 255], + [0, 255, 82], + [223, 255, 0], + [242, 0, 255], + [89, 0, 255], + [165, 0, 255], + [70, 255, 0], + [255, 0, 172], + [255, 76, 0], + [203, 255, 0], + [204, 0, 255], + [255, 0, 229], + [255, 133, 0], + [127, 0, 255], + [0, 235, 255], + [0, 255, 197], + [255, 0, 191], + [0, 44, 255], + [50, 255, 0], +] diff --git a/test/tensorflow/license-plate-reader/utils/preprocess.py b/test/tensorflow/license-plate-reader/utils/preprocess.py new file mode 100644 index 0000000000..5e40a35719 --- /dev/null +++ b/test/tensorflow/license-plate-reader/utils/preprocess.py @@ -0,0 +1,59 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import numpy as np +import cv2, requests +from statistics import mean + + +def get_url_image(url_image): + """ + Get numpy image from URL image. + """ + resp = requests.get(url_image, stream=True).raw + image = np.asarray(bytearray(resp.read()), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + return image + + +def image_to_jpeg_nparray(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): + """ + Convert numpy image to jpeg numpy vector. + """ + is_success, im_buf_arr = cv2.imencode(".jpg", image, quality) + return im_buf_arr + + +def image_to_jpeg_bytes(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): + """ + Convert numpy image to bytes-encoded jpeg image. + """ + buf = image_to_jpeg_nparray(image, quality) + byte_im = buf.tobytes() + return byte_im + + +def reorder_recognized_words(detected_images): + """ + Reorder the detected words in each image based on the average horizontal position of each word. + Sorting them in ascending order. 
+ """ + + reordered_images = [] + for detected_image in detected_images: + + # computing the mean average position for each word + mean_horizontal_positions = [] + for words in detected_image: + box = words[1] + y_positions = [point[0] for point in box] + mean_y_position = mean(y_positions) + mean_horizontal_positions.append(mean_y_position) + indexes = np.argsort(mean_horizontal_positions) + + # and reordering them + reordered = [] + for index, words in zip(indexes, detected_image): + reordered.append(detected_image[index]) + reordered_images.append(reordered) + + return reordered_images diff --git a/test/tensorflow/license-plate-reader/utils/utils.py b/test/tensorflow/license-plate-reader/utils/utils.py new file mode 100644 index 0000000000..9d07b289e0 --- /dev/null +++ b/test/tensorflow/license-plate-reader/utils/utils.py @@ -0,0 +1,160 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import cv2 +import numpy as np +import math +from .bbox import BoundBox, bbox_iou +from scipy.special import expit + + +def _sigmoid(x): + return expit(x) + + +def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w): + if (float(net_w) / image_w) < (float(net_h) / image_h): + new_w = net_w + new_h = (image_h * net_w) / image_w + else: + new_h = net_w + new_w = (image_w * net_h) / image_h + + for i in range(len(boxes)): + x_offset, x_scale = (net_w - new_w) / 2.0 / net_w, float(new_w) / net_w + y_offset, y_scale = (net_h - new_h) / 2.0 / net_h, float(new_h) / net_h + + boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w) + boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w) + boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h) + boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h) + + +def do_nms(boxes, nms_thresh): + if len(boxes) > 0: + nb_class = len(boxes[0].classes) + else: + return + + for c in range(nb_class): + sorted_indices = np.argsort([-box.classes[c] for box in boxes]) + + for i in range(len(sorted_indices)): + index_i = sorted_indices[i] + + if boxes[index_i].classes[c] == 0: + continue + + for j in range(i + 1, len(sorted_indices)): + index_j = sorted_indices[j] + + if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh: + boxes[index_j].classes[c] = 0 + + +def decode_netout(netout, anchors, obj_thresh, net_h, net_w): + grid_h, grid_w = netout.shape[:2] + nb_box = 3 + netout = netout.reshape((grid_h, grid_w, nb_box, -1)) + nb_class = netout.shape[-1] - 5 + + boxes = [] + + netout[..., :2] = _sigmoid(netout[..., :2]) + netout[..., 4] = _sigmoid(netout[..., 4]) + netout[..., 5:] = netout[..., 4][..., np.newaxis] * _softmax(netout[..., 5:]) + netout[..., 5:] *= netout[..., 5:] > obj_thresh + + for i in range(grid_h * grid_w): + row = i // grid_w + col = i % grid_w + + for b in range(nb_box): + # 4th element is objectness score + objectness = netout[row, col, b, 4] + + if objectness <= obj_thresh: + continue + + # first 4 elements are x, y, w, and h + x, y, w, h = netout[row, col, b, :4] + + x = (col + x) / grid_w # center position, unit: image width + y = (row + y) / grid_h # center position, unit: image height + w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width + h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height + + # last elements are class probabilities + classes = netout[row, col, b, 5:] + + box = BoundBox(x - w / 2, y - h / 
2, x + w / 2, y + h / 2, objectness, classes) + + boxes.append(box) + + return boxes + + +def preprocess_input(image, net_h, net_w): + new_h, new_w, _ = image.shape + + # determine the new size of the image + if (float(net_w) / new_w) < (float(net_h) / new_h): + new_h = (new_h * net_w) // new_w + new_w = net_w + else: + new_w = (new_w * net_h) // new_h + new_h = net_h + + # resize the image to the new size + resized = cv2.resize(image[:, :, ::-1] / 255.0, (new_w, new_h)) + + # embed the image into the standard letter box + new_image = np.ones((net_h, net_w, 3)) * 0.5 + new_image[ + (net_h - new_h) // 2 : (net_h + new_h) // 2, (net_w - new_w) // 2 : (net_w + new_w) // 2, : + ] = resized + new_image = np.expand_dims(new_image, 0) + + return new_image + + +def get_yolo_boxes( + model, image, net_h, net_w, anchors, obj_thresh, nms_thresh, classes, tensorflow_model=True +): + # preprocess the input + image_h, image_w, _ = image.shape + batch_input = np.zeros((1, net_h, net_w, 3)) + batch_input[0] = preprocess_input(image, net_h, net_w) + + # run the prediction + if tensorflow_model: + output = model.predict({"input_1": batch_input}) + yolos = [output["conv_81"], output["conv_93"], output["conv_105"]] + filters = 3 * (5 + classes) + for i in range(len(yolos)): + length = len(yolos[i]) + box_size = int(math.sqrt(length / filters)) + yolos[i] = np.array(yolos[i]).reshape((box_size, box_size, filters)) + else: + output = model.predict_on_batch(batch_input) + yolos = [output[0][0], output[1][0], output[2][0]] + + boxes = [] + # decode the output of the network + for j in range(len(yolos)): + yolo_anchors = anchors[(2 - j) * 6 : (3 - j) * 6] # config['model']['anchors'] + boxes += decode_netout(yolos[j], yolo_anchors, obj_thresh, net_h, net_w) + + # correct the sizes of the bounding boxes + correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w) + + # suppress non-maximal boxes + do_nms(boxes, nms_thresh) + + return boxes + + +def _softmax(x, axis=-1): + x = x - np.amax(x, axis, keepdims=True) + e_x = np.exp(x) + + return e_x / e_x.sum(axis, keepdims=True) diff --git a/test/tensorflow/multi-model-classifier/README.md b/test/tensorflow/multi-model-classifier/README.md new file mode 100644 index 0000000000..631f800179 --- /dev/null +++ b/test/tensorflow/multi-model-classifier/README.md @@ -0,0 +1,69 @@ +# Multi-Model Classifier API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example deploys Iris, ResNet50 and Inception models in one API. Query parameters are used for selecting the model. + +The example can be run on both CPU and on GPU hardware. + +## Sample Prediction + +Deploy the model by running: + +```bash +cortex deploy +``` + +And wait for it to become live by tracking its status with `cortex get --watch`. + +Once the API has been successfully deployed, export the APIs endpoint. You can get the API's endpoint by running `cortex get multi-model-classifier`. 
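+
+The `model` query parameter selects which of the three models serves a given request. As a rough Python illustration (a sketch only, not part of the example; it assumes the endpoint exported below and the same payloads and responses as the `curl` commands that follow):
+
+```python
+# Illustrative sketch -- equivalent to the curl examples below.
+import os
+
+import requests
+
+endpoint = os.environ["ENDPOINT"]
+
+# the image classifiers take {"url": ...} (see sample-image.json)
+resp = requests.post(endpoint, params={"model": "resnet50"}, json={"url": "https://i.imgur.com/zovGIKD.png"})
+print(resp.json())  # expected: {"label": "sports_car"}
+
+# the iris classifier takes {"input": {...}} (see sample-iris.json)
+iris_payload = {
+    "input": {
+        "sepal_length": 5.2,
+        "sepal_width": 3.6,
+        "petal_length": 1.4,
+        "petal_width": 0.3,
+    }
+}
+print(requests.post(endpoint, params={"model": "iris"}, json=iris_payload).json())  # expected: {"label": "setosa"}
+```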
+ +```bash +export ENDPOINT=your-api-endpoint +``` + +When making a prediction with [sample-image.json](sample-image.json), the following image will be used: + +![sports car](https://i.imgur.com/zovGIKD.png) + +### ResNet50 Classifier + +Make a request to the ResNet50 model: + +```bash +curl "${ENDPOINT}?model=resnet50" -X POST -H "Content-Type: application/json" -d @sample-image.json +``` + +The expected response is: + +```json +{"label": "sports_car"} +``` + +### Inception Classifier + +Make a request to the Inception model: + +```bash +curl "${ENDPOINT}?model=inception" -X POST -H "Content-Type: application/json" -d @sample-image.json +``` + +The expected response is: + +```json +{"label": "sports_car"} +``` + +### Iris Classifier + +Make a request to the Iris model: + +```bash +curl "${ENDPOINT}?model=iris" -X POST -H "Content-Type: application/json" -d @sample-iris.json +``` + +The expected response is: + +```json +{"label": "setosa"} +``` diff --git a/test/tensorflow/multi-model-classifier/cortex.yaml b/test/tensorflow/multi-model-classifier/cortex.yaml new file mode 100644 index 0000000000..ef99bc941e --- /dev/null +++ b/test/tensorflow/multi-model-classifier/cortex.yaml @@ -0,0 +1,30 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: multi-model-classifier + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + models: + paths: + - name: inception + model_path: s3://cortex-examples/tensorflow/image-classifier/inception/ + - name: iris + model_path: s3://cortex-examples/tensorflow/iris-classifier/nn/ + - name: resnet50 + model_path: s3://cortex-examples/tensorflow/resnet50/ + config: + models: + iris: + labels: ["setosa", "versicolor", "virginica"] + resnet50: + input_shape: [224, 224] + input_key: input + output_key: output + inception: + input_shape: [224, 224] + input_key: images + output_key: classes + image-classifier-classes: https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json + compute: + mem: 2G diff --git a/test/tensorflow/multi-model-classifier/predictor.py b/test/tensorflow/multi-model-classifier/predictor.py new file mode 100644 index 0000000000..6577777037 --- /dev/null +++ b/test/tensorflow/multi-model-classifier/predictor.py @@ -0,0 +1,62 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import requests +import numpy as np +import cv2 + + +def get_url_image(url_image): + """ + Get numpy image from URL image. 
+ """ + resp = requests.get(url_image, stream=True).raw + image = np.asarray(bytearray(resp.read()), dtype="uint8") + image = cv2.imdecode(image, cv2.IMREAD_COLOR) + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + return image + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config): + self.client = tensorflow_client + + # for image classifiers + classes = requests.get(config["image-classifier-classes"]).json() + self.image_classes = [classes[str(k)][1] for k in range(len(classes))] + + # assign "models"' key value to self.config for ease of use + self.config = config["models"] + + # for iris classifier + self.iris_labels = self.config["iris"]["labels"] + + def predict(self, payload, query_params): + model_name = query_params["model"] + predicted_label = None + + if model_name == "iris": + prediction = self.client.predict(payload["input"], model_name) + predicted_class_id = int(prediction["class_ids"][0]) + predicted_label = self.iris_labels[predicted_class_id] + + elif model_name in ["resnet50", "inception"]: + predicted_label = self.predict_image_classifier(model_name, payload["url"]) + + return {"label": predicted_label} + + def predict_image_classifier(self, model, img_url): + img = get_url_image(img_url) + img = cv2.resize( + img, tuple(self.config[model]["input_shape"]), interpolation=cv2.INTER_NEAREST + ) + if model == "inception": + img = img.astype("float32") / 255 + img = {self.config[model]["input_key"]: img[np.newaxis, ...]} + + results = self.client.predict(img, model)[self.config[model]["output_key"]] + result = np.argmax(results) + if model == "inception": + result -= 1 + predicted_label = self.image_classes[result] + + return predicted_label diff --git a/test/tensorflow/multi-model-classifier/requirements.txt b/test/tensorflow/multi-model-classifier/requirements.txt new file mode 100644 index 0000000000..7e2fba5e6c --- /dev/null +++ b/test/tensorflow/multi-model-classifier/requirements.txt @@ -0,0 +1 @@ +Pillow diff --git a/test/tensorflow/multi-model-classifier/sample-image.json b/test/tensorflow/multi-model-classifier/sample-image.json new file mode 100644 index 0000000000..95200916c7 --- /dev/null +++ b/test/tensorflow/multi-model-classifier/sample-image.json @@ -0,0 +1,3 @@ +{ + "url": "https://i.imgur.com/zovGIKD.png" +} diff --git a/test/tensorflow/multi-model-classifier/sample-iris.json b/test/tensorflow/multi-model-classifier/sample-iris.json new file mode 100644 index 0000000000..67c03827f2 --- /dev/null +++ b/test/tensorflow/multi-model-classifier/sample-iris.json @@ -0,0 +1,8 @@ +{ + "input": { + "sepal_length": 5.2, + "sepal_width": 3.6, + "petal_length": 1.4, + "petal_width": 0.3 + } +} diff --git a/test/tensorflow/sentiment-analyzer/README.md b/test/tensorflow/sentiment-analyzer/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/tensorflow/sentiment-analyzer/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. 
diff --git a/test/tensorflow/sentiment-analyzer/bert.ipynb b/test/tensorflow/sentiment-analyzer/bert.ipynb new file mode 100644 index 0000000000..27ca8c67b1 --- /dev/null +++ b/test/tensorflow/sentiment-analyzer/bert.ipynb @@ -0,0 +1,1007 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "bert.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "code", + "metadata": { + "id": "j0a4mTk9o1Qg", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Modified source from https://colab.research.google.com/github/google-research/bert/blob/master/predicting_movie_reviews_with_bert_on_tf_hub.ipynb\n", + "\n", + "# Copyright 2019 Google Inc.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# http://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dCpvgG0vwXAZ", + "colab_type": "text" + }, + "source": [ + "#Predicting Movie Review Sentiment with BERT on TF Hub", + "\n", + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xiYrZKaHwV81", + "colab_type": "text" + }, + "source": [ + "If you’ve been following Natural Language Processing over the past year, you’ve probably heard of BERT: Bidirectional Encoder Representations from Transformers. It’s a neural network architecture designed by Google researchers that’s totally transformed what’s state-of-the-art for NLP tasks, like text classification, translation, summarization, and question answering.\n", + "\n", + "Now that BERT's been added to [TF Hub](https://www.tensorflow.org/hub) as a loadable module, it's easy(ish) to add into existing TensorFlow text pipelines. In an existing pipeline, BERT can replace text embedding layers like ELMO and GloVE. Alternatively, [finetuning](http://wiki.fast.ai/index.php/Fine_tuning) BERT can provide both an accuracy boost and faster training time in many cases.\n", + "\n", + "Here, we'll train a model to predict whether an IMDB movie review is positive or negative using BERT in TensorFlow with tf hub. Some code was adapted from [this colab notebook](https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb). Let's get started!" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "chM4UttbMIqq", + "colab_type": "text" + }, + "source": [ + "First, we'll install the required packages:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jviywGyWyKsA", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install bert-tensorflow==1.0.* tensorflow-gpu==1.13.* scikit-learn==0.21.* pandas==0.24.* tensorflow-hub==0.6.* boto3==1.*" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "hsZvic2YxnTz", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from datetime import datetime\n", + "\n", + "from sklearn.model_selection import train_test_split\n", + "import pandas as pd\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "\n", + "import bert\n", + "from bert import run_classifier\n", + "from bert import optimization\n", + "from bert import tokenization" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KVB3eOcjxxm1", + "colab_type": "text" + }, + "source": [ + "Below, we'll set an output location to store our model output, checkpoints, and export in a local directory. Note: if you're running on Google Colab, local directories don't persist after the session ends." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "US_EAnICvP7f", + "colab_type": "code", + "colab": {} + }, + "source": [ + "OUTPUT_DIR = \"bert\"\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pmFYvkylMwXn", + "colab_type": "text" + }, + "source": [ + "#Data" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MC_w8SRqN0fr", + "colab_type": "text" + }, + "source": [ + "First, let's download the dataset, hosted by Stanford. The code below, which downloads, extracts, and imports the IMDB Large Movie Review Dataset, is borrowed from [this TensorFlow tutorial](https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub)." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "fom_ff20gyy6", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from tensorflow import keras\n", + "import os\n", + "import re\n", + "\n", + "# Load all files from a directory in a DataFrame.\n", + "def load_directory_data(directory):\n", + " data = {}\n", + " data[\"sentence\"] = []\n", + " data[\"sentiment\"] = []\n", + " for file_path in os.listdir(directory):\n", + " with tf.gfile.GFile(os.path.join(directory, file_path), \"r\") as f:\n", + " data[\"sentence\"].append(f.read())\n", + " data[\"sentiment\"].append(re.match(\"\\d+_(\\d+)\\.txt\", file_path).group(1))\n", + " return pd.DataFrame.from_dict(data)\n", + "\n", + "# Merge positive and negative examples, add a polarity column and shuffle.\n", + "def load_dataset(directory):\n", + " pos_df = load_directory_data(os.path.join(directory, \"pos\"))\n", + " neg_df = load_directory_data(os.path.join(directory, \"neg\"))\n", + " pos_df[\"polarity\"] = 1\n", + " neg_df[\"polarity\"] = 0\n", + " return pd.concat([pos_df, neg_df]).sample(frac=1).reset_index(drop=True)\n", + "\n", + "# Download and process the dataset files.\n", + "def download_and_load_datasets(force_download=False):\n", + " dataset = tf.keras.utils.get_file(\n", + " fname=\"aclImdb.tar.gz\", \n", + " origin=\"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\", \n", + " extract=True)\n", + " \n", + " train_df = load_dataset(os.path.join(os.path.dirname(dataset), \n", + " \"aclImdb\", \"train\"))\n", + " test_df = load_dataset(os.path.join(os.path.dirname(dataset), \n", + " \"aclImdb\", \"test\"))\n", + " \n", + " return train_df, test_df\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "2abfwdn-g135", + "colab_type": "code", + "colab": {} + }, + "source": [ + "train, test = download_and_load_datasets()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XA8WHJgzhIZf", + "colab_type": "text" + }, + "source": [ + "To keep training fast, we'll take a sample of 5000 train and test examples, respectively." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "lw_F488eixTV", + "colab_type": "code", + "colab": {} + }, + "source": [ + "train = train.sample(5000)\n", + "test = test.sample(5000)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "prRQM8pDi8xI", + "colab_type": "code", + "colab": {} + }, + "source": [ + "train.columns" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sfRnHSz3iSXz", + "colab_type": "text" + }, + "source": [ + "For us, our input data is the 'sentence' column and our label is the 'polarity' column (0, 1 for negative and positive, respecitvely)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "IuMOGwFui4it", + "colab_type": "code", + "colab": {} + }, + "source": [ + "DATA_COLUMN = 'sentence'\n", + "LABEL_COLUMN = 'polarity'\n", + "# label_list is the list of labels, i.e. True, False or 0, 1 or 'dog', 'cat'\n", + "label_list = [0, 1]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "V399W0rqNJ-Z", + "colab_type": "text" + }, + "source": [ + "#Data Preprocessing\n", + "We'll need to transform our data into a format BERT understands. This involves two steps. 
First, we create `InputExample`'s using the constructor provided in the BERT library.\n", + "\n", + "- `text_a` is the text we want to classify, which in this case, is the `Request` field in our Dataframe. \n", + "- `text_b` is used if we're training a model to understand the relationship between sentences (i.e. is `text_b` a translation of `text_a`? Is `text_b` an answer to the question asked by `text_a`?). This doesn't apply to our task, so we can leave `text_b` blank.\n", + "- `label` is the label for our example, i.e. True, False" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "p9gEt5SmM6i6", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Use the InputExample class from BERT's run_classifier code to create examples from the data\n", + "train_InputExamples = train.apply(lambda x: bert.run_classifier.InputExample(guid=None, # Globally unique ID for bookkeeping, unused in this example\n", + " text_a = x[DATA_COLUMN], \n", + " text_b = None, \n", + " label = x[LABEL_COLUMN]), axis = 1)\n", + "\n", + "test_InputExamples = test.apply(lambda x: bert.run_classifier.InputExample(guid=None, \n", + " text_a = x[DATA_COLUMN], \n", + " text_b = None, \n", + " label = x[LABEL_COLUMN]), axis = 1)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SCZWZtKxObjh", + "colab_type": "text" + }, + "source": [ + "Next, we need to preprocess our data so that it matches the data BERT was trained on. For this, we'll need to do a couple of things (but don't worry--this is also included in the Python library):\n", + "\n", + "\n", + "1. Lowercase our text (if we're using a BERT lowercase model)\n", + "2. Tokenize it (i.e. \"sally says hi\" -> [\"sally\", \"says\", \"hi\"])\n", + "3. Break words into WordPieces (i.e. \"calling\" -> [\"call\", \"##ing\"])\n", + "4. Map our words to indexes using a vocab file that BERT provides\n", + "5. Add special \"CLS\" and \"SEP\" tokens (see the [readme](https://github.com/google-research/bert))\n", + "6. 
Append \"index\" and \"segment\" tokens to each input (see the [BERT paper](https://arxiv.org/pdf/1810.04805.pdf))\n", + "\n", + "Happily, we don't have to worry about most of these details.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qMWiDtpyQSoU", + "colab_type": "text" + }, + "source": [ + "To start, we'll need to load a vocabulary file and lowercasing information directly from the BERT tf hub module:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "IhJSe0QHNG7U", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# This is a path to an uncased (all lowercase) version of BERT\n", + "BERT_MODEL_HUB = \"https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1\"\n", + "\n", + "def create_tokenizer_from_hub_module():\n", + " \"\"\"Get the vocab file and casing info from the Hub module.\"\"\"\n", + " with tf.Graph().as_default():\n", + " bert_module = hub.Module(BERT_MODEL_HUB)\n", + " tokenization_info = bert_module(signature=\"tokenization_info\", as_dict=True)\n", + " with tf.Session() as sess:\n", + " vocab_file, do_lower_case = sess.run([tokenization_info[\"vocab_file\"],\n", + " tokenization_info[\"do_lower_case\"]])\n", + " \n", + " return bert.tokenization.FullTokenizer(\n", + " vocab_file=vocab_file, do_lower_case=do_lower_case)\n", + "\n", + "tokenizer = create_tokenizer_from_hub_module()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "z4oFkhpZBDKm", + "colab_type": "text" + }, + "source": [ + "Great--we just learned that the BERT model we're using expects lowercase data (that's what stored in tokenization_info[\"do_lower_case\"]) and we also loaded BERT's vocab file. We also created a tokenizer, which breaks words into word pieces:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "dsBo6RCtQmwx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "tokenizer.tokenize(\"This here's an example of using the BERT tokenizer\")" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0OEzfFIt6GIc", + "colab_type": "text" + }, + "source": [ + "Using our tokenizer, we'll call `run_classifier.convert_examples_to_features` on our InputExamples to convert them into features BERT understands." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "LL5W8gEGRTAf", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# We'll set sequences to be at most 128 tokens long.\n", + "MAX_SEQ_LENGTH = 128\n", + "# Convert our train and test features to InputFeatures that BERT understands.\n", + "train_features = bert.run_classifier.convert_examples_to_features(train_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)\n", + "test_features = bert.run_classifier.convert_examples_to_features(test_InputExamples, label_list, MAX_SEQ_LENGTH, tokenizer)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ccp5trMwRtmr", + "colab_type": "text" + }, + "source": [ + "#Creating a model\n", + "\n", + "Now that we've prepared our data, let's focus on building a model. `create_model` does just this below. First, it loads the BERT tf hub module again (this time to extract the computation graph). Next, it creates a single new layer that will be trained to adapt BERT to our sentiment task (i.e. classifying whether a movie review is positive or negative). This strategy of using a mostly trained model is called [fine-tuning](http://wiki.fast.ai/index.php/Fine_tuning)." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "6o2a5ZIvRcJq", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def create_model(is_predicting, input_ids, input_mask, segment_ids, labels,\n", + " num_labels):\n", + " \"\"\"Creates a classification model.\"\"\"\n", + "\n", + " bert_module = hub.Module(\n", + " BERT_MODEL_HUB,\n", + " trainable=True)\n", + " bert_inputs = dict(\n", + " input_ids=input_ids,\n", + " input_mask=input_mask,\n", + " segment_ids=segment_ids)\n", + " bert_outputs = bert_module(\n", + " inputs=bert_inputs,\n", + " signature=\"tokens\",\n", + " as_dict=True)\n", + "\n", + " # Use \"pooled_output\" for classification tasks on an entire sentence.\n", + " # Use \"sequence_outputs\" for token-level output.\n", + " output_layer = bert_outputs[\"pooled_output\"]\n", + "\n", + " hidden_size = output_layer.shape[-1].value\n", + "\n", + " # Create our own layer to tune for politeness data.\n", + " output_weights = tf.get_variable(\n", + " \"output_weights\", [num_labels, hidden_size],\n", + " initializer=tf.truncated_normal_initializer(stddev=0.02))\n", + "\n", + " output_bias = tf.get_variable(\n", + " \"output_bias\", [num_labels], initializer=tf.zeros_initializer())\n", + "\n", + " with tf.variable_scope(\"loss\"):\n", + "\n", + " # Dropout helps prevent overfitting\n", + " output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)\n", + "\n", + " logits = tf.matmul(output_layer, output_weights, transpose_b=True)\n", + " logits = tf.nn.bias_add(logits, output_bias)\n", + " log_probs = tf.nn.log_softmax(logits, axis=-1)\n", + "\n", + " # Convert labels into one-hot encoding\n", + " one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)\n", + "\n", + " predicted_labels = tf.squeeze(tf.argmax(log_probs, axis=-1, output_type=tf.int32))\n", + " # If we're predicting, we want predicted labels and the probabilities.\n", + " if is_predicting:\n", + " return (predicted_labels, log_probs)\n", + "\n", + " # If we're train/eval, compute loss between predicted and actual label\n", + " per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)\n", + " loss = tf.reduce_mean(per_example_loss)\n", + " return (loss, predicted_labels, log_probs)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qpE0ZIDOCQzE", + "colab_type": "text" + }, + "source": [ + "Next we'll wrap our model function in a `model_fn_builder` function that adapts our model to work for training, evaluation, and prediction."
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "FnH-AnOQ9KKW", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# model_fn_builder actually creates our model function\n", + "# using the passed parameters for num_labels, learning_rate, etc.\n", + "def model_fn_builder(num_labels, learning_rate, num_train_steps,\n", + " num_warmup_steps):\n", + " \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n", + " def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n", + " \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n", + "\n", + " input_ids = features[\"input_ids\"]\n", + " input_mask = features[\"input_mask\"]\n", + " segment_ids = features[\"segment_ids\"]\n", + " label_ids = features[\"label_ids\"]\n", + "\n", + " is_predicting = (mode == tf.estimator.ModeKeys.PREDICT)\n", + " \n", + " # TRAIN and EVAL\n", + " if not is_predicting:\n", + "\n", + " (loss, predicted_labels, log_probs) = create_model(\n", + " is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n", + "\n", + " train_op = bert.optimization.create_optimizer(\n", + " loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu=False)\n", + "\n", + " # Calculate evaluation metrics. \n", + " def metric_fn(label_ids, predicted_labels):\n", + " accuracy = tf.metrics.accuracy(label_ids, predicted_labels)\n", + " f1_score = tf.contrib.metrics.f1_score(\n", + " label_ids,\n", + " predicted_labels)\n", + " auc = tf.metrics.auc(\n", + " label_ids,\n", + " predicted_labels)\n", + " recall = tf.metrics.recall(\n", + " label_ids,\n", + " predicted_labels)\n", + " precision = tf.metrics.precision(\n", + " label_ids,\n", + " predicted_labels) \n", + " true_pos = tf.metrics.true_positives(\n", + " label_ids,\n", + " predicted_labels)\n", + " true_neg = tf.metrics.true_negatives(\n", + " label_ids,\n", + " predicted_labels) \n", + " false_pos = tf.metrics.false_positives(\n", + " label_ids,\n", + " predicted_labels) \n", + " false_neg = tf.metrics.false_negatives(\n", + " label_ids,\n", + " predicted_labels)\n", + " return {\n", + " \"eval_accuracy\": accuracy,\n", + " \"f1_score\": f1_score,\n", + " \"auc\": auc,\n", + " \"precision\": precision,\n", + " \"recall\": recall,\n", + " \"true_positives\": true_pos,\n", + " \"true_negatives\": true_neg,\n", + " \"false_positives\": false_pos,\n", + " \"false_negatives\": false_neg\n", + " }\n", + "\n", + " eval_metrics = metric_fn(label_ids, predicted_labels)\n", + "\n", + " if mode == tf.estimator.ModeKeys.TRAIN:\n", + " return tf.estimator.EstimatorSpec(mode=mode,\n", + " loss=loss,\n", + " train_op=train_op)\n", + " else:\n", + " return tf.estimator.EstimatorSpec(mode=mode,\n", + " loss=loss,\n", + " eval_metric_ops=eval_metrics)\n", + " else:\n", + " (predicted_labels, log_probs) = create_model(\n", + " is_predicting, input_ids, input_mask, segment_ids, label_ids, num_labels)\n", + "\n", + " predictions = {\n", + " 'probabilities': log_probs,\n", + " 'labels': predicted_labels\n", + " }\n", + " return tf.estimator.EstimatorSpec(mode, predictions=predictions)\n", + "\n", + " # Return the actual model function in the closure\n", + " return model_fn\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "OjwJ4bTeWXD8", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Compute train and warmup steps from batch size\n", + "# These hyperparameters are copied from this colab notebook 
(https://colab.sandbox.google.com/github/tensorflow/tpu/blob/master/tools/colab/bert_finetuning_with_cloud_tpus.ipynb)\n", + "BATCH_SIZE = 32\n", + "LEARNING_RATE = 2e-5\n", + "NUM_TRAIN_EPOCHS = 3.0\n", + "# Warmup is a period of time where the learning rate \n", + "# is small and gradually increases--usually helps training.\n", + "WARMUP_PROPORTION = 0.1\n", + "# Model configs\n", + "SAVE_CHECKPOINTS_STEPS = 500\n", + "SAVE_SUMMARY_STEPS = 100" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "emHf9GhfWBZ_", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Compute the number of train and warmup steps from batch size\n", + "num_train_steps = int(len(train_features) / BATCH_SIZE * NUM_TRAIN_EPOCHS)\n", + "num_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "oEJldMr3WYZa", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Specify output directory and number of checkpoint steps to save\n", + "run_config = tf.estimator.RunConfig(\n", + " model_dir=OUTPUT_DIR,\n", + " save_summary_steps=SAVE_SUMMARY_STEPS,\n", + " save_checkpoints_steps=SAVE_CHECKPOINTS_STEPS)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "q_WebpS1X97v", + "colab_type": "code", + "colab": {} + }, + "source": [ + "model_fn = model_fn_builder(\n", + " num_labels=len(label_list),\n", + " learning_rate=LEARNING_RATE,\n", + " num_train_steps=num_train_steps,\n", + " num_warmup_steps=num_warmup_steps)\n", + "\n", + "estimator = tf.estimator.Estimator(\n", + " model_fn=model_fn,\n", + " config=run_config,\n", + " params={\"batch_size\": BATCH_SIZE})\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NOO3RfG1DYLo", + "colab_type": "text" + }, + "source": [ + "Next we create an input builder function that takes our training feature set (`train_features`) and produces a generator. This is a pretty standard design pattern for working with TensorFlow [Estimators](https://www.tensorflow.org/guide/estimators)." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1Pv2bAlOX_-K", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Create an input function for training. drop_remainder = True for using TPUs.\n", + "train_input_fn = bert.run_classifier.input_fn_builder(\n", + " features=train_features,\n", + " seq_length=MAX_SEQ_LENGTH,\n", + " is_training=True,\n", + " drop_remainder=False)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "t6Nukby2EB6-", + "colab_type": "text" + }, + "source": [ + "Now we train our model! For me, using a Colab notebook running on Google's GPUs, training time is typically 8-14 minutes."
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "nucD4gluYJmK", + "colab_type": "code", + "colab": {} + }, + "source": [ + "print(f'Beginning Training!')\n", + "current_time = datetime.now()\n", + "estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n", + "print(\"Training took time \", datetime.now() - current_time)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CmbLTVniARy3", + "colab_type": "text" + }, + "source": [ + "Now let's use our test data to see how well our model did:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "JIhejfpyJ8Bx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "test_input_fn = run_classifier.input_fn_builder(\n", + " features=test_features,\n", + " seq_length=MAX_SEQ_LENGTH,\n", + " is_training=False,\n", + " drop_remainder=False)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "PPVEXhNjYXC-", + "colab_type": "code", + "colab": {} + }, + "source": [ + "estimator.evaluate(input_fn=test_input_fn, steps=None)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ueKsULteiz1B", + "colab_type": "text" + }, + "source": [ + "Now let's write code to make predictions on new sentences:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "OsrbTD2EJTVl", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def getPrediction(in_sentences):\n", + " labels = [\"Negative\", \"Positive\"]\n", + " input_examples = [run_classifier.InputExample(guid=\"\", text_a = x, text_b = None, label = 0) for x in in_sentences] # here, \"\" is just a dummy label\n", + " input_features = run_classifier.convert_examples_to_features(input_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n", + " predict_input_fn = run_classifier.input_fn_builder(features=input_features, seq_length=MAX_SEQ_LENGTH, is_training=False, drop_remainder=False)\n", + " predictions = estimator.predict(predict_input_fn)\n", + " return [(sentence, prediction['probabilities'], labels[prediction['labels']]) for sentence, prediction in zip(in_sentences, predictions)]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "-thbodgih_VJ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "pred_sentences = [\n", + " \"That movie was absolutely awful\",\n", + " \"The acting was a bit lacking\",\n", + " \"The film was creative and surprising\",\n", + " \"Absolutely fantastic!\"\n", + "]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "QrZmvZySKQTm", + "colab_type": "code", + "colab": {} + }, + "source": [ + "predictions = getPrediction(pred_sentences)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MXkRiEBUqN3n", + "colab_type": "text" + }, + "source": [ + "Voila! We have a sentiment classifier!" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ERkTE8-7oQLZ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "predictions" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P3Tg7c47vfLE", + "colab_type": "text" + }, + "source": [ + "# Export the model\n", + "\n", + "We are now ready to export the model. The following code defines the serving input function and exports the model to `OUTPUT_DIR`." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NfXsdV4qtlpW", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def serving_input_fn():\n", + " reciever_tensors = {\n", + " \"input_ids\": tf.placeholder(dtype=tf.int32,\n", + " shape=[1, MAX_SEQ_LENGTH])\n", + " }\n", + " features = {\n", + " \"input_ids\": reciever_tensors['input_ids'],\n", + " \"input_mask\": 1 - tf.cast(tf.equal(reciever_tensors['input_ids'], 0), dtype=tf.int32),\n", + " \"segment_ids\": tf.zeros(dtype=tf.int32, shape=[1, MAX_SEQ_LENGTH]),\n", + " \"label_ids\": tf.placeholder(tf.int32, [None], name='label_ids')\n", + " }\n", + " return tf.estimator.export.ServingInputReceiver(features, reciever_tensors)\n", + " \n", + "estimator._export_to_tpu = False\n", + "estimator.export_saved_model(OUTPUT_DIR+\"/export\", serving_input_fn)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tIFTmUbcwI0w", + "colab_type": "text" + }, + "source": [ + "# Upload the model to AWS\n", + "\n", + "Cortex loads models from AWS, so we need to upload the exported model." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gByRzrnR_OBX", + "colab_type": "text" + }, + "source": [ + "Set these variables to configure your AWS credentials and model upload path:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1bdCOb3z0_Gh", + "colab_type": "code", + "cellView": "form", + "colab": {} + }, + "source": [ + "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", + "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", + "S3_UPLOAD_PATH = \"s3://my-bucket/sentiment-analyzer/bert\" #@param {type:\"string\"}\n", + "\n", + "import sys\n", + "import re\n", + "\n", + "if AWS_ACCESS_KEY_ID == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", + "\n", + "elif AWS_SECRET_ACCESS_KEY == \"\":\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", + "\n", + "else:\n", + " try:\n", + " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", + " except:\n", + " print(\"\\033[91m{}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WLT09hZr_bhm", + "colab_type": "text" + }, + "source": [ + "Upload to S3:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jCN3BINl2sKN", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import os\n", + "import boto3\n", + "\n", + "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", + "\n", + "for dirpath, _, filenames in os.walk(OUTPUT_DIR+\"/export\"):\n", + " for filename in filenames:\n", + " filepath = os.path.join(dirpath, filename)\n", + " filekey = os.path.join(key, filepath[len(OUTPUT_DIR+\"/export/\"):])\n", + " print(\"Uploading s3://{}/{} ...\".format(bucket, filekey), end = '')\n", + " s3.upload_file(filepath, bucket, filekey)\n", + " print(\" ✓\")\n", + "\n", + "print(\"\\nUploaded model export directory to \" + S3_UPLOAD_PATH)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7XPKSHzf_d7M", + "colab_type": "text" + }, + "source": [ + "\n", + "That's it! 
See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/sentiment-analyzer) for how to deploy the model as an API." + ] + } + ] +} diff --git a/test/tensorflow/sentiment-analyzer/cortex.yaml b/test/tensorflow/sentiment-analyzer/cortex.yaml new file mode 100644 index 0000000000..3e6447053e --- /dev/null +++ b/test/tensorflow/sentiment-analyzer/cortex.yaml @@ -0,0 +1,13 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: sentiment-analyzer + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/sentiment-analyzer/bert/ + monitoring: + model_type: classification + compute: + cpu: 1 + gpu: 1 diff --git a/test/tensorflow/sentiment-analyzer/predictor.py b/test/tensorflow/sentiment-analyzer/predictor.py new file mode 100644 index 0000000000..901f2bf349 --- /dev/null +++ b/test/tensorflow/sentiment-analyzer/predictor.py @@ -0,0 +1,29 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import tensorflow as tf +import tensorflow_hub as hub +from bert import tokenization, run_classifier + +labels = ["negative", "positive"] + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config): + with tf.Graph().as_default(): + bert_module = hub.Module("https://tfhub.dev/google/bert_uncased_L-12_H-768_A-12/1") + info = bert_module(signature="tokenization_info", as_dict=True) + with tf.Session() as sess: + vocab_file, do_lower_case = sess.run([info["vocab_file"], info["do_lower_case"]]) + self._tokenizer = tokenization.FullTokenizer( + vocab_file=vocab_file, do_lower_case=do_lower_case + ) + self.client = tensorflow_client + + def predict(self, payload): + input_example = run_classifier.InputExample(guid="", text_a=payload["review"], label=0) + input_feature = run_classifier.convert_single_example( + 0, input_example, [0, 1], 128, self._tokenizer + ) + model_input = {"input_ids": [input_feature.input_ids]} + prediction = self.client.predict(model_input) + return labels[prediction["labels"][0]] diff --git a/test/tensorflow/sentiment-analyzer/requirements.txt b/test/tensorflow/sentiment-analyzer/requirements.txt new file mode 100644 index 0000000000..273614922e --- /dev/null +++ b/test/tensorflow/sentiment-analyzer/requirements.txt @@ -0,0 +1,5 @@ +bert-tensorflow==1.0.1 +tensorflow-hub==0.7.0 +tensorflow==1.15.* +tensorflow-serving-api==1.15.* +numpy==1.16.* diff --git a/test/tensorflow/sentiment-analyzer/sample.json b/test/tensorflow/sentiment-analyzer/sample.json new file mode 100644 index 0000000000..c433e33216 --- /dev/null +++ b/test/tensorflow/sentiment-analyzer/sample.json @@ -0,0 +1,3 @@ +{ + "review": "the movie was amazing!" +} diff --git a/test/tensorflow/text-generator/README.md b/test/tensorflow/text-generator/README.md new file mode 100644 index 0000000000..41a04891b3 --- /dev/null +++ b/test/tensorflow/text-generator/README.md @@ -0,0 +1,3 @@ +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +Please refer to the [tutorial](https://docs.cortex.dev/text-generator) to see how to deploy an example with Cortex. diff --git a/test/tensorflow/text-generator/cortex.yaml b/test/tensorflow/text-generator/cortex.yaml new file mode 100644 index 0000000000..d0e54b527d --- /dev/null +++ b/test/tensorflow/text-generator/cortex.yaml @@ -0,0 +1,11 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: text-generator + kind: RealtimeAPI + predictor: + type: tensorflow + path: predictor.py + model_path: s3://cortex-examples/tensorflow/text-generator/gpt-2/124M/ + compute: + cpu: 1 + gpu: 1 diff --git a/test/tensorflow/text-generator/encoder.py b/test/tensorflow/text-generator/encoder.py new file mode 100644 index 0000000000..2f73dd509b --- /dev/null +++ b/test/tensorflow/text-generator/encoder.py @@ -0,0 +1,118 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +# This file includes code which was modified from https://github.com/openai/gpt-2 + +import json +import regex +from functools import lru_cache + + +@lru_cache() +def bytes_to_unicode(): + bs = ( + list(range(ord("!"), ord("~") + 1)) + + list(range(ord("¡"), ord("¬") + 1)) + + list(range(ord("®"), ord("ÿ") + 1)) + ) + cs = bs[:] + n = 0 + for b in range(2 ** 8): + if b not in bs: + bs.append(b) + cs.append(2 ** 8 + n) + n += 1 + cs = [chr(n) for n in cs] + return dict(zip(bs, cs)) + + +def get_pairs(word): + pairs = set() + prev_char = word[0] + for char in word[1:]: + pairs.add((prev_char, char)) + prev_char = char + return pairs + + +class Encoder: + def __init__(self, encoder, bpe_merges, errors="replace"): + self.encoder = encoder + self.decoder = {v: k for k, v in self.encoder.items()} + self.errors = errors + self.byte_encoder = bytes_to_unicode() + self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} + self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) + self.cache = {} + self.pat = regex.compile( + r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" + ) + + def bpe(self, token): + if token in self.cache: + return self.cache[token] + word = tuple(token) + pairs = get_pairs(word) + + if not pairs: + return token + + while True: + bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) + if bigram not in self.bpe_ranks: + break + first, second = bigram + new_word = [] + i = 0 + while i < len(word): + try: + j = word.index(first, i) + new_word.extend(word[i:j]) + i = j + except: + new_word.extend(word[i:]) + break + + if word[i] == first and i < len(word) - 1 and word[i + 1] == second: + new_word.append(first + second) + i += 2 + else: + new_word.append(word[i]) + i += 1 + new_word = tuple(new_word) + word = new_word + if len(word) == 1: + break + else: + pairs = get_pairs(word) + word = " ".join(word) + self.cache[token] = word + return word + + def encode(self, text): + bpe_tokens = [] + for token in regex.findall(self.pat, text): + token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) + bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")) + return bpe_tokens + + def decode(self, 
tokens): + text = "".join([self.decoder[token] for token in tokens]) + text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) + return text + + +def get_encoder(s3_client): + encoder = json.load( + s3_client.get_object( + Bucket="cortex-examples", Key="tensorflow/text-generator/gpt-2/encoder.json" + )["Body"] + ) + bpe_data = ( + s3_client.get_object( + Bucket="cortex-examples", Key="tensorflow/text-generator/gpt-2/vocab.bpe" + )["Body"] + .read() + .decode("utf-8") + ) + bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split("\n")[1:-1]] + return Encoder(encoder=encoder, bpe_merges=bpe_merges) diff --git a/test/tensorflow/text-generator/gpt-2.ipynb b/test/tensorflow/text-generator/gpt-2.ipynb new file mode 100644 index 0000000000..1597816fcd --- /dev/null +++ b/test/tensorflow/text-generator/gpt-2.ipynb @@ -0,0 +1,383 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "gpt-2.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "kc5cIgeEmv8o", + "colab_type": "text" + }, + "source": [ + "# Exporting GPT-2\n", + "\n", + "_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_\n", + "\n", + "In this notebook, we'll show how to export [OpenAI's GPT-2 text generation model](https://github.com/openai/gpt-2) for serving." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RAWs29lAktOK", + "colab_type": "text" + }, + "source": [ + "First, we'll download the GPT-2 code repository:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gHs3aaFaLUXq", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!git clone --no-checkout https://github.com/openai/gpt-2.git\n", + "!cd gpt-2 && git reset --hard ac5d52295f8a1c3856ea24fb239087cc1a3d1131" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A4al4P14nmni", + "colab_type": "text" + }, + "source": [ + "Next we'll specify the model size (choose one of 124M, 355M, or 774M):" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3Y4bt6hkfuxY", + "colab_type": "code", + "colab": {}, + "cellView": "form" + }, + "source": [ + "import sys\n", + "\n", + "MODEL_SIZE = \"124M\" #@param {type:\"string\"}\n", + "\n", + "if MODEL_SIZE not in {\"124M\", \"355M\", \"774M\"}:\n", + " print(\"\\033[91m{}\\033[00m\".format('ERROR: MODEL_SIZE must be \"124M\", \"355M\", or \"774M\"'), file=sys.stderr)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "C6xRx0Monh_j", + "colab_type": "text" + }, + "source": [ + "We can use `download_model.py` to download the model:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Kb50Z6NjbJBN", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!python3 ./gpt-2/download_model.py $MODEL_SIZE" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zz2ioOcpoPjV", + "colab_type": "text" + }, + "source": [ + "Next, we'll install the required packages:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Vk4Q2RR-UZQm", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install tensorflow==1.14.* numpy==1.* boto3==1.*" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "KkVf5FmuUMrl", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import sys\n", + "import os\n", + "import time\n", + "import json\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Ay7qiQFoWRn", + "colab_type": "text" + }, + "source": [ + "Now we can export the model for serving:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GdnYXr1IKaF0", + "colab_type": "code", + "colab": {} + }, + "source": [ + "sys.path.append(os.path.join(os.getcwd(), 'gpt-2/src'))\n", + "import model, sample\n", + "\n", + "def export_for_serving(\n", + " model_name='124M',\n", + " seed=None,\n", + " batch_size=1,\n", + " length=None,\n", + " temperature=1,\n", + " top_k=0,\n", + " models_dir='models'\n", + "):\n", + " \"\"\"\n", + " Export the model for TF Serving\n", + " :model_name=124M : String, which model to use\n", + " :seed=None : Integer seed for random number generators, fix seed to reproduce\n", + " results\n", + " :length=None : Number of tokens in generated text, if None (default), is\n", + " determined by model hyperparameters\n", + " :temperature=1 : Float value controlling randomness in boltzmann\n", + " distribution. Lower temperature results in less random completions. As the\n", + " temperature approaches zero, the model will become deterministic and\n", + " repetitive. 
Higher temperature results in more random completions.\n", + " :top_k=0 : Integer value controlling diversity. 1 means only 1 word is\n", + " considered for each step (token), resulting in deterministic completions,\n", + " while 40 means 40 words are considered at each step. 0 (default) is a\n", + " special setting meaning no restrictions. 40 generally is a good value.\n", + " :models_dir : path to parent folder containing model subfolders\n", + " (i.e. contains the folder)\n", + " \"\"\"\n", + " models_dir = os.path.expanduser(os.path.expandvars(models_dir))\n", + "\n", + " hparams = model.default_hparams()\n", + " with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\n", + " hparams.override_from_dict(json.load(f))\n", + "\n", + " if length is None:\n", + " length = hparams.n_ctx\n", + " elif length > hparams.n_ctx:\n", + " raise ValueError(\"Can't get samples longer than window size: %s\" % hparams.n_ctx)\n", + "\n", + " with tf.Session(graph=tf.Graph()) as sess:\n", + " context = tf.placeholder(tf.int32, [batch_size, None])\n", + " np.random.seed(seed)\n", + " tf.set_random_seed(seed)\n", + "\n", + " output = sample.sample_sequence(\n", + " hparams=hparams, length=length,\n", + " context=context,\n", + " batch_size=batch_size,\n", + " temperature=temperature, top_k=top_k\n", + " )\n", + "\n", + " saver = tf.train.Saver()\n", + " ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))\n", + " saver.restore(sess, ckpt)\n", + "\n", + " export_dir=os.path.join(models_dir, model_name, \"export\", str(time.time()).split('.')[0])\n", + " if not os.path.isdir(export_dir):\n", + " os.makedirs(export_dir)\n", + "\n", + " builder = tf.saved_model.builder.SavedModelBuilder(export_dir)\n", + " signature = predict_signature_def(inputs={'context': context},\n", + " outputs={'sample': output})\n", + "\n", + " builder.add_meta_graph_and_variables(sess,\n", + " [tf.saved_model.SERVING],\n", + " signature_def_map={\"predict\": signature},\n", + " strip_default_attrs=True)\n", + " builder.save()\n", + "\n", + "\n", + "export_for_serving(top_k=40, length=256, model_name=MODEL_SIZE)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hGfSohMrowmg", + "colab_type": "text" + }, + "source": [ + "## Upload the model to AWS\n", + "\n", + "Cortex loads models from AWS, so we need to upload the exported model." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BfB5QZ82ozj9", + "colab_type": "text" + }, + "source": [ + "Set these variables to configure your AWS credentials and model upload path:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "B2RNuNk7o1c5", + "colab_type": "code", + "colab": {}, + "cellView": "form" + }, + "source": [ + "AWS_ACCESS_KEY_ID = \"\" #@param {type:\"string\"}\n", + "AWS_SECRET_ACCESS_KEY = \"\" #@param {type:\"string\"}\n", + "S3_UPLOAD_PATH = \"s3://my-bucket/text-generator/gpt-2\" #@param {type:\"string\"}\n", + "\n", + "import sys\n", + "import re\n", + "\n", + "if AWS_ACCESS_KEY_ID == \"\":\n", + " print(\"\\033[91m {}\\033[00m\".format(\"ERROR: Please set AWS_ACCESS_KEY_ID\"), file=sys.stderr)\n", + "\n", + "elif AWS_SECRET_ACCESS_KEY == \"\":\n", + " print(\"\\033[91m {}\\033[00m\".format(\"ERROR: Please set AWS_SECRET_ACCESS_KEY\"), file=sys.stderr)\n", + "\n", + "else:\n", + " try:\n", + " bucket, key = re.match(\"s3://(.+?)/(.+)\", S3_UPLOAD_PATH).groups()\n", + " except:\n", + " print(\"\\033[91m {}\\033[00m\".format(\"ERROR: Invalid s3 path (should be of the form s3://my-bucket/path/to/file)\"), file=sys.stderr)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ics0omsrpS8V", + "colab_type": "text" + }, + "source": [ + "Upload the model to S3:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BnKncToppUhN", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import os\n", + "import boto3\n", + "\n", + "s3 = boto3.client(\"s3\", aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\n", + "\n", + "for dirpath, _, filenames in os.walk(\"models/{}/export\".format(MODEL_SIZE)):\n", + " for filename in filenames:\n", + " filepath = os.path.join(dirpath, filename)\n", + " filekey = os.path.join(key, MODEL_SIZE, filepath[len(\"models/{}/export/\".format(MODEL_SIZE)):])\n", + " print(\"Uploading s3://{}/{} ...\".format(bucket, filekey), end = '')\n", + " s3.upload_file(filepath, bucket, filekey)\n", + " print(\" ✓\")\n", + "\n", + "print(\"\\nUploaded model export directory to {}/{}\".format(S3_UPLOAD_PATH, MODEL_SIZE))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IIMVPhe2qkU4", + "colab_type": "text" + }, + "source": [ + "\n", + "We also need to upload `vocab.bpe` and `encoder.json`, so that the [encoder](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/text-generator/encoder.py) in the [Predictor](https://github.com/cortexlabs/cortex/blob/master/examples/tensorflow/text-generator/predictor.py) can encode the input text before making a request to the model." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "YdN8MtZxsO9V", + "colab_type": "code", + "colab": {} + }, + "source": [ + "print(\"Uploading s3://{}/{}/vocab.bpe ...\".format(bucket, key), end = '')\n", + "s3.upload_file(os.path.join(\"models\", MODEL_SIZE, \"vocab.bpe\"), bucket, os.path.join(key, \"vocab.bpe\"))\n", + "print(\" ✓\")\n", + "\n", + "print(\"Uploading s3://{}/{}/encoder.json ...\".format(bucket, key), end = '')\n", + "s3.upload_file(os.path.join(\"models\", MODEL_SIZE, \"encoder.json\"), bucket, os.path.join(key, \"encoder.json\"))\n", + "print(\" ✓\")" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MsoxwahIpnTO", + "colab_type": "text" + }, + "source": [ + "\n", + "That's it! 
See the [example on GitHub](https://github.com/cortexlabs/cortex/tree/master/examples/tensorflow/text-generator) for how to deploy the model as an API." + ] + } + ] +} diff --git a/test/tensorflow/text-generator/predictor.py b/test/tensorflow/text-generator/predictor.py new file mode 100644 index 0000000000..3cbc45e1d7 --- /dev/null +++ b/test/tensorflow/text-generator/predictor.py @@ -0,0 +1,24 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import os +import boto3 +from botocore import UNSIGNED +from botocore.client import Config +from encoder import get_encoder + + +class TensorFlowPredictor: + def __init__(self, tensorflow_client, config): + self.client = tensorflow_client + + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + self.encoder = get_encoder(s3) + + def predict(self, payload): + model_input = {"context": [self.encoder.encode(payload["text"])]} + prediction = self.client.predict(model_input) + return self.encoder.decode(prediction["sample"]) diff --git a/test/tensorflow/text-generator/requirements.txt b/test/tensorflow/text-generator/requirements.txt new file mode 100644 index 0000000000..f064e1eb7e --- /dev/null +++ b/test/tensorflow/text-generator/requirements.txt @@ -0,0 +1,2 @@ +requests +regex diff --git a/test/tensorflow/text-generator/sample.json b/test/tensorflow/text-generator/sample.json new file mode 100644 index 0000000000..dfd2a2f433 --- /dev/null +++ b/test/tensorflow/text-generator/sample.json @@ -0,0 +1,3 @@ +{ + "text": "machine learning is" +} diff --git a/test/traffic-splitter/README.md b/test/traffic-splitter/README.md new file mode 100644 index 0000000000..d68d763dd0 --- /dev/null +++ b/test/traffic-splitter/README.md @@ -0,0 +1,111 @@ +# Splitting traffic between APIs + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +This example shows how to split traffic between 2 different iris-classifiers deployed as Realtime APIs. + +To deploy this example: + +1. Determine your CLI Version `cortex version` +1. Clone the repo and switch to the current version by replacing `` with your CLI version: `git clone -b v https://github.com/cortexlabs/cortex` (e.g. if the output of `cortex version` is 0.18.1, the clone command would be `git clone -b v0.18.1 https://github.com/cortexlabs/cortex`) +1. 
Navigate to this example directory + +## `cortex deploy` + +```bash +$ cortex deploy --env aws + +creating iris-classifier-onnx (RealtimeAPI) +creating iris-classifier-tf (RealtimeAPI) +created iris-classifier (TrafficSplitter) +``` + +## `cortex get` + +```bash +$ cortex get + +env realtime api status up-to-date requested last update avg request 2XX +aws iris-classifier-onnx updating 0 1 27s - - +aws iris-classifier-tf updating 0 1 27s - - + +env traffic splitter apis last update +aws iris-classifier iris-classifier-onnx:30 iris-classifier-tf:70 27s +``` + +## `cortex get iris-classifier` + +```bash +$ cortex get iris-classifier --env aws + +apis weights status requested last update avg request 2XX 5XX +iris-classifier-onnx 30 live 1 1m - - - +iris-classifier-tf 70 live 1 1m - - - + +last updated: 1m +endpoint: https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier +example curl: curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +... +``` + +## Make multiple requests + +```bash +$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +setosa + +$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +setosa + +$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +setosa + +$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +setosa + +$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +setosa + +$ curl https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +setosa +``` + +## `cortex get iris-classifier` + +Notice the requests being routed to the different Realtime APIs based on their weights (the output below may not match yours): + +```bash +$ cortex get iris-classifier --env aws + +using aws environment + + +apis weights status requested last update avg request 2XX 5XX +iris-classifier-onnx 30 live 1 4m 6.00791 ms 1 - +iris-classifier-tf 70 live 1 4m 5.81867 ms 5 - + +last updated: 4m +endpoint: https://comtf6hs64.execute-api.us-west-2.amazonaws.com/iris-classifier +example curl: curl https://comtf6hs64.execute-api.us-west-2.amazonaws.com/iris-classifier -X POST -H "Content-Type: application/json" -d @sample.json +... +``` + +## Cleanup + +Use `cortex delete ` to delete the Traffic Splitter and the two Realtime APIs (note that the Traffic Splitter and each Realtime API must be deleted by separate `cortex delete` commands): + +```bash +$ cortex delete iris-classifier --env aws + +deleting iris-classifier + +$ cortex delete iris-classifier-onnx --env aws + +deleting iris-classifier-onnx + +$ cortex delete iris-classifier-tf --env aws + +deleting iris-classifier-tf +``` + +Running `cortex delete ` will free up cluster resources and allow Cortex to scale down to the minimum number of instances you specified during cluster installation. It will not spin down your cluster. 
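+
+## Making requests from Python
+
+As an alternative to the `curl` commands in the "Make multiple requests" section above, here is a minimal sketch using Python's `requests` library (assuming `requests` is installed and you are running from this example directory). The endpoint below is the placeholder from the example output above; substitute the endpoint printed by `cortex get iris-classifier --env aws`:
+
+```python
+import json
+
+import requests
+
+# placeholder endpoint copied from the example `cortex get` output above
+endpoint = "https://abcedefg.execute-api.us-west-2.amazonaws.com/iris-classifier"
+
+# load the same payload used by the curl examples
+with open("sample.json") as f:
+    payload = json.load(f)
+
+# send a handful of requests; the weighted routing between the two Realtime APIs
+# is visible in the request counts shown by `cortex get iris-classifier`
+for _ in range(10):
+    response = requests.post(endpoint, json=payload)
+    response.raise_for_status()
+    print(response.text)
+```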
diff --git a/test/traffic-splitter/cortex.yaml b/test/traffic-splitter/cortex.yaml new file mode 100644 index 0000000000..16702378cd --- /dev/null +++ b/test/traffic-splitter/cortex.yaml @@ -0,0 +1,28 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +- name: iris-classifier-pytorch + kind: RealtimeAPI + predictor: + type: python + path: pytorch_predictor.py + config: + model: s3://cortex-examples/pytorch/iris-classifier/weights.pth + monitoring: + model_type: classification + +- name: iris-classifier-onnx + kind: RealtimeAPI + predictor: + type: onnx + path: onnx_predictor.py + model_path: s3://cortex-examples/onnx/iris-classifier/ + monitoring: + model_type: classification + +- name: iris-classifier + kind: TrafficSplitter + apis: + - name: iris-classifier-onnx + weight: 30 + - name: iris-classifier-pytorch + weight: 70 diff --git a/test/traffic-splitter/model.py b/test/traffic-splitter/model.py new file mode 100644 index 0000000000..fe29ff7b6d --- /dev/null +++ b/test/traffic-splitter/model.py @@ -0,0 +1,59 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +from sklearn.datasets import load_iris +from sklearn.model_selection import train_test_split +from sklearn.metrics import accuracy_score + + +class IrisNet(nn.Module): + def __init__(self): + super(IrisNet, self).__init__() + self.fc1 = nn.Linear(4, 100) + self.fc2 = nn.Linear(100, 100) + self.fc3 = nn.Linear(100, 3) + self.softmax = nn.Softmax(dim=1) + + def forward(self, X): + X = F.relu(self.fc1(X)) + X = self.fc2(X) + X = self.fc3(X) + X = self.softmax(X) + return X + + +if __name__ == "__main__": + iris = load_iris() + X, y = iris.data, iris.target + X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8, random_state=42) + + train_X = Variable(torch.Tensor(X_train).float()) + test_X = Variable(torch.Tensor(X_test).float()) + train_y = Variable(torch.Tensor(y_train).long()) + test_y = Variable(torch.Tensor(y_test).long()) + + model = IrisNet() + + criterion = nn.CrossEntropyLoss() + + optimizer = torch.optim.SGD(model.parameters(), lr=0.01) + + for epoch in range(1000): + optimizer.zero_grad() + out = model(train_X) + loss = criterion(out, train_y) + loss.backward() + optimizer.step() + + if epoch % 100 == 0: + print("number of epoch {} loss {}".format(epoch, loss)) + + predict_out = model(test_X) + _, predict_y = torch.max(predict_out, 1) + + print("prediction accuracy {}".format(accuracy_score(test_y.data, predict_y.data))) + + torch.save(model.state_dict(), "weights.pth") diff --git a/test/traffic-splitter/onnx_predictor.py b/test/traffic-splitter/onnx_predictor.py new file mode 100644 index 0000000000..b135129e14 --- /dev/null +++ b/test/traffic-splitter/onnx_predictor.py @@ -0,0 +1,20 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +labels = ["setosa", "versicolor", "virginica"] + + +class ONNXPredictor: + def __init__(self, onnx_client, config): + self.client = onnx_client + + def predict(self, payload): + model_input = [ + payload["sepal_length"], + payload["sepal_width"], + payload["petal_length"], + payload["petal_width"], + ] + + prediction = self.client.predict(model_input) + predicted_class_id = prediction[0][0] + return labels[predicted_class_id] diff --git a/test/traffic-splitter/pytorch_predictor.py b/test/traffic-splitter/pytorch_predictor.py new file mode 100644 index 0000000000..71994bb9ae --- /dev/null +++ b/test/traffic-splitter/pytorch_predictor.py @@ -0,0 +1,50 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import re +import torch +import os +import boto3 +from botocore import UNSIGNED +from botocore.client import Config +from model import IrisNet + +labels = ["setosa", "versicolor", "virginica"] + + +class PythonPredictor: + def __init__(self, config): + # download the model + bucket, key = re.match("s3://(.+?)/(.+)", config["model"]).groups() + + if os.environ.get("AWS_ACCESS_KEY_ID"): + s3 = boto3.client("s3") # client will use your credentials if available + else: + s3 = boto3.client("s3", config=Config(signature_version=UNSIGNED)) # anonymous client + + s3.download_file(bucket, key, "/tmp/model.pth") + + # initialize the model + model = IrisNet() + model.load_state_dict(torch.load("/tmp/model.pth")) + model.eval() + + self.model = model + + def predict(self, payload): + # Convert the request to a tensor and pass it into the model + input_tensor = torch.FloatTensor( + [ + [ + payload["sepal_length"], + payload["sepal_width"], + payload["petal_length"], + payload["petal_width"], + ] + ] + ) + + # Run the prediction + output = self.model(input_tensor) + + # Translate the model output to the corresponding label string + return labels[torch.argmax(output[0])] diff --git a/test/traffic-splitter/sample.json b/test/traffic-splitter/sample.json new file mode 100644 index 0000000000..e17bbb2896 --- /dev/null +++ b/test/traffic-splitter/sample.json @@ -0,0 +1,6 @@ +{ + "sepal_length": 5.2, + "sepal_width": 3.6, + "petal_length": 1.4, + "petal_width": 0.3 +} diff --git a/test/utils/README.md b/test/utils/README.md new file mode 100644 index 0000000000..61202eb0c0 --- /dev/null +++ b/test/utils/README.md @@ -0,0 +1,36 @@ +## Throughput tester + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +[throughput_test.py](throughput_test.py) is a Python CLI that can be used to test the throughput of your deployed API. The throughput will vary depending on your API's configuration (specified in your `cortex.yaml` file), your local machine's resources (mostly CPU, since it has to spawn many concurrent requests), and the internet connection on your local machine. + +```bash +Usage: throughput_test.py [OPTIONS] ENDPOINT PAYLOAD + + Program for testing the throughput of Cortex-deployed APIs. + +Options: + -w, --processes INTEGER Number of processes for prediction requests. [default: 1] + -t, --threads INTEGER Number of threads per process for prediction requests. 
[default: 1] + -s, --samples INTEGER Number of samples to run per thread. [default: 10] + -i, --time-based FLOAT How long the thread making predictions will run for in seconds. + If set, -s option will be ignored. + --help Show this message and exit. +``` + +`ENDPOINT` is the API's endpoint, which you can get by running `cortex get `. This argument can also be exported as an environment variable instead of being passed to the CLI. + +`PAYLOAD` can either be a local file or an URL resource that points to a file. The allowed extension types for the file are `json` and `jpg`. This argument can also be exported as an environment variable instead of being passed to the CLI. + +* `json` files are generally `sample.json`s as they are found in most Cortex examples. Each of these is attached to the request as payload. The content type of the request is `"application/json"`. +* `jpg` images are read as numpy arrays and then are converted to a bytes object using `cv2.imencode` function. The content type of the request is `"application/octet-stream"`. + +The same payload `PAYLOAD` is attached to all requests the script makes. + +### Dependencies + +The [throughput_test.py](throughput_test.py) CLI has been tested with Python 3.6.9. To install the CLI's dependencies, run the following: + +```bash +pip install requests click opencv-contrib-python numpy validator-collection imageio +``` diff --git a/test/utils/throughput_test.py b/test/utils/throughput_test.py new file mode 100644 index 0000000000..c157cf0b29 --- /dev/null +++ b/test/utils/throughput_test.py @@ -0,0 +1,179 @@ +# WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub) + +import os +import sys +import click +import concurrent.futures +import requests +import imageio +import json +import time +import itertools +import cv2 +import numpy as np + +from validator_collection import checkers + + +@click.command(help="Program for testing the throughput of Cortex-deployed APIs.") +@click.argument("endpoint", type=str, envvar="ENDPOINT") +@click.argument("payload", type=str, envvar="PAYLOAD") +@click.option( + "--processes", + "-p", + type=int, + default=1, + show_default=True, + help="Number of processes for prediction requests.", +) +@click.option( + "--threads", + "-t", + type=int, + default=1, + show_default=True, + help="Number of threads per process for prediction requests.", +) +@click.option( + "--samples", + "-s", + type=int, + default=10, + show_default=True, + help="Number of samples to run per thread.", +) +@click.option( + "--time-based", + "-i", + type=float, + default=0.0, + help="How long the thread making predictions will run for in seconds. 
If set, -s option will be ignored.", +) +def main(payload, endpoint, processes, threads, samples, time_based): + file_type = None + if checkers.is_url(payload): + if payload.lower().endswith(".json"): + file_type = "json" + payload_data = requests.get(payload).json() + elif payload.lower().endswith(".jpg"): + file_type = "jpg" + payload_data = imageio.imread(payload) + elif checkers.is_file(payload): + if payload.lower().endswith(".json"): + file_type = "json" + with open(payload, "r") as f: + payload_data = json.load(f) + elif payload.lower().endswith(".jpg"): + file_type = "jpg" + payload_data = cv2.imread(payload, cv2.IMREAD_COLOR) + else: + print(f"'{payload}' isn't an URL resource, nor is it a local file") + sys.exit(1) + + if file_type is None: + print(f"'{payload}' doesn't point to a jpg image or to a json file") + sys.exit(1) + if file_type == "jpg": + data = image_to_jpeg_bytes(payload_data) + if file_type == "json": + data = json.dumps(payload_data) + + print("Starting the inference throughput test...") + results = [] + start = time.time() + with concurrent.futures.ProcessPoolExecutor(max_workers=processes) as executor: + results = executor_submitter( + executor, processes, process_worker, threads, data, endpoint, samples, time_based + ) + end = time.time() + elapsed = end - start + + total_requests = sum(results) + + print(f"A total of {total_requests} requests have been served in {elapsed} seconds") + print(f"Avg number of inferences/sec is {total_requests / elapsed}") + print(f"Avg time spent on an inference is {elapsed / total_requests} seconds") + + +def process_worker(threads, data, endpoint, samples, time_based): + results = [] + with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as executor: + results = executor_submitter(executor, threads, task, data, endpoint, samples, time_based) + + return results + + +def executor_submitter(executor, workers, *args, **kwargs): + futures = [] + for worker in range(workers): + future = executor.submit(*args, **kwargs) + futures.append(future) + + results = [future.result() for future in futures] + results = list(itertools.chain.from_iterable(results)) + + return results + + +def task(data, endpoint, samples, time_based): + timeout = 60 + + if isinstance(data, str): + headers = {"content-type": "application/json"} + elif isinstance(data, bytes): + headers = {"content-type": "application/octet-stream"} + else: + return + + if time_based == 0.0: + for i in range(samples): + try: + resp = requests.post( + endpoint, + data=data, + headers=headers, + timeout=timeout, + ) + except Exception as e: + print(e) + break + time.sleep(0.1) + return [samples] + else: + start = time.time() + counter = 0 + while start + time_based >= time.time(): + try: + resp = requests.post( + endpoint, + data=data, + headers=headers, + timeout=timeout, + ) + except Exception as e: + print(e) + break + time.sleep(0.1) + counter += 1 + return [counter] + + +def image_to_jpeg_nparray(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): + """ + Convert numpy image to jpeg numpy vector. + """ + is_success, im_buf_arr = cv2.imencode(".jpg", image, quality) + return im_buf_arr + + +def image_to_jpeg_bytes(image, quality=[int(cv2.IMWRITE_JPEG_QUALITY), 95]): + """ + Convert numpy image to bytes-encoded jpeg image. 
+ """ + buf = image_to_jpeg_nparray(image, quality) + byte_im = buf.tobytes() + return byte_im + + +if __name__ == "__main__": + main() From f28337205af146c198ae7c71c998e28758abaaa9 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Mon, 7 Dec 2020 16:27:39 -0800 Subject: [PATCH 12/36] Update tutorials --- docs/deployments/telemetry.md | 2 +- docs/tutorials/batch.md | 4 +- docs/tutorials/multi-model.md | 77 ------------------- docs/tutorials/project.md | 117 +++++++++++++++++++++++++++++ docs/tutorials/realtime.md | 12 +-- docs/tutorials/traffic-splitter.md | 2 - 6 files changed, 122 insertions(+), 92 deletions(-) delete mode 100644 docs/tutorials/multi-model.md create mode 100644 docs/tutorials/project.md diff --git a/docs/deployments/telemetry.md b/docs/deployments/telemetry.md index e7e767c79c..0c9c3f4821 100644 --- a/docs/deployments/telemetry.md +++ b/docs/deployments/telemetry.md @@ -6,7 +6,7 @@ By default, Cortex sends anonymous usage data to Cortex Labs. ## What data is collected? -If telemetry is enabled, events and errors are collected. Each time you run a command an event will be sent with a randomly generated unique CLI ID and the name of the command. For example, if you run `cortex deploy`, Cortex Labs will receive an event of the structure `{id: 1234, command: "deploy"}`. In addition, the operator sends heartbeats that include cluster metrics like the types of instances running in your cluster. +If telemetry is enabled, events and errors are collected. Each time you run a command an event will be sent with a randomly generated unique CLI ID and the name of the command. For example, if you run `cortex get`, Cortex Labs will receive an event of the structure `{id: 1234, command: "get"}`. In addition, the operator sends heartbeats that include cluster metrics like the types of instances running in your cluster. ## How do I opt out? diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index 6a188d487c..7d26f97755 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -1,8 +1,6 @@ # Deploy a batch API -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -**Note: Batch APIs are only supported on a Cortex cluster (in AWS).** +**Note: at this time, batch APIs are only supported on AWS.** ## Install cortex diff --git a/docs/tutorials/multi-model.md b/docs/tutorials/multi-model.md deleted file mode 100644 index 1f1bee4a82..0000000000 --- a/docs/tutorials/multi-model.md +++ /dev/null @@ -1,77 +0,0 @@ -# Deploy a multi-model API - -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. 
for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - -## Install cortex - -```bash -$ pip install cortex -``` - -## Spin up a cluster on AWS (requires AWS credentials) - -```bash -$ cortex cluster up -``` - -## Define a multi-model API - -```python -# multi_model.py - -import cortex - -class PythonPredictor: - def __init__(self, config): - from transformers import pipeline - - self.analyzer = pipeline(task="sentiment-analysis", device=device) - self.summarizer = pipeline(task="summarization", device=device) - - def predict(self, query_params, payload): - model = query_params.get("model") - - if model == "sentiment": - return self.analyzer(payload["text"])[0] - elif model == "summarizer": - return self.summarizer(payload["text"])[0]["summary_text"] - -requirements = ["tensorflow", "transformers"] - -api_spec = {"name": "multi-model", "kind": "RealtimeAPI"} - -cx = cortex.client("aws") -cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) -``` - -## Deploy to AWS - -```bash -$ python multi_model.py -``` - -## Monitor - -```bash -$ cortex get multi-model --env aws --watch -``` - -## Stream logs - -```bash -$ cortex logs multi-model -``` - -## Make a request - -```bash -$ curl https:// \ - -X POST -H "Content-Type: application/json" \ - -d '{"text": "hello world"}' -``` - -## Delete the API - -```bash -$ cortex delete multi-model -``` diff --git a/docs/tutorials/project.md b/docs/tutorials/project.md new file mode 100644 index 0000000000..84bc55bec9 --- /dev/null +++ b/docs/tutorials/project.md @@ -0,0 +1,117 @@ +# Deploy a project + +## Install cortex + +```bash +$ pip install cortex +``` + +## Create a directory + +```bash +$ mkdir text-generator && cd text-generator + +$ touch predictor.py requirements.txt realtime.py +``` + +## Define a Predictor + +```python +# predictor.py + +class PythonPredictor: + def __init__(self, config): + from transformers import pipeline + + self.model = pipeline(task="text-generation") + + def predict(self, payload): + return self.model(payload["text"])[0] +``` + +## Specify Python dependencies + +```text +tensorflow +transformers +``` + +## Configure an API + +```python +# realtime.py + +import cortex + +api_spec = { + "name": "text-generator", + "kind": "RealtimeAPI", + "predictor": {"type": "python", "path": "predictor.py"}, +} + +cx = cortex.client("local") +cx.deploy(api_spec, project_dir=".") +``` + +## Test locally (requires Docker) + +```bash +$ python realtime.py +``` + +## Monitor + +```bash +$ cortex get text-generator --watch +``` + +## Make a request + +```bash +$ curl http://localhost:8889 -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' +``` + +## Stream logs + +```bash +$ cortex logs text-generator +``` + +## Spin up a cluster on AWS (requires AWS credentials) + +```bash +$ cortex cluster up +``` + +## Edit `realtime.py` + +```python +# cx = cortex.client("local") +cx = cortex.client("aws") +``` + +## Deploy to AWS + +```bash +$ python realtime.py +``` + +## Monitor + +```bash +$ cortex get text-generator --env aws --watch +``` + +## Make a request + +```bash +$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' +``` + +## Delete the APIs + +```bash +$ cortex delete --env local text-generator + +$ cortex delete --env aws text-generator +``` diff --git a/docs/tutorials/realtime.md b/docs/tutorials/realtime.md index 3d1d1f02fc..6871021b2e 100644 --- 
a/docs/tutorials/realtime.md +++ b/docs/tutorials/realtime.md @@ -1,7 +1,5 @@ # Deploy a realtime API -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - ## Install cortex ```bash @@ -47,9 +45,7 @@ $ cortex get text-generator --watch ## Make a request ```bash -$ curl http://localhost:8889 \ - -X POST -H "Content-Type: application/json" \ - -d '{"text": "hello world"}' +$ curl http://localhost:8889 -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' ``` ## Stream logs @@ -86,12 +82,10 @@ $ cortex get text-generator --env aws --watch ## Make a request ```bash -$ curl https:// \ - -X POST -H "Content-Type: application/json" \ - -d '{"text": "hello world"}' +$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' ``` -## Delete the API +## Delete the APIs ```bash $ cortex delete --env local text-generator diff --git a/docs/tutorials/traffic-splitter.md b/docs/tutorials/traffic-splitter.md index 5db0afda9a..fb8537b000 100644 --- a/docs/tutorials/traffic-splitter.md +++ b/docs/tutorials/traffic-splitter.md @@ -1,7 +1,5 @@ # Deploy a traffic splitter -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - ## Install cortex ```bash From 3c6e53cab4ab6ba17e7f697ef2477b00ef07294a Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Mon, 7 Dec 2020 19:01:43 -0800 Subject: [PATCH 13/36] Update docs --- .dockerignore | 1 + build/lint.sh | 1 + build/test-examples.sh | 2 +- docs/{deployments/gpus.md => aws/gpu.md} | 6 +- docs/{deployments => aws}/inferentia.md | 0 docs/deployments/batch-api.md | 36 ------------ docs/deployments/compute.md | 36 ------------ docs/deployments/realtime-api.md | 39 ------------ docs/summary.md | 21 +++---- docs/tutorials/multi-model.md | 75 ++++++++++++++++++++++++ docs/tutorials/traffic-splitter.md | 6 +- 11 files changed, 94 insertions(+), 129 deletions(-) rename docs/{deployments/gpus.md => aws/gpu.md} (91%) rename docs/{deployments => aws}/inferentia.md (100%) delete mode 100644 docs/deployments/batch-api.md delete mode 100644 docs/deployments/compute.md delete mode 100644 docs/deployments/realtime-api.md create mode 100644 docs/tutorials/multi-model.md diff --git a/.dockerignore b/.dockerignore index ee2e048e67..3d39c7390f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,6 +2,7 @@ /bin/ /dev/ /docs/ +/test/ **/.* **/*.md diff --git a/build/lint.sh b/build/lint.sh index 9243106188..841c25111a 100755 --- a/build/lint.sh +++ b/build/lint.sh @@ -72,6 +72,7 @@ output=$(cd "$ROOT" && find . -type f \ ! -path "./vendor/*" \ ! -path "**/.vscode/*" \ ! -path "**/__pycache__/*" \ +! -path "./test/*" \ ! -path "./dev/config/*" \ ! -path "./bin/*" \ ! -path "./.circleci/*" \ diff --git a/build/test-examples.sh b/build/test-examples.sh index a886a63587..3b334f4d00 100755 --- a/build/test-examples.sh +++ b/build/test-examples.sh @@ -19,7 +19,7 @@ set -eou pipefail ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")"/.. 
>/dev/null && pwd)" CORTEX="$ROOT/bin/cortex" -for example in $ROOT/docs/tutorials/*/cortex.yaml; do +for example in $ROOT/test/*/cortex.yaml; do timer=1200 example_base_dir=$(dirname "${example}") retry="false" diff --git a/docs/deployments/gpus.md b/docs/aws/gpu.md similarity index 91% rename from docs/deployments/gpus.md rename to docs/aws/gpu.md index cc7af572b7..98d950cea3 100644 --- a/docs/deployments/gpus.md +++ b/docs/aws/gpu.md @@ -5,9 +5,9 @@ _WARNING: you are on the master branch, please refer to the docs on the branch t To use GPUs: 1. Make sure your AWS account is subscribed to the [EKS-optimized AMI with GPU Support](https://aws.amazon.com/marketplace/pp/B07GRHFXGM). -2. You may need to [request a limit increase](https://console.aws.amazon.com/servicequotas/home?#!/services/ec2/quotas) for your desired instance type. -3. Set instance type to an AWS GPU instance (e.g. `g4dn.xlarge`) when installing Cortex. -4. Set the `gpu` field in the `compute` configuration for your API. One unit of GPU corresponds to one virtual GPU. Fractional requests are not allowed. +1. You may need to [request a limit increase](https://console.aws.amazon.com/servicequotas/home?#!/services/ec2/quotas) for your desired instance type. +1. Set instance type to an AWS GPU instance (e.g. `g4dn.xlarge`) when installing Cortex. +1. Set the `gpu` field in the `compute` configuration for your API. One unit of GPU corresponds to one virtual GPU. Fractional requests are not allowed. ## Tips diff --git a/docs/deployments/inferentia.md b/docs/aws/inferentia.md similarity index 100% rename from docs/deployments/inferentia.md rename to docs/aws/inferentia.md diff --git a/docs/deployments/batch-api.md b/docs/deployments/batch-api.md deleted file mode 100644 index 57f994d70c..0000000000 --- a/docs/deployments/batch-api.md +++ /dev/null @@ -1,36 +0,0 @@ -# Batch API Overview - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -You can deploy your model as a Batch API to create a web service that can receive job requests and orchestrate offline batch inference on large datasets across multiple workers. - -## When should I use a Batch API - -You may want to deploy your model as a Batch API if any of the following scenarios apply to your use case: - -* inference will run on a large dataset and can be distributed across multiple workers -* job progress and status needs to be monitored -* inference is a part of internal data pipelines that may be chained together -* a small number of requests are received, but each request takes minutes or hours to complete - -You may want to consider deploying your model as a [Realtime API](realtime-api.md) if these scenarios don't apply to you. - -A Batch API deployed in Cortex will create/support the following: - -* a REST web service to receive job requests, manage running jobs, and retrieve job statuses -* an autoscaling worker pool that can scale to 0 -* log aggregation and streaming -* `on_job_complete` hook to for aggregation or triggering webhooks - -## How does it work - -You specify the following: - -* a Cortex Predictor class in Python that defines how to initialize your model run batch inference -* an API configuration YAML file that defines how your API will behave in production (parallelism, networking, compute, etc.) - -Once you've implemented your predictor and defined your API configuration, you can use the Cortex CLI to deploy a Batch API. 
The Cortex CLI will package your predictor implementation and the rest of the code and dependencies and upload it to the Cortex Cluster. The Cortex Cluster will setup an endpoint to a web service that can receive job submission requests and manage jobs. - -A job submission typically consists of an input dataset or the location of your input dataset, the number of workers for your job, and the batch size. When a job is submitted to your Batch API endpoint, you will immediately receive a Job ID that you can use to get the job's status and logs, and stop the job if necessary. Behind the scenes, your Batch API will break down the dataset into batches and push them onto a queue. Once all of the batches have been enqueued, the Cortex Cluster will spin up the requested number of workers and initialize them with your predictor implementation. Each worker will take one batch at a time from the queue and run your Predictor implementation. After all batches have been processed, the `on_job_complete` hook in your predictor implementation (if provided) will be executed by one of the workers. - -At any point, you can use the Job ID that was provided upon job submission to make requests to the Batch API endpoint to get job status, progress metrics, and worker statuses. Logs for each job are aggregated and are accessible via the Cortex CLI or in your AWS console. diff --git a/docs/deployments/compute.md b/docs/deployments/compute.md deleted file mode 100644 index 7937ca00ab..0000000000 --- a/docs/deployments/compute.md +++ /dev/null @@ -1,36 +0,0 @@ -# Compute - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -Compute resource requests in Cortex follow the syntax and meaning of [compute resources in Kubernetes](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container). - -For example: - -```yaml -- name: my-api - ... - compute: - cpu: 1 - gpu: 1 - mem: 1G -``` - -CPU, GPU, Inf, and memory requests in Cortex correspond to compute resource requests in Kubernetes. In the example above, the API will only be scheduled once 1 CPU, 1 GPU, and 1G of memory are available on any instance, and it will be guaranteed to have access to those resources throughout its execution. In some cases, resource requests can be (or may default to) `Null`. - -## CPU - -One unit of CPU corresponds to one virtual CPU on AWS. Fractional requests are allowed, and can be specified as a floating point number or via the "m" suffix (`0.2` and `200m` are equivalent). - -## GPU - -One unit of GPU corresponds to one virtual GPU. Fractional requests are not allowed. - -See [GPU documentation](gpus.md) for more information. - -## Memory - -One unit of memory is one byte. Memory can be expressed as an integer or by using one of these suffixes: `K`, `M`, `G`, `T` (or their power-of two counterparts: `Ki`, `Mi`, `Gi`, `Ti`). For example, the following values represent roughly the same memory: `128974848`, `129e6`, `129M`, `123Mi`. - -## Inf - -One unit of Inf corresponds to one Inferentia ASIC with 4 NeuronCores *(not the same thing as `cpu`)* and 8GB of cache memory *(not the same thing as `mem`)*. Fractional requests are not allowed. 
diff --git a/docs/deployments/realtime-api.md b/docs/deployments/realtime-api.md deleted file mode 100644 index 687fb270b0..0000000000 --- a/docs/deployments/realtime-api.md +++ /dev/null @@ -1,39 +0,0 @@ -# Realtime API Overview - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -You can deploy a Realtime API on Cortex to serve your model via an HTTP endpoint for on-demand inferences. - -## When should I use a Realtime API - -You may want to deploy your model as a Realtime API if any of the following scenarios apply to your use case: - -* predictions are served on demand -* predictions need to be made in the time of a single web request -* predictions need to be made on an individual basis -* predictions are served directly to consumers - -You may want to consider deploying your model as a [Batch API](batch-api.md) if these scenarios don't apply to you. - -A Realtime API deployed in Cortex has the following features: - -* request-based autoscaling -* rolling updates to enable you to update the model/serving code without downtime -* realtime metrics collection -* log streaming -* multi-model serving -* server-side batching -* traffic splitting (e.g. for A/B testing) - -## How does it work - -You specify the following: - -* a Cortex Predictor class in Python that defines how to initialize and serve your model -* an API configuration YAML file that defines how your API will behave in production (autoscaling, monitoring, networking, compute, etc.) - -Once you've implemented your predictor and defined your API configuration, you can use the Cortex CLI to deploy a Realtime API. The Cortex CLI will package your predictor implementation and the rest of the code and dependencies and upload it to the Cortex Cluster. The Cortex Cluster will set up an HTTP endpoint that routes traffic to multiple replicas/copies of web servers initialized with your code. - -When a request is made to the HTTP endpoint, it gets routed to one your API's replicas (at random). The replica receives the request, parses the payload and executes the inference code you've defined in your predictor implementation and sends a response. - -The Cortex Cluster will automatically scale based on the incoming traffic and the autoscaling configuration you've defined. You can safely update your model or your code and use the Cortex CLI to deploy without experiencing downtime because updates to your API will be rolled out automatically. Request metrics and logs will automatically be aggregated and be accessible via the Cortex CLI or on your AWS console. 
diff --git a/docs/summary.md b/docs/summary.md index 3b984a57ec..15642c9a60 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -1,7 +1,12 @@ # Table of contents -* [Deploy a realtime API](tutorials/realtime.md) -* [Deploy a batch API](tutorials/batch.md) +## Tutorials + +* [Realtime API](tutorials/realtime.md) +* [Batch API](tutorials/batch.md) +* [Multi-model API](tutorials/multi-model.md) +* [Traffic splitter](tutorials/traffic-splitter.md) +* [Project directory](tutorials/project.md) ## Running on AWS @@ -9,6 +14,8 @@ * [Credentials](aws/credentials.md) * [Security](aws/security.md) * [Spot instances](aws/spot.md) +* [GPUs](aws/gpus.md) +* [Inferentia](aws/inferentia.md) * [Networking](aws/networking.md) * [VPC peering](aws/vpc-peering.md) * [Custom domain](aws/custom-domain.md) @@ -36,16 +43,10 @@ * [Endpoints](deployments/batch-api/endpoints.md) * [Job statuses](deployments/batch-api/statuses.md) * [Python client](deployments/python-client.md) -* [Environments](deployments/environments.md) -* [Telemetry](deployments/telemetry.md) - -## Advanced - -* [Compute](deployments/compute.md) -* [Using GPUs](deployments/gpus.md) -* [Using Inferentia](deployments/inferentia.md) * [Python packages](deployments/python-packages.md) * [System packages](deployments/system-packages.md) +* [Environments](deployments/environments.md) +* [Telemetry](deployments/telemetry.md) ## Troubleshooting diff --git a/docs/tutorials/multi-model.md b/docs/tutorials/multi-model.md new file mode 100644 index 0000000000..aca9ab7825 --- /dev/null +++ b/docs/tutorials/multi-model.md @@ -0,0 +1,75 @@ +# Deploy a multi-model API + +_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ + +## Install cortex + +```bash +$ pip install cortex +``` + +## Spin up a cluster on AWS (requires AWS credentials) + +```bash +$ cortex cluster up +``` + +## Define a multi-model API + +```python +# multi_model.py + +import cortex + +class PythonPredictor: + def __init__(self, config): + from transformers import pipeline + + self.analyzer = pipeline(task="sentiment-analysis", device=device) + self.summarizer = pipeline(task="summarization", device=device) + + def predict(self, query_params, payload): + model = query_params.get("model") + + if model == "sentiment": + return self.analyzer(payload["text"])[0] + elif model == "summarizer": + return self.summarizer(payload["text"])[0]["summary_text"] + +requirements = ["tensorflow", "transformers"] + +api_spec = {"name": "multi-model", "kind": "RealtimeAPI"} + +cx = cortex.client("aws") +cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) +``` + +## Deploy to AWS + +```bash +$ python multi_model.py +``` + +## Monitor + +```bash +$ cortex get multi-model --env aws --watch +``` + +## Stream logs + +```bash +$ cortex logs multi-model +``` + +## Make a request + +```bash +$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator?model=sentiment -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' +``` + +## Delete the API + +```bash +$ cortex delete multi-model +``` diff --git a/docs/tutorials/traffic-splitter.md b/docs/tutorials/traffic-splitter.md index fb8537b000..3e1d2a7182 100644 --- a/docs/tutorials/traffic-splitter.md +++ b/docs/tutorials/traffic-splitter.md @@ -70,7 +70,7 @@ $ python traffic_splitter.py ## Monitor ```bash -$ cortex get text-generator --env aws --watch +$ cortex 
get text-generator --watch ``` ## Stream logs @@ -82,9 +82,7 @@ $ cortex logs text-generator ## Make a request ```bash -$ curl https:// \ - -X POST -H "Content-Type: application/json" \ - -d '{"text": "hello world"}' +$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' ``` ## Delete the API From 230540a0ea36bb489ab4b3877a463970e721506e Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Mon, 7 Dec 2020 19:42:49 -0800 Subject: [PATCH 14/36] Update docs --- docs/deployments/batch-api/deployment.md | 120 ------------------ docs/deployments/realtime-api/deployment.md | 61 --------- docs/summary.md | 44 +++---- docs/tutorials/batch.md | 2 +- .../batch/configuration.md} | 12 +- .../batch}/endpoints.md | 0 .../batch}/predictors.md | 0 .../batch-api => workloads/batch}/statuses.md | 0 .../environments.md | 0 .../python-client.md | 0 .../python-packages.md | 0 .../realtime}/autoscaling.md | 0 .../realtime/configuration.md} | 10 -- .../realtime}/models.md | 0 .../realtime}/parallelism.md | 0 .../realtime}/prediction-monitoring.md | 0 .../realtime}/predictors.md | 0 .../realtime}/statuses.md | 0 .../realtime}/traffic-splitter.md | 0 .../system-packages.md | 0 docs/{deployments => workloads}/telemetry.md | 0 21 files changed, 23 insertions(+), 226 deletions(-) delete mode 100644 docs/deployments/batch-api/deployment.md delete mode 100644 docs/deployments/realtime-api/deployment.md rename docs/{deployments/batch-api/api-configuration.md => workloads/batch/configuration.md} (88%) rename docs/{deployments/batch-api => workloads/batch}/endpoints.md (100%) rename docs/{deployments/batch-api => workloads/batch}/predictors.md (100%) rename docs/{deployments/batch-api => workloads/batch}/statuses.md (100%) rename docs/{deployments => workloads}/environments.md (100%) rename docs/{deployments => workloads}/python-client.md (100%) rename docs/{deployments => workloads}/python-packages.md (100%) rename docs/{deployments/realtime-api => workloads/realtime}/autoscaling.md (100%) rename docs/{deployments/realtime-api/api-configuration.md => workloads/realtime/configuration.md} (93%) rename docs/{deployments/realtime-api => workloads/realtime}/models.md (100%) rename docs/{deployments/realtime-api => workloads/realtime}/parallelism.md (100%) rename docs/{deployments/realtime-api => workloads/realtime}/prediction-monitoring.md (100%) rename docs/{deployments/realtime-api => workloads/realtime}/predictors.md (100%) rename docs/{deployments/realtime-api => workloads/realtime}/statuses.md (100%) rename docs/{deployments/realtime-api => workloads/realtime}/traffic-splitter.md (100%) rename docs/{deployments => workloads}/system-packages.md (100%) rename docs/{deployments => workloads}/telemetry.md (100%) diff --git a/docs/deployments/batch-api/deployment.md b/docs/deployments/batch-api/deployment.md deleted file mode 100644 index 27b94f82bf..0000000000 --- a/docs/deployments/batch-api/deployment.md +++ /dev/null @@ -1,120 +0,0 @@ -# Batch API deployment - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -Once your model is [exported](../../guides/exporting.md), you've implemented a [Predictor](predictors.md), and you've [configured your API](api-configuration.md), you're ready to deploy a Batch API. 
- -## `cortex deploy` - -The `cortex deploy` command collects your configuration and source code and deploys your API on your cluster: - -```bash -$ cortex deploy - -created image-classifier (BatchAPI) -``` - -APIs are declarative, so to update your API, you can modify your source code and/or configuration and run `cortex deploy` again. - -After deploying a Batch API you can use `cortex get ` to display the Batch API endpoint, which you can use to make the following requests: - -1. Submit a batch job -1. Get the status of a job -1. Stop a job - -You can find documentation for the Batch API endpoint [here](endpoints.md). - -## `cortex get` - -The `cortex get` command displays the status of all of your API: - -```bash -$ cortex get - -env batch api running jobs latest job id last update -aws image-classifier 1 69d9c0013c2d0d97 (submitted 30s ago) 46s -``` - -## `cortex get ` - -`cortex get ` shows additional information about a specific Batch API and lists a summary of all currently running / recently submitted jobs. - -```bash -$ cortex get image-classifier - -job id status progress failed start time duration -69d9c0013c2d0d97 running 1/24 0 29 Jul 2020 14:38:01 UTC 30s -69da5b1f8cd3b2d3 completed with failures 15/16 1 29 Jul 2020 13:38:01 UTC 5m20s -69da5bc32feb6aa0 succeeded 40/40 0 29 Jul 2020 12:38:01 UTC 10m21s -69da5bd5b2f87258 succeeded 34/34 0 29 Jul 2020 11:38:01 UTC 8m54s - -endpoint: http://***.amazonaws.com/image-classifier -... -``` - -Appending the `--watch` flag will re-run the `cortex get` command every 2 seconds. - -## Job commands - -Once a job has been submitted to your Batch API (see [here](endpoints.md#submit-a-job)), you can use the Job ID from job submission response to get the status, stream logs, and stop a running job using the CLI. - -### `cortex get ` - -After a submitting a job, you can use the `cortex get ` command to show information about the job: - -```bash -$ cortex get image-classifier 69d9c0013c2d0d97 - -job id: 69d9c0013c2d0d97 -status: running - -start time: 29 Jul 2020 14:38:01 UTC -end time: - -duration: 32s - -batch stats -total succeeded failed avg time per batch -24 1 0 20s - -worker stats -requested running failed succeeded -2 2 0 0 - -job endpoint: https://***..amazonaws.com/image-classifier/69d9c0013c2d0d97 -``` - -### `cortex logs ` - -You can use `cortex logs ` to stream logs from a job: - -```bash -$ cortex logs image-classifier 69d9c0013c2d0d97 - -started enqueuing batches -partitioning 240 items found in job submission into 24 batches of size 10 -completed enqueuing a total of 24 batches -spinning up workers... -2020-07-30 16:50:30.147522:cortex:pid-1:INFO:downloading the project code -2020-07-30 16:50:30.268987:cortex:pid-1:INFO:downloading the python serving image -.... 
-``` - -### `cortex delete ` - -You can use `cortex delete ` to stop a running job: - -```bash -$ cortex delete image-classifier 69d9c0013c2d0d97 - -stopped job 69d96a01ea55da8c -``` - -## `cortex delete` - -Use the `cortex delete` command to delete your API: - -```bash -$ cortex delete my-api - -deleting my-api -``` diff --git a/docs/deployments/realtime-api/deployment.md b/docs/deployments/realtime-api/deployment.md deleted file mode 100644 index f068ff463d..0000000000 --- a/docs/deployments/realtime-api/deployment.md +++ /dev/null @@ -1,61 +0,0 @@ -# API deployment - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -Once your model is [exported](../../guides/exporting.md), you've implemented a [Predictor](predictors.md), and you've [configured your API](api-configuration.md), you're ready to deploy! - -## `cortex deploy` - -The `cortex deploy` command collects your configuration and source code and deploys your API on your cluster: - -```bash -$ cortex deploy - -creating my-api (RealtimeAPI) -``` - -APIs are declarative, so to update your API, you can modify your source code and/or configuration and run `cortex deploy` again. - -## `cortex get` - -The `cortex get` command displays the status of your APIs, and `cortex get ` shows additional information about a specific API. - -```bash -$ cortex get my-api - -status up-to-date requested last update avg request 2XX -live 1 1 1m - - - -endpoint: http://***.amazonaws.com/text-generator -... -``` - -Appending the `--watch` flag will re-run the `cortex get` command every 2 seconds. - -## `cortex logs` - -You can stream logs from your API using the `cortex logs` command: - -```bash -$ cortex logs my-api -``` - -## Making a prediction - -You can use `curl` to test your prediction service, for example: - -```bash -$ curl http://***.amazonaws.com/my-api \ - -X POST -H "Content-Type: application/json" \ - -d '{"key": "value"}' -``` - -## `cortex delete` - -Use the `cortex delete` command to delete your API: - -```bash -$ cortex delete my-api - -deleting my-api -``` diff --git a/docs/summary.md b/docs/summary.md index 15642c9a60..d92b75620b 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -24,29 +24,27 @@ * [Update](aws/update.md) * [Uninstall](aws/uninstall.md) -## Deployments +## Workloads -* [Realtime API](deployments/realtime-api.md) - * [Predictor implementation](deployments/realtime-api/predictors.md) - * [API configuration](deployments/realtime-api/api-configuration.md) - * [API deployment](deployments/realtime-api/deployment.md) - * [API statuses](deployments/realtime-api/statuses.md) - * [Models](deployments/realtime-api/models.md) - * [Parallelism](deployments/realtime-api/parallelism.md) - * [Autoscaling](deployments/realtime-api/autoscaling.md) - * [Prediction monitoring](deployments/realtime-api/prediction-monitoring.md) - * [Traffic Splitter](deployments/realtime-api/traffic-splitter.md) -* [Batch API](deployments/batch-api.md) - * [Predictor implementation](deployments/batch-api/predictors.md) - * [API configuration](deployments/batch-api/api-configuration.md) - * [API deployment](deployments/batch-api/deployment.md) - * [Endpoints](deployments/batch-api/endpoints.md) - * [Job statuses](deployments/batch-api/statuses.md) -* [Python client](deployments/python-client.md) -* [Python packages](deployments/python-packages.md) -* [System packages](deployments/system-packages.md) -* [Environments](deployments/environments.md) -* [Telemetry](deployments/telemetry.md) +* 
[Realtime API](workloads/realtime.md) + * [Predictor implementation](workloads/realtime/predictors.md) + * [API configuration](workloads/realtime/configuration.md) + * [API statuses](workloads/realtime/statuses.md) + * [Models](workloads/realtime/models.md) + * [Parallelism](workloads/realtime/parallelism.md) + * [Autoscaling](workloads/realtime/autoscaling.md) + * [Prediction monitoring](workloads/realtime/prediction-monitoring.md) + * [Traffic Splitter](workloads/realtime/traffic-splitter.md) +* [Batch API](workloads/batch.md) + * [Predictor implementation](workloads/batch/predictors.md) + * [API configuration](workloads/batch/configuration.md) + * [Endpoints](workloads/batch/endpoints.md) + * [Job statuses](workloads/batch/statuses.md) +* [Python client](workloads/python-client.md) +* [Python packages](workloads/python-packages.md) +* [System packages](workloads/system-packages.md) +* [Environments](workloads/environments.md) +* [Telemetry](workloads/telemetry.md) ## Troubleshooting @@ -54,7 +52,7 @@ * [404/503 API responses](troubleshooting/api-request-errors.md) * [NVIDIA runtime not found](troubleshooting/nvidia-container-runtime-not-found.md) * [TF session in predict()](troubleshooting/tf-session-in-predict.md) -* [Serving-side batching errors](troubleshooting/server-side-batching-errors.md) +* [Server-side batching errors](troubleshooting/server-side-batching-errors.md) ## Guides diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index 7d26f97755..7fee9c5150 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -86,7 +86,7 @@ $ python batch.py ## Describe the Batch API ```bash -$ cortex get image-classifier -e aws +$ cortex get image-classifier --env aws ``` ## Submit a job diff --git a/docs/deployments/batch-api/api-configuration.md b/docs/workloads/batch/configuration.md similarity index 88% rename from docs/deployments/batch-api/api-configuration.md rename to docs/workloads/batch/configuration.md index eda8e9d067..ad5d216710 100644 --- a/docs/deployments/batch-api/api-configuration.md +++ b/docs/workloads/batch/configuration.md @@ -1,11 +1,7 @@ -# API configuration +# Batch API configuration _WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ -Once your model is [exported](../../guides/exporting.md) and you've implemented a [Predictor](predictors.md), you can configure your API via a YAML file (typically named `cortex.yaml`). - -Reference the section below which corresponds to your Predictor type: [Python](#python-predictor), [TensorFlow](#tensorflow-predictor), or [ONNX](#onnx-predictor). - ## Python Predictor @@ -29,8 +25,6 @@ Reference the section below which corresponds to your Predictor type: [Python](# mem: # memory request per worker, e.g. 200Mi or 1Gi (default: Null) ``` -See additional documentation for [compute](../compute.md), [networking](../../aws/networking.md), and [overriding API images](../system-packages.md). - ## TensorFlow Predictor @@ -65,8 +59,6 @@ See additional documentation for [compute](../compute.md), [networking](../../aw mem: # memory request per worker, e.g. 200Mi or 1Gi (default: Null) ``` -See additional documentation for [compute](../compute.md), [networking](../../aws/networking.md), and [overriding API images](../system-packages.md). - ## ONNX Predictor @@ -94,5 +86,3 @@ See additional documentation for [compute](../compute.md), [networking](../../aw gpu: # GPU request per worker (default: 0) mem: # memory request per worker, e.g. 
200Mi or 1Gi (default: Null) ``` - -See additional documentation for [compute](../compute.md), [networking](../../aws/networking.md), and [overriding API images](../system-packages.md). diff --git a/docs/deployments/batch-api/endpoints.md b/docs/workloads/batch/endpoints.md similarity index 100% rename from docs/deployments/batch-api/endpoints.md rename to docs/workloads/batch/endpoints.md diff --git a/docs/deployments/batch-api/predictors.md b/docs/workloads/batch/predictors.md similarity index 100% rename from docs/deployments/batch-api/predictors.md rename to docs/workloads/batch/predictors.md diff --git a/docs/deployments/batch-api/statuses.md b/docs/workloads/batch/statuses.md similarity index 100% rename from docs/deployments/batch-api/statuses.md rename to docs/workloads/batch/statuses.md diff --git a/docs/deployments/environments.md b/docs/workloads/environments.md similarity index 100% rename from docs/deployments/environments.md rename to docs/workloads/environments.md diff --git a/docs/deployments/python-client.md b/docs/workloads/python-client.md similarity index 100% rename from docs/deployments/python-client.md rename to docs/workloads/python-client.md diff --git a/docs/deployments/python-packages.md b/docs/workloads/python-packages.md similarity index 100% rename from docs/deployments/python-packages.md rename to docs/workloads/python-packages.md diff --git a/docs/deployments/realtime-api/autoscaling.md b/docs/workloads/realtime/autoscaling.md similarity index 100% rename from docs/deployments/realtime-api/autoscaling.md rename to docs/workloads/realtime/autoscaling.md diff --git a/docs/deployments/realtime-api/api-configuration.md b/docs/workloads/realtime/configuration.md similarity index 93% rename from docs/deployments/realtime-api/api-configuration.md rename to docs/workloads/realtime/configuration.md index 21f1312477..9d9e47fa13 100644 --- a/docs/deployments/realtime-api/api-configuration.md +++ b/docs/workloads/realtime/configuration.md @@ -2,10 +2,6 @@ _WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ -Once your model is [exported](../../guides/exporting.md) and you've implemented a [Predictor](predictors.md), you can configure your API via a YAML file (typically named `cortex.yaml`). - -Reference the section below which corresponds to your Predictor type: [Python](#python-predictor), [TensorFlow](#tensorflow-predictor), or [ONNX](#onnx-predictor). - ## Python Predictor @@ -60,8 +56,6 @@ Reference the section below which corresponds to your Predictor type: [Python](# max_unavailable: # maximum number of replicas that can be unavailable during an update; can be an absolute number, e.g. 5, or a percentage of desired replicas, e.g. 10% (default: 25%) ``` -See additional documentation for [models](models.md), [parallelism](parallelism.md), [autoscaling](autoscaling.md), [compute](../compute.md), [networking](../../aws/networking.md), [prediction monitoring](prediction-monitoring.md), and [overriding API images](../system-packages.md). - ## TensorFlow Predictor @@ -123,8 +117,6 @@ See additional documentation for [models](models.md), [parallelism](parallelism. max_unavailable: # maximum number of replicas that can be unavailable during an update; can be an absolute number, e.g. 5, or a percentage of desired replicas, e.g. 
10% (default: 25%) ``` -See additional documentation for [models](models.md), [parallelism](parallelism.md), [autoscaling](autoscaling.md), [compute](../compute.md), [networking](../../aws/networking.md), [prediction monitoring](prediction-monitoring.md), and [overriding API images](../system-packages.md). - ## ONNX Predictor @@ -178,5 +170,3 @@ See additional documentation for [models](models.md), [parallelism](parallelism. max_surge: # maximum number of replicas that can be scheduled above the desired number of replicas during an update; can be an absolute number, e.g. 5, or a percentage of desired replicas, e.g. 10% (default: 25%) (set to 0 to disable rolling updates) max_unavailable: # maximum number of replicas that can be unavailable during an update; can be an absolute number, e.g. 5, or a percentage of desired replicas, e.g. 10% (default: 25%) ``` - -See additional documentation for [models](models.md), [parallelism](parallelism.md), [autoscaling](autoscaling.md), [compute](../compute.md), [networking](../../aws/networking.md), [prediction monitoring](prediction-monitoring.md), and [overriding API images](../system-packages.md). diff --git a/docs/deployments/realtime-api/models.md b/docs/workloads/realtime/models.md similarity index 100% rename from docs/deployments/realtime-api/models.md rename to docs/workloads/realtime/models.md diff --git a/docs/deployments/realtime-api/parallelism.md b/docs/workloads/realtime/parallelism.md similarity index 100% rename from docs/deployments/realtime-api/parallelism.md rename to docs/workloads/realtime/parallelism.md diff --git a/docs/deployments/realtime-api/prediction-monitoring.md b/docs/workloads/realtime/prediction-monitoring.md similarity index 100% rename from docs/deployments/realtime-api/prediction-monitoring.md rename to docs/workloads/realtime/prediction-monitoring.md diff --git a/docs/deployments/realtime-api/predictors.md b/docs/workloads/realtime/predictors.md similarity index 100% rename from docs/deployments/realtime-api/predictors.md rename to docs/workloads/realtime/predictors.md diff --git a/docs/deployments/realtime-api/statuses.md b/docs/workloads/realtime/statuses.md similarity index 100% rename from docs/deployments/realtime-api/statuses.md rename to docs/workloads/realtime/statuses.md diff --git a/docs/deployments/realtime-api/traffic-splitter.md b/docs/workloads/realtime/traffic-splitter.md similarity index 100% rename from docs/deployments/realtime-api/traffic-splitter.md rename to docs/workloads/realtime/traffic-splitter.md diff --git a/docs/deployments/system-packages.md b/docs/workloads/system-packages.md similarity index 100% rename from docs/deployments/system-packages.md rename to docs/workloads/system-packages.md diff --git a/docs/deployments/telemetry.md b/docs/workloads/telemetry.md similarity index 100% rename from docs/deployments/telemetry.md rename to docs/workloads/telemetry.md From e11c015ad517e133b1ab5a132fa58464d847a576 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Mon, 7 Dec 2020 20:08:06 -0800 Subject: [PATCH 15/36] Update docs --- docs/summary.md | 2 ++ docs/tutorials/multi-model.md | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/summary.md b/docs/summary.md index d92b75620b..4e95d78306 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -1,5 +1,7 @@ # Table of contents +* [Get started](tutorials/realtime.md) + ## Tutorials * [Realtime API](tutorials/realtime.md) diff --git a/docs/tutorials/multi-model.md b/docs/tutorials/multi-model.md index aca9ab7825..4fc5b7f8e3 
100644 --- a/docs/tutorials/multi-model.md +++ b/docs/tutorials/multi-model.md @@ -1,7 +1,5 @@ # Deploy a multi-model API -_WARNING: you are on the master branch; please refer to examples on the branch corresponding to your `cortex version` (e.g. for version 0.23.*, run `git checkout -b 0.23` or switch to the `0.23` branch on GitHub)_ - ## Install cortex ```bash From f6c11a1874ff77ee3755526cd75217020db4900c Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Mon, 7 Dec 2020 20:11:09 -0800 Subject: [PATCH 16/36] Update .gitbook.yaml --- .gitbook.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitbook.yaml b/.gitbook.yaml index 52b37106f2..2039ff62cf 100644 --- a/.gitbook.yaml +++ b/.gitbook.yaml @@ -1,6 +1,7 @@ root: ./docs/ structure: + readme: ./tutorials/realtime.md summary: summary.md redirects: From 3dce6f0f9162b3f2e379d3e51d6045e7c1f5a4ce Mon Sep 17 00:00:00 2001 From: Vishal Bollu Date: Tue, 8 Dec 2020 11:18:50 -0500 Subject: [PATCH 17/36] Revert batch predictor.py --- test/batch/image-classifier/predictor.py | 61 ++++++++++++++++-------- 1 file changed, 42 insertions(+), 19 deletions(-) diff --git a/test/batch/image-classifier/predictor.py b/test/batch/image-classifier/predictor.py index 92a8cc26b9..293c466fd3 100644 --- a/test/batch/image-classifier/predictor.py +++ b/test/batch/image-classifier/predictor.py @@ -2,22 +2,18 @@ import os import requests +import torch +import torchvision +from torchvision import transforms from PIL import Image from io import BytesIO +import boto3 import json import re -# labels "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" -# bucket, key - class PythonPredictor: def __init__(self, config, job_spec): - import re - import boto3 - from torchvision import transforms - import torchvision - self.model = torchvision.models.alexnet(pretrained=True).eval() normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) @@ -25,34 +21,61 @@ def __init__(self, config, job_spec): [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize] ) - self.labels = requests.get(config["labels"]).text.split("\n")[1:] + self.labels = requests.get( + "https://storage.googleapis.com/download.tensorflow.org/data/ImageNetLabels.txt" + ).text.split("\n")[1:] + + if len(config.get("dest_s3_dir", "")) == 0: + raise Exception("'dest_s3_dir' field was not provided in job submission") - self.s3 = boto3.client("s3") # initialize S3 client to save results + self.s3 = boto3.client("s3") self.bucket, self.key = re.match("s3://(.+?)/(.+)", config["dest_s3_dir"]).groups() self.key = os.path.join(self.key, job_spec["job_id"]) def predict(self, payload, batch_id): - import json - from PIL import Image - import torch - tensor_list = [] - for image_url in payload: # download and preprocess each image - img_pil = Image.open(BytesIO(requests.get(image_url).content)) + + # download and preprocess each image + for image_url in payload: + if image_url.startswith("s3://"): + bucket, image_key = re.match("s3://(.+?)/(.+)", image_url).groups() + image_bytes = self.s3.get_object(Bucket=bucket, Key=image_key)["Body"].read() + else: + image_bytes = requests.get(image_url).content + + img_pil = Image.open(BytesIO(image_bytes)) tensor_list.append(self.preprocess(img_pil)) + # classify the batch of images img_tensor = torch.stack(tensor_list) - with torch.no_grad(): # classify the batch of images + with torch.no_grad(): prediction = self.model(img_tensor) _, indices = prediction.max(1) - results = [ # extract 
predicted classes + # extract predicted classes + results = [ {"url": payload[i], "class": self.labels[class_idx]} for i, class_idx in enumerate(indices) ] + json_output = json.dumps(results) # save results + self.s3.put_object(Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json_output) + + def on_job_complete(self): + all_results = [] + + # aggregate all classifications + paginator = self.s3.get_paginator("list_objects_v2") + for page in paginator.paginate(Bucket=self.bucket, Prefix=self.key): + for obj in page["Contents"]: + body = self.s3.get_object(Bucket=self.bucket, Key=obj["Key"])["Body"] + all_results += json.loads(body.read().decode("utf8")) + + # save single file containing aggregated classifications self.s3.put_object( - Bucket=self.bucket, Key=f"{self.key}/{batch_id}.json", Body=json.dumps(results) + Bucket=self.bucket, + Key=os.path.join(self.key, "aggregated_results.json"), + Body=json.dumps(all_results), ) From e598e242c05614e6a4ba1f69766ea9f2838990e1 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Tue, 8 Dec 2020 10:55:22 -0800 Subject: [PATCH 18/36] Update docs --- docs/summary.md | 1 + docs/tutorials/batch.md | 6 ++---- docs/tutorials/realtime.md | 14 +++++++------- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/docs/summary.md b/docs/summary.md index 4e95d78306..88688246b5 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -1,6 +1,7 @@ # Table of contents * [Get started](tutorials/realtime.md) +* [Chat with us](https://gitter.im/cortexlabs/cortex) ## Tutorials diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index 7fee9c5150..75140dfa12 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -1,14 +1,12 @@ # Deploy a batch API -**Note: at this time, batch APIs are only supported on AWS.** - ## Install cortex ```bash $ pip install cortex ``` -## Spin up a cluster on AWS (requires AWS credentials) +## Spin up a cluster on AWS ```bash $ cortex cluster up @@ -142,5 +140,5 @@ Once the job is complete, you should be able to find the results of the batch jo ## Delete the Batch API ```bash -$ cortex delete --env local image-classifier +$ cortex delete image-classifier --env local ``` diff --git a/docs/tutorials/realtime.md b/docs/tutorials/realtime.md index 6871021b2e..4e22e6b43a 100644 --- a/docs/tutorials/realtime.md +++ b/docs/tutorials/realtime.md @@ -9,7 +9,7 @@ $ pip install cortex ## Define a realtime API ```python -# realtime.py +# text_generator.py import cortex @@ -33,7 +33,7 @@ cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) ## Test locally (requires Docker) ```bash -$ python realtime.py +$ python text_generator.py ``` ## Monitor @@ -54,13 +54,13 @@ $ curl http://localhost:8889 -X POST -H "Content-Type: application/json" -d '{"t $ cortex logs text-generator ``` -## Spin up a cluster on AWS (requires AWS credentials) +## Spin up a cluster on AWS ```bash $ cortex cluster up ``` -## Edit `realtime.py` +## Edit `text_generator.py` ```python # cx = cortex.client("local") @@ -70,7 +70,7 @@ cx = cortex.client("aws") ## Deploy to AWS ```bash -$ python realtime.py +$ python text_generator.py ``` ## Monitor @@ -88,7 +88,7 @@ $ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H ## Delete the APIs ```bash -$ cortex delete --env local text-generator +$ cortex delete text-generator --env local -$ cortex delete --env aws text-generator +$ cortex delete text-generator --env aws ``` From 68080c346c256f2c5fba65a468d72115d3077d95 Mon Sep 17 00:00:00 2001 From: Omer 
Spillinger Date: Tue, 8 Dec 2020 13:40:26 -0800 Subject: [PATCH 19/36] Update README.md --- README.md | 75 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/README.md b/README.md index e955d8c274..9e450eb2d0 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,30 @@ Cortex is an open source platform for deploying, managing, and scaling machine l * Runs inference on spot instances with on-demand backups. * Autoscales to handle production workloads. +#### Configure Cortex + +```yaml +# cluster.yaml + +region: us-east-1 +instance_type: g4dn.xlarge +min_instances: 10 +max_instances: 100 +spot: true +``` + +#### Spin up Cortex on your AWS account + +```text +$ cortex cluster up --config cluster.yaml + +○ configuring autoscaling ✓ +○ configuring networking ✓ +○ configuring logging ✓ + +cortex is ready! +``` +
## Reproducible deployments @@ -25,6 +49,36 @@ Cortex is an open source platform for deploying, managing, and scaling machine l * Integrate with your data science platform or CI/CD system. * Test locally before deploying to your cluster. +#### Implement a predictor + +```python +from transformers import pipeline + +class PythonPredictor: + def __init__(self, config): + self.model = pipeline(task="text-generation") + + def predict(self, payload): + return self.model(payload["text"])[0] +``` + +#### Configure an API + +```python +api_spec = { + "name": "text-generator", + "kind": "RealtimeAPI", + "compute": { + "gpu": 1, + "mem": "8Gi" + }, + "autoscaling": { + "min_replicas": 1, + "max_replicas": 10 + } +} +``` +
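To try this configuration out before it ever touches a cluster, a minimal sketch like the one below can exercise the same spec against the local environment (this assumes Docker is running and that the `PythonPredictor` class and `api_spec` dict above are in scope; the `compute` and `autoscaling` sections are dropped here since they are sized for the cluster):

```python
import cortex

# local dry run of the same API (a sketch, not the official flow):
# keep only the fields that matter off-cluster
local_spec = {k: v for k, v in api_spec.items() if k not in ("compute", "autoscaling")}

cx_local = cortex.client("local")  # requires Docker
cx_local.create_api(
    local_spec,
    predictor=PythonPredictor,
    requirements=["tensorflow", "transformers"],
)
```

Once the API behaves as expected locally, the cluster deploy in the next section can reuse the exact same spec.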
## Scalable machine learning APIs @@ -35,8 +89,29 @@ Cortex is an open source platform for deploying, managing, and scaling machine l * Configure traffic splitting for A/B testing. * Update APIs without downtime. +#### Deploy to your cluster + +```python +import cortex + +cx = cortex.client("aws") +cx.create_api(api_spec, predictor=PythonPredictor) + +# creating https://example.com/text-generator +``` + +#### Consume your API + +```bash +$ curl https://example.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' +``` +
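The endpoint can also be consumed programmatically; a rough sketch using the placeholder URL from the example above (substitute the real endpoint reported by `cortex get`), where `cx` is the client handle created in the deploy step:

```python
import requests

# placeholder URL from the example above -- use your API's actual endpoint
response = requests.post(
    "https://example.com/text-generator",
    json={"text": "hello world"},
)
print(response.json())

# the same client handle used for the deploy can describe the running API
print(cx.get_api("text-generator"))
```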
## Get started +```bash +pip install cortex +``` + [Deploy models](https://docs.cortex.dev) and [join our community](https://gitter.im/cortexlabs/cortex). From 1f19d616ce56f184d66f507a486c56ef1e6c5064 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Tue, 8 Dec 2020 14:44:21 -0800 Subject: [PATCH 20/36] Update docs --- docs/tutorials/advanced.md | 119 +++++++++++++++++++++++++++++++++++++ docs/tutorials/batch.md | 34 +++++++---- docs/tutorials/realtime.md | 38 ++++++++---- 3 files changed, 167 insertions(+), 24 deletions(-) create mode 100644 docs/tutorials/advanced.md diff --git a/docs/tutorials/advanced.md b/docs/tutorials/advanced.md new file mode 100644 index 0000000000..347d45f9aa --- /dev/null +++ b/docs/tutorials/advanced.md @@ -0,0 +1,119 @@ +# Advanced deployments + +## Install cortex + +```bash +$ pip install cortex +``` + +## Create a directory + +```bash +$ mkdir text-generator && cd text-generator + +$ touch predictor.py requirements.txt text-generator.yaml +``` + +## Define a Predictor in `predictor.py` + +```python +class PythonPredictor: + def __init__(self, config): + from transformers import pipeline + + self.model = pipeline(task="text-generation") + + def predict(self, payload): + return self.model(payload["text"])[0] +``` + +## Specify Python dependencies in `requirements.txt` + +```text +tensorflow +transformers +``` + +## Configure 2 realtime APIs and a traffic splitter in `text-generator.yaml` + +```yaml +- name: text-generator-cpu + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + cpu: 1 + +- name: text-generator-gpu + kind: RealtimeAPI + predictor: + type: python + path: predictor.py + compute: + gpu: 1 + +- name: text-generator + kind: TrafficSplitter + apis: + - name: text-generator-cpu + weight: 80 + - name: text-generator-gpu + weight: 20 +``` + +## Test locally (requires Docker) + +```bash +$ cortex deploy text-generator.yaml +``` + +## Monitor + +```bash +$ cortex get text-generator --watch +``` + +## Make a request + +```bash +$ curl http://localhost:8889 -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' +``` + +## Stream logs + +```bash +$ cortex logs text-generator +``` + +## Spin up a cluster on AWS + +```bash +$ cortex cluster up +``` + +## Deploy to AWS + +```bash +$ cortex deploy text-generator.yaml --env aws +``` + +## Monitor + +```bash +$ cortex get text-generator --env aws --watch +``` + +## Make a request + +```bash +$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' +``` + +## Delete the APIs + +```bash +$ cortex delete text-generator --env local + +$ cortex delete text-generator --env aws +``` diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index 75140dfa12..253ff00e5c 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -1,18 +1,30 @@ # Deploy a batch API -## Install cortex +Deploy models as batch APIs that can orchestrate distributed batch inference jobs on large datasets. 
+ +## Key features + +* Distributed inference +* Fault tolerance with queues +* Metrics and log aggregation +* `on_job_complete` webhook +* Scale to 0 + +## How it works + +### Install cortex ```bash $ pip install cortex ``` -## Spin up a cluster on AWS +### Spin up a cluster on AWS ```bash $ cortex cluster up ``` -## Define a batch API +### Define a batch API ```python # batch.py @@ -75,19 +87,19 @@ cx = cortex.client("aws") cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) ``` -## Deploy to your Cortex cluster on AWS +### Deploy to your Cortex cluster on AWS ```bash $ python batch.py ``` -## Describe the Batch API +### Describe the Batch API ```bash $ cortex get image-classifier --env aws ``` -## Submit a job +### Submit a job ```python import cortex @@ -121,24 +133,24 @@ print(response) # > {"job_id":"69b183ed6bdf3e9b","api_name":"image-classifier", "config": {"dest_s3_dir": ...}} ``` -## Monitor the job +### Monitor the job ```bash $ cortex get image-classifier 69b183ed6bdf3e9b ``` -## Stream job logs +### Stream job logs ```bash $ cortex logs image-classifier 69b183ed6bdf3e9b ``` -## View the results +### View the results Once the job is complete, you should be able to find the results of the batch job in the S3 directory you've specified. -## Delete the Batch API +### Delete the Batch API ```bash -$ cortex delete image-classifier --env local +$ cortex delete image-classifier --env local ``` diff --git a/docs/tutorials/realtime.md b/docs/tutorials/realtime.md index 4e22e6b43a..f21ad88903 100644 --- a/docs/tutorials/realtime.md +++ b/docs/tutorials/realtime.md @@ -1,12 +1,24 @@ # Deploy a realtime API -## Install cortex +Deploy models as realtime APIs that can respond to prediction requests on demand. For example, an object detection web service that receives an image and returns a list of objects in the image. 
+ +## Key features + +* Request-based autoscaling +* Multi-model endpoints +* Server-side batching +* Metrics and log aggregation +* Rolling updates + +## How it works + +### Install cortex ```bash $ pip install cortex ``` -## Define a realtime API +### Define a realtime API ```python # text_generator.py @@ -27,65 +39,65 @@ requirements = ["tensorflow", "transformers"] api_spec = {"name": "text-generator", "kind": "RealtimeAPI"} cx = cortex.client("local") -cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) +cx.create_api(api_spec, predictor=PythonPredictor, requirements=requirements) ``` -## Test locally (requires Docker) +### Test locally (requires Docker) ```bash $ python text_generator.py ``` -## Monitor +### Monitor ```bash $ cortex get text-generator --watch ``` -## Make a request +### Make a request ```bash $ curl http://localhost:8889 -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' ``` -## Stream logs +### Stream logs ```bash $ cortex logs text-generator ``` -## Spin up a cluster on AWS +### Spin up a cluster on AWS ```bash $ cortex cluster up ``` -## Edit `text_generator.py` +### Edit `text_generator.py` ```python # cx = cortex.client("local") cx = cortex.client("aws") ``` -## Deploy to AWS +### Deploy to AWS ```bash $ python text_generator.py ``` -## Monitor +### Monitor ```bash $ cortex get text-generator --env aws --watch ``` -## Make a request +### Make a request ```bash $ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' ``` -## Delete the APIs +### Delete the APIs ```bash $ cortex delete text-generator --env local From 6b03e4eefe3eef4a3676b9288fafcb88b5266668 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 17:55:36 -0500 Subject: [PATCH 21/36] Tutorials --- docs/tutorials/batch.md | 4 +- docs/tutorials/multi-model.md | 46 +----------- docs/tutorials/project.md | 112 ++++++++--------------------- docs/tutorials/traffic-splitter.md | 75 +++++++------------ 4 files changed, 57 insertions(+), 180 deletions(-) diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index 75140dfa12..868f90fdb1 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -72,7 +72,7 @@ api_spec = { } cx = cortex.client("aws") -cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) +cx.create_api(api_spec, predictor=PythonPredictor, requirements=requirements) ``` ## Deploy to your Cortex cluster on AWS @@ -140,5 +140,5 @@ Once the job is complete, you should be able to find the results of the batch jo ## Delete the Batch API ```bash -$ cortex delete image-classifier --env local +$ cortex delete image-classifier --env local ``` diff --git a/docs/tutorials/multi-model.md b/docs/tutorials/multi-model.md index 4fc5b7f8e3..8fd78fe2da 100644 --- a/docs/tutorials/multi-model.md +++ b/docs/tutorials/multi-model.md @@ -1,19 +1,5 @@ # Deploy a multi-model API -## Install cortex - -```bash -$ pip install cortex -``` - -## Spin up a cluster on AWS (requires AWS credentials) - -```bash -$ cortex cluster up -``` - -## Define a multi-model API - ```python # multi_model.py @@ -39,35 +25,5 @@ requirements = ["tensorflow", "transformers"] api_spec = {"name": "multi-model", "kind": "RealtimeAPI"} cx = cortex.client("aws") -cx.deploy(api_spec, predictor=PythonPredictor, requirements=requirements) -``` - -## Deploy to AWS - -```bash -$ python multi_model.py -``` - -## Monitor - -```bash -$ cortex get multi-model --env aws --watch -``` - 
-## Stream logs - -```bash -$ cortex logs multi-model -``` - -## Make a request - -```bash -$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator?model=sentiment -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' -``` - -## Delete the API - -```bash -$ cortex delete multi-model +cx.create_api(api_spec, predictor=PythonPredictor, requirements=requirements) ``` diff --git a/docs/tutorials/project.md b/docs/tutorials/project.md index 84bc55bec9..be51aea5fb 100644 --- a/docs/tutorials/project.md +++ b/docs/tutorials/project.md @@ -1,117 +1,63 @@ # Deploy a project -## Install cortex +You can deploy an API by providing a project directory. Cortex will save the project directory and make it available during API initialization. ```bash -$ pip install cortex +project/ + ├── model.py + ├── util.py + ├── predictor.py + ├── requirements.txt + └── ... ``` -## Create a directory - -```bash -$ mkdir text-generator && cd text-generator - -$ touch predictor.py requirements.txt realtime.py -``` - -## Define a Predictor +You can define your Predictor class in a separate python file and import code from your project. ```python # predictor.py +from model import MyModel + class PythonPredictor: def __init__(self, config): - from transformers import pipeline + model = MyModel() - self.model = pipeline(task="text-generation") - - def predict(self, payload): - return self.model(payload["text"])[0] + def predict(payload): + return model(payload) ``` -## Specify Python dependencies - -```text -tensorflow -transformers -``` - -## Configure an API +## Deploy using the Python Client ```python -# realtime.py - import cortex api_spec = { "name": "text-generator", "kind": "RealtimeAPI", - "predictor": {"type": "python", "path": "predictor.py"}, + "predictor": { + "type": "python", + "path": "predictor.py" + } } -cx = cortex.client("local") -cx.deploy(api_spec, project_dir=".") -``` - -## Test locally (requires Docker) - -```bash -$ python realtime.py -``` - -## Monitor - -```bash -$ cortex get text-generator --watch -``` - -## Make a request - -```bash -$ curl http://localhost:8889 -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' -``` - -## Stream logs - -```bash -$ cortex logs text-generator -``` - -## Spin up a cluster on AWS (requires AWS credentials) - -```bash -$ cortex cluster up -``` - -## Edit `realtime.py` - -```python -# cx = cortex.client("local") cx = cortex.client("aws") +cx.create_api(api_spec, project_dir=".") ``` -## Deploy to AWS +## Deploy using the CLI -```bash -$ python realtime.py -``` +Navigate to your project directory and define a yaml with the api specification: -## Monitor +```yaml +# api.yaml -```bash -$ cortex get text-generator --env aws --watch +- name: text-generator + kind: RealtimeAPI + predictor: + type: python + path: predictor.py ``` -## Make a request - ```bash -$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' -``` - -## Delete the APIs - -```bash -$ cortex delete --env local text-generator - -$ cortex delete --env aws text-generator +$ cortex deploy api.yaml -e aws ``` diff --git a/docs/tutorials/traffic-splitter.md b/docs/tutorials/traffic-splitter.md index 3e1d2a7182..be4502929b 100644 --- a/docs/tutorials/traffic-splitter.md +++ b/docs/tutorials/traffic-splitter.md @@ -1,29 +1,17 @@ -# Deploy a traffic splitter +# Traffic splitter -## Install cortex +A Traffic Splitter can be used expose multiple APIs as a single endpoint. 
The percentage of traffic routed to each API can be controlled. This can be useful when performing A/B tests, setting up multi-armed bandits or performing canary deployments. -```bash -$ pip install cortex -``` - -## Spin up a cluster on AWS (requires AWS credentials) - -```bash -$ cortex cluster up -``` +**Note: Traffic Splitter is only supported on a Cortex cluster** -## Define 2 realtime APIs and a traffic splitter +## Deploy APIs ```python -# traffic_splitter.py - -import cortex - class PythonPredictor: def __init__(self, config): from transformers import pipeline - self.model = pipeline(task="text-generation") + self.model = pipeline(task="text-generation", model=config["model"]) def predict(self, payload): return self.model(payload["text"])[0] @@ -46,47 +34,34 @@ api_spec_gpu = { }, } -traffic_splitter = { - "name": "text-generator", - "kind": "TrafficSplitter", - "apis": [ - {"name": "text-generator-cpu", "weight": 30}, - {"name": "text-generator-gpu", "weight": 70}, - ], -} - cx = cortex.client("aws") -cx.deploy(api_spec_cpu, predictor=PythonPredictor, requirements=requirements) -cx.deploy(api_spec_gpu, predictor=PythonPredictor, requirements=requirements) -cx.deploy(traffic_splitter) +cx.create_api(api_spec_cpu, predictor=PythonPredictor, requirements=requirements) +cx.create_api(api_spec_gpu, predictor=PythonPredictor, requirements=requirements) ``` -## Deploy to AWS +## Deploy a traffic splitter -```bash -$ python traffic_splitter.py -``` - -## Monitor - -```bash -$ cortex get text-generator --watch -``` - -## Stream logs +```python +traffic_splitter_spec = { + "name": "classifier", + "kind": "TrafficSplitter", + "apis": [ + {"name": "text-generator-cpu", "weight": 50}, + {"name": "text-generator-gpu", "weight": 50}, + ], +} -```bash -$ cortex logs text-generator +cx.create_api(traffic_splitter_spec) ``` -## Make a request +## Update the weights of the traffic splitter -```bash -$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' -``` +```python +traffic_splitter_spec = cx.get_api("classifier")["spec"]["submitted_api_spec"] -## Delete the API +# send 99% of the traffic to text-generator-gpu +traffic_splitter_spec["api"][0]["weight"] = 1 +traffic_splitter_spec["api"][1]["weight"] = 99 -```bash -$ cortex delete text-generator +cx.patch(traffic_splitter_spec) ``` From 6d9fe04d587aa43b36e501aa08cfb36bbd23abe9 Mon Sep 17 00:00:00 2001 From: Vishal Bollu Date: Tue, 8 Dec 2020 23:37:08 +0000 Subject: [PATCH 22/36] Remove cloud folder --- docs/cloud/install.md | 15 --------------- docs/cloud/uninstall.md | 15 --------------- docs/cloud/update.md | 11 ----------- 3 files changed, 41 deletions(-) delete mode 100644 docs/cloud/install.md delete mode 100644 docs/cloud/uninstall.md delete mode 100644 docs/cloud/update.md diff --git a/docs/cloud/install.md b/docs/cloud/install.md deleted file mode 100644 index c210d7c2e4..0000000000 --- a/docs/cloud/install.md +++ /dev/null @@ -1,15 +0,0 @@ -# Install - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -## AWS - -To spin up Cortex using AWS as the cloud provider, follow [these instructions](../aws/install.md). - -## GCP - -To spin up Cortex using GCP as the cloud provider, follow [these instructions](../gcp/install.md). - -## Local - -If you'll only be using Cortex locally, install it with `pip install cortex`. 
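After updating the weights with `cx.patch(...)` as in the traffic splitter tutorial above, the current weights can be confirmed by re-fetching the spec. The following is a minimal sketch that reuses only the client calls already shown there (`cortex.client`, `get_api`, and the `submitted_api_spec` structure); any structure of the returned spec beyond those fields is an assumption.

```python
import cortex

cx = cortex.client("aws")

# Re-fetch the submitted spec for the traffic splitter (named "classifier" in the
# tutorial above) and print the weight currently assigned to each API.
spec = cx.get_api("classifier")["spec"]["submitted_api_spec"]
for api in spec["apis"]:
    print(api["name"], api["weight"])
```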
diff --git a/docs/cloud/uninstall.md b/docs/cloud/uninstall.md deleted file mode 100644 index a162f34dbd..0000000000 --- a/docs/cloud/uninstall.md +++ /dev/null @@ -1,15 +0,0 @@ -# Uninstall - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -## AWS - -To spin down a Cortex cluster on AWS, follow [these instructions](../aws/uninstall.md). - -## GCP - -To spin down a Cortex cluster on GCP, follow [these instructions](../gcp/uninstall.md). - -## Local - -To uninstall the Cortex CLI, run `pip uninstall cortex`. diff --git a/docs/cloud/update.md b/docs/cloud/update.md deleted file mode 100644 index 1cf87cc8da..0000000000 --- a/docs/cloud/update.md +++ /dev/null @@ -1,11 +0,0 @@ -# Update - -_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ - -## AWS - -To update the configuration of a running Cortex cluster on AWS, follow [these instructions](../aws/update.md). - -## GCP - -It is currently not possible to update a Cortex cluster running on GCP. From 215ab408b292f56b2abc3bbc137e3024965c5058 Mon Sep 17 00:00:00 2001 From: Omer Spillinger Date: Tue, 8 Dec 2020 15:45:02 -0800 Subject: [PATCH 23/36] Update docs --- docs/tutorials/multi-model.md | 10 ++++++++++ docs/tutorials/realtime.md | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/tutorials/multi-model.md b/docs/tutorials/multi-model.md index 8fd78fe2da..4993dfebf5 100644 --- a/docs/tutorials/multi-model.md +++ b/docs/tutorials/multi-model.md @@ -1,5 +1,9 @@ # Deploy a multi-model API +Deploy several models in a single API to improve resource utilization efficiency. + +### Define a multi-model API + ```python # multi_model.py @@ -27,3 +31,9 @@ api_spec = {"name": "multi-model", "kind": "RealtimeAPI"} cx = cortex.client("aws") cx.create_api(api_spec, predictor=PythonPredictor, requirements=requirements) ``` + +### Deploy + +```bash +$ python multi_model.py +``` diff --git a/docs/tutorials/realtime.md b/docs/tutorials/realtime.md index f21ad88903..5befb96bc8 100644 --- a/docs/tutorials/realtime.md +++ b/docs/tutorials/realtime.md @@ -1,6 +1,6 @@ # Deploy a realtime API -Deploy models as realtime APIs that can respond to prediction requests on demand. For example, an object detection web service that receives an image and returns a list of objects in the image. +Deploy models as realtime APIs that can respond to prediction requests on demand. 
## Key features From 1c4cea1f109a61e39e1629f3ff2c6668aa93f36f Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 19:17:21 -0500 Subject: [PATCH 24/36] Remove references to specific documentation in the code --- cli/cluster/errors.go | 2 +- cli/cmd/errors.go | 4 ++-- cli/cmd/lib_aws_creds.go | 2 +- cli/cmd/lib_cluster_config_aws.go | 22 +++++++++---------- cli/cmd/lib_cluster_config_gcp.go | 10 ++++----- cli/local/deploy.go | 2 +- docs/aws/install.md | 2 +- docs/workloads/python-client.md | 7 +----- manager/debug.sh | 2 +- manager/info.sh | 2 +- manager/install.sh | 4 ++-- manager/refresh.sh | 2 +- pkg/lib/docker/errors.go | 2 +- pkg/lib/k8s/errors.go | 2 +- pkg/operator/endpoints/errors.go | 2 +- pkg/operator/endpoints/submit_job.go | 2 +- .../resources/batchapi/validations.go | 2 +- pkg/operator/resources/resources.go | 4 ++-- pkg/types/spec/errors.go | 4 ++-- pkg/types/spec/validations.go | 18 ++------------- pkg/workloads/cortex/client/cortex/client.py | 9 ++------ pkg/workloads/cortex/serve/init/bootloader.sh | 4 ++-- 22 files changed, 43 insertions(+), 67 deletions(-) diff --git a/cli/cluster/errors.go b/cli/cluster/errors.go index a0a953428b..0256283da5 100644 --- a/cli/cluster/errors.go +++ b/cli/cluster/errors.go @@ -62,7 +62,7 @@ func ErrorFailedToConnectOperator(originalError error, envName string, operatorU msg += fmt.Sprintf(" → otherwise you can ignore this message, and prevent it in the future with `cortex env delete %s`\n", envName) msg += "\nif you have a cluster running:\n" msg += fmt.Sprintf(" → run `cortex cluster info --configure-env %s` to update your environment (include `--config ` if you have a cluster configuration file)\n", envName) - msg += fmt.Sprintf(" → if you set `operator_load_balancer_scheme: internal` in your cluster configuration file, your CLI must run from within a VPC that has access to your cluster's VPC (see https://docs.cortex.dev/v/%s/aws/vpc-peering)\n", consts.CortexVersionMinor) + msg += fmt.Sprintf(" → if you set `operator_load_balancer_scheme: internal` in your cluster configuration file, your CLI must run from within a VPC that has access to your cluster's VPC (see https://docs.cortex.dev/v/%s/)\n", consts.CortexVersionMinor) } return errors.WithStack(&errors.Error{ diff --git a/cli/cmd/errors.go b/cli/cmd/errors.go index 7757ed835b..bd94fa8b11 100644 --- a/cli/cmd/errors.go +++ b/cli/cmd/errors.go @@ -249,7 +249,7 @@ func ErrorMissingAWSCredentials() error { func ErrorCredentialsInClusterConfig(cmd string, path string) error { return errors.WithStack(&errors.Error{ Kind: ErrCredentialsInClusterConfig, - Message: fmt.Sprintf("specifying credentials in the cluster configuration is no longer supported, please specify aws credentials using flags (e.g. cortex cluster %s --config %s --aws-key --aws-secret ) or set environment variables; see https://docs.cortex.dev/v/%s/aws/security#iam-permissions for more information", cmd, path, consts.CortexVersionMinor), + Message: fmt.Sprintf("specifying credentials in the cluster configuration is no longer supported, please specify aws credentials using flags (e.g. 
cortex cluster %s --config %s --aws-key --aws-secret ) or set environment variables; see https://docs.cortex.dev/v/%s/ for more information", cmd, path, consts.CortexVersionMinor), }) } @@ -343,6 +343,6 @@ func ErrorDeployFromTopLevelDir(genericDirName string, providerType types.Provid } return errors.WithStack(&errors.Error{ Kind: ErrDeployFromTopLevelDir, - Message: fmt.Sprintf("cannot deploy from your %s directory - when deploying your API, cortex sends all files in your project directory (i.e. the directory which contains cortex.yaml) to your %s (see https://docs.cortex.dev/v/%s/deployments/realtime-api/predictors#project-files for Realtime API and https://docs.cortex.dev/v/%s/deployments/batch-api/predictors#project-files for Batch API); therefore it is recommended to create a subdirectory for your project files", genericDirName, targetStr, consts.CortexVersionMinor, consts.CortexVersionMinor), + Message: fmt.Sprintf("cannot deploy from your %s directory - when deploying your API, cortex sends all files in your project directory (i.e. the directory which contains cortex.yaml) to your %s (see https://docs.cortex.dev/v/%s/); therefore it is recommended to create a subdirectory for your project files", genericDirName, targetStr, consts.CortexVersionMinor), }) } diff --git a/cli/cmd/lib_aws_creds.go b/cli/cmd/lib_aws_creds.go index d2a8866393..99a49167db 100644 --- a/cli/cmd/lib_aws_creds.go +++ b/cli/cmd/lib_aws_creds.go @@ -69,7 +69,7 @@ func promptIfNotAdmin(awsClient *aws.Client, disallowPrompt bool) { } if !awsClient.IsAdmin() { - warningStr := fmt.Sprintf("warning: your IAM user%s does not have administrator access. This will likely prevent Cortex from installing correctly, so it is recommended to attach the AdministratorAccess policy to your IAM user (or to a group that your IAM user belongs to) via the AWS IAM console. If you'd like, you may provide separate credentials for your cluster to use after it's running (see https://docs.cortex.dev/v/%s/aws/security for instructions).\n\n", accessKeyMsg, consts.CortexVersionMinor) + warningStr := fmt.Sprintf("warning: your IAM user%s does not have administrator access. This will likely prevent Cortex from installing correctly, so it is recommended to attach the AdministratorAccess policy to your IAM user (or to a group that your IAM user belongs to) via the AWS IAM console. 
If you'd like, you may provide separate credentials for your cluster to use after it's running (see https://docs.cortex.dev/v/%s/).\n\n", accessKeyMsg, consts.CortexVersionMinor) if disallowPrompt { fmt.Print(warningStr) } else { diff --git a/cli/cmd/lib_cluster_config_aws.go b/cli/cmd/lib_cluster_config_aws.go index 2971e47c19..020a9648ee 100644 --- a/cli/cmd/lib_cluster_config_aws.go +++ b/cli/cmd/lib_cluster_config_aws.go @@ -70,7 +70,7 @@ func readCachedClusterConfigFile(clusterConfig *clusterconfig.Config, filePath s func readUserClusterConfigFile(clusterConfig *clusterconfig.Config) error { errs := cr.ParseYAMLFile(clusterConfig, clusterconfig.UserValidation, _flagClusterConfig) if errors.HasError(errs) { - return errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/aws/install", consts.CortexVersionMinor)) + return errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } return nil @@ -85,7 +85,7 @@ func getNewClusterAccessConfig(disallowPrompt bool) (*clusterconfig.AccessConfig if _flagClusterConfig != "" { errs := cr.ParseYAMLFile(accessConfig, clusterconfig.AccessValidation, _flagClusterConfig) if errors.HasError(errs) { - return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/aws/install", consts.CortexVersionMinor)) + return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } } @@ -121,7 +121,7 @@ func getClusterAccessConfigWithCache(disallowPrompt bool) (*clusterconfig.Access if _flagClusterConfig != "" { errs := cr.ParseYAMLFile(accessConfig, clusterconfig.AccessValidation, _flagClusterConfig) if errors.HasError(errs) { - return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/aws/install", consts.CortexVersionMinor)) + return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } } @@ -192,7 +192,7 @@ func getInstallClusterConfig(awsClient *aws.Client, awsCreds AWSCredentials, acc err = clusterConfig.Validate(awsClient) if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/aws/install", consts.CortexVersionMinor)) + err = errors.Append(err, fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) if _flagClusterConfig != "" { err = errors.Wrap(err, _flagClusterConfig) } @@ -258,7 +258,7 @@ func getConfigureClusterConfig(cachedClusterConfig clusterconfig.Config, awsCred err = userClusterConfig.Validate(awsClient) if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/aws/install", consts.CortexVersionMinor)) + err = errors.Append(err, fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) if _flagClusterConfig != "" { err = errors.Wrap(err, _flagClusterConfig) } @@ -542,23 +542,23 @@ func confirmInstallClusterConfig(clusterConfig *clusterconfig.Config, awsCreds A fmt.Printf("cortex 
will also create an s3 bucket (%s) and a cloudwatch log group (%s)%s\n\n", clusterConfig.Bucket, clusterConfig.ClusterName, privateSubnetMsg) if clusterConfig.APIGatewaySetting == clusterconfig.NoneAPIGatewaySetting { - fmt.Print(fmt.Sprintf("warning: you've disabled API Gateway cluster-wide, so APIs will not be able to create API Gateway endpoints (they will still be reachable via the API load balancer; see https://docs.cortex.dev/v/%s/aws/networking for more information)\n\n", consts.CortexVersionMinor)) + fmt.Print(fmt.Sprintf("warning: you've disabled API Gateway cluster-wide, so APIs will not be able to create API Gateway endpoints (they will still be reachable via the API load balancer; see https://docs.cortex.dev/v/%s/ for more information)\n\n", consts.CortexVersionMinor)) } if clusterConfig.OperatorLoadBalancerScheme == clusterconfig.InternalLoadBalancerScheme { - fmt.Print(fmt.Sprintf("warning: you've configured the operator load balancer to be internal; you must configure VPC Peering to connect your CLI to your cluster operator (see https://docs.cortex.dev/v/%s/aws/vpc-peering)\n\n", consts.CortexVersionMinor)) + fmt.Print(fmt.Sprintf("warning: you've configured the operator load balancer to be internal; you must configure VPC Peering to connect your CLI to your cluster operator (see https://docs.cortex.dev/v/%s/)\n\n", consts.CortexVersionMinor)) } if isSpot && clusterConfig.SpotConfig.OnDemandBackup != nil && !*clusterConfig.SpotConfig.OnDemandBackup { if *clusterConfig.SpotConfig.OnDemandBaseCapacity == 0 && *clusterConfig.SpotConfig.OnDemandPercentageAboveBaseCapacity == 0 { - fmt.Printf("warning: you've disabled on-demand instances (%s=0 and %s=0); spot instances are not guaranteed to be available so please take that into account for production clusters; see https://docs.cortex.dev/v/%s/aws/spot for more information\n\n", clusterconfig.OnDemandBaseCapacityKey, clusterconfig.OnDemandPercentageAboveBaseCapacityKey, consts.CortexVersionMinor) + fmt.Printf("warning: you've disabled on-demand instances (%s=0 and %s=0); spot instances are not guaranteed to be available so please take that into account for production clusters; see https://docs.cortex.dev/v/%s/ for more information\n\n", clusterconfig.OnDemandBaseCapacityKey, clusterconfig.OnDemandPercentageAboveBaseCapacityKey, consts.CortexVersionMinor) } else { - fmt.Printf("warning: you've enabled spot instances; spot instances are not guaranteed to be available so please take that into account for production clusters; see https://docs.cortex.dev/v/%s/aws/spot for more information\n\n", consts.CortexVersionMinor) + fmt.Printf("warning: you've enabled spot instances; spot instances are not guaranteed to be available so please take that into account for production clusters; see https://docs.cortex.dev/v/%s/ for more information\n\n", consts.CortexVersionMinor) } } if !disallowPrompt { - exitMessage := fmt.Sprintf("cluster configuration can be modified via the cluster config file; see https://docs.cortex.dev/v/%s/aws/install for more information", consts.CortexVersionMinor) + exitMessage := fmt.Sprintf("cluster configuration can be modified via the cluster config file; see https://docs.cortex.dev/v/%s/ for more information", consts.CortexVersionMinor) prompt.YesOrExit("would you like to continue?", "", exitMessage) } } @@ -567,7 +567,7 @@ func confirmConfigureClusterConfig(clusterConfig clusterconfig.Config, awsCreds fmt.Println(clusterConfigConfirmationStr(clusterConfig, awsCreds, awsClient)) if !disallowPrompt { - exitMessage := 
fmt.Sprintf("cluster configuration can be modified via the cluster config file; see https://docs.cortex.dev/v/%s/aws/install for more information", consts.CortexVersionMinor) + exitMessage := fmt.Sprintf("cluster configuration can be modified via the cluster config file; see https://docs.cortex.dev/v/%s/ for more information", consts.CortexVersionMinor) prompt.YesOrExit(fmt.Sprintf("your cluster named \"%s\" in %s will be updated according to the configuration above, are you sure you want to continue?", clusterConfig.ClusterName, *clusterConfig.Region), "", exitMessage) } } diff --git a/cli/cmd/lib_cluster_config_gcp.go b/cli/cmd/lib_cluster_config_gcp.go index 4530bab618..62724d0616 100644 --- a/cli/cmd/lib_cluster_config_gcp.go +++ b/cli/cmd/lib_cluster_config_gcp.go @@ -66,7 +66,7 @@ func readCachedGCPClusterConfigFile(clusterConfig *clusterconfig.GCPConfig, file func readUserGCPClusterConfigFile(clusterConfig *clusterconfig.GCPConfig) error { errs := cr.ParseYAMLFile(clusterConfig, clusterconfig.UserGCPValidation, _flagClusterGCPConfig) if errors.HasError(errs) { - return errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/gcp/install", consts.CortexVersionMinor)) + return errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } return nil @@ -81,7 +81,7 @@ func getNewGCPClusterAccessConfig(disallowPrompt bool) (*clusterconfig.GCPAccess if _flagClusterGCPConfig != "" { errs := cr.ParseYAMLFile(accessConfig, clusterconfig.GCPAccessValidation, _flagClusterGCPConfig) if errors.HasError(errs) { - return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/gcp/install", consts.CortexVersionMinor)) + return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } } @@ -120,7 +120,7 @@ func getGCPClusterAccessConfigWithCache(disallowPrompt bool) (*clusterconfig.GCP if _flagClusterGCPConfig != "" { errs := cr.ParseYAMLFile(accessConfig, clusterconfig.GCPAccessValidation, _flagClusterGCPConfig) if errors.HasError(errs) { - return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/gcp/install", consts.CortexVersionMinor)) + return nil, errors.Append(errors.FirstError(errs...), fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } } @@ -196,7 +196,7 @@ func getGCPInstallClusterConfig(gcpClient *gcp.Client, accessConfig clusterconfi err = clusterConfig.Validate(gcpClient) if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\ncluster configuration schema can be found here: https://docs.cortex.dev/v/%s/gcp/install", consts.CortexVersionMinor)) + err = errors.Append(err, fmt.Sprintf("\n\ncluster configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) if _flagClusterGCPConfig != "" { err = errors.Wrap(err, _flagClusterGCPConfig) } @@ -212,7 +212,7 @@ func confirmGCPInstallClusterConfig(clusterConfig *clusterconfig.GCPConfig, disa fmt.Printf("a cluster named \"%s\" will be created in %s (zone: %s)\n\n", clusterConfig.ClusterName, *clusterConfig.Project, *clusterConfig.Zone) if !disallowPrompt { - exitMessage := 
fmt.Sprintf("cluster configuration can be modified via the cluster config file; see https://docs.cortex.dev/v/%s/gcp/install for more information", consts.CortexVersionMinor) + exitMessage := fmt.Sprintf("cluster configuration can be modified via the cluster config file; see https://docs.cortex.dev/v/%s/ for more information", consts.CortexVersionMinor) prompt.YesOrExit("would you like to continue?", "", exitMessage) } } diff --git a/cli/local/deploy.go b/cli/local/deploy.go index 3f3407ab42..3a7741c9ba 100644 --- a/cli/local/deploy.go +++ b/cli/local/deploy.go @@ -101,7 +101,7 @@ func deploy(env cliconfig.Environment, apiConfigs []userconfig.API, projectFiles models := []spec.CuratedModelResource{} err = ValidateLocalAPIs(apiConfigs, &models, projectFiles, awsClient, gcpClient) if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Realtime API can be found at https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration", consts.CortexVersionMinor)) + err = errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Realtime API can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) return nil, err } diff --git a/docs/aws/install.md b/docs/aws/install.md index 55dc61d1a8..e44282dd51 100644 --- a/docs/aws/install.md +++ b/docs/aws/install.md @@ -62,7 +62,7 @@ nat_gateway: none api_load_balancer_scheme: internet-facing # operator load balancer scheme [internet-facing | internal] -# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator (https://docs.cortex.dev/v/master/aws/vpc-peering) +# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator (https://docs.cortex.dev/v/master/) operator_load_balancer_scheme: internet-facing # API Gateway [public (API Gateway will be used by default, can be disabled per API) | none (API Gateway will be disabled for all APIs)] diff --git a/docs/workloads/python-client.md b/docs/workloads/python-client.md index 6c98188c56..3af866f68c 100644 --- a/docs/workloads/python-client.md +++ b/docs/workloads/python-client.md @@ -116,13 +116,8 @@ Deploy an API. **Arguments**: -- `api_spec` - A dictionary defining a single Cortex API. Schema can be found here: - → Realtime API: https://docs.cortex.dev/v/master/deployments/realtime-api/api-configuration - → Batch API: https://docs.cortex.dev/v/master/deployments/batch-api/api-configuration - → Traffic Splitter: https://docs.cortex.dev/v/master/deployments/realtime-api/traffic-splitter +- `api_spec` - A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/master/ for schema. - `predictor` - A Cortex Predictor class implementation. Not required when deploying a traffic splitter. - → Realtime API: https://docs.cortex.dev/v/master/deployments/realtime-api/predictors - → Batch API: https://docs.cortex.dev/v/master/deployments/batch-api/predictors - `requirements` - A list of PyPI dependencies that will be installed before the predictor class implementation is invoked. - `conda_packages` - A list of Conda dependencies that will be installed before the predictor class implementation is invoked. - `project_dir` - Path to a python project. diff --git a/manager/debug.sh b/manager/debug.sh index 46c3a6e03c..0b292b5fed 100755 --- a/manager/debug.sh +++ b/manager/debug.sh @@ -27,7 +27,7 @@ if ! 
eksctl utils describe-stacks --cluster=$CORTEX_CLUSTER_NAME --region=$CORTE fi eksctl utils write-kubeconfig --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION | grep -v "saved kubeconfig as" | grep -v "using region" | grep -v "eksctl version" || true -out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/aws/security#running-cortex-cluster-commands-from-different-iam-users"; exit 1; fi +out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/"; exit 1; fi echo -n "gathering cluster data" diff --git a/manager/info.sh b/manager/info.sh index c754737605..a682d17f8b 100755 --- a/manager/info.sh +++ b/manager/info.sh @@ -36,7 +36,7 @@ if ! eksctl utils describe-stacks --cluster=$CORTEX_CLUSTER_NAME --region=$CORTE fi eksctl utils write-kubeconfig --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION | grep -v "saved kubeconfig as" | grep -v "using region" | grep -v "eksctl version" || true -out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/aws/security#running-cortex-cluster-commands-from-different-iam-users"; exit 1; fi +out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/"; exit 1; fi operator_endpoint=$(get_operator_endpoint) api_load_balancer_endpoint=$(get_api_load_balancer_endpoint) diff --git a/manager/install.sh b/manager/install.sh index cb0b9616ea..aeddadbc3a 100755 --- a/manager/install.sh +++ b/manager/install.sh @@ -97,7 +97,7 @@ function cluster_up_aws() { echo -e "\ncortex is ready!" 
if [ "$CORTEX_OPERATOR_LOAD_BALANCER_SCHEME" == "internal" ]; then - echo -e "note: you will need to configure VPC Peering to connect to your cluster: https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/aws/vpc-peering" + echo -e "note: you will need to configure VPC Peering to connect to your cluster: https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/" fi print_endpoints_aws @@ -242,7 +242,7 @@ function check_eks() { function write_kubeconfig() { eksctl utils write-kubeconfig --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION | grep -v "saved kubeconfig as" | grep -v "using region" | grep -v "eksctl version" || true - out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/aws/security#running-cortex-cluster-commands-from-different-iam-users"; exit 1; fi + out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/"; exit 1; fi } function setup_configmap() { diff --git a/manager/refresh.sh b/manager/refresh.sh index ce1389cdd5..42595008c4 100755 --- a/manager/refresh.sh +++ b/manager/refresh.sh @@ -27,7 +27,7 @@ if ! eksctl utils describe-stacks --cluster=$CORTEX_CLUSTER_NAME --region=$CORTE fi eksctl utils write-kubeconfig --cluster=$CORTEX_CLUSTER_NAME --region=$CORTEX_REGION | grep -v "saved kubeconfig as" | grep -v "using region" | grep -v "eksctl version" || true -out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/aws/security#running-cortex-cluster-commands-from-different-iam-users"; exit 1; fi +out=$(kubectl get pods 2>&1 || true); if [[ "$out" == *"must be logged in to the server"* ]]; then echo "error: your aws iam user does not have access to this cluster; to grant access, see https://docs.cortex.dev/v/${CORTEX_VERSION_MINOR}/"; exit 1; fi kubectl get -n=default configmap cluster-config -o yaml >> cluster_configmap.yaml python refresh_cluster_config.py cluster_configmap.yaml tmp_cluster_config.yaml diff --git a/pkg/lib/docker/errors.go b/pkg/lib/docker/errors.go index fe7483e41b..d12033731a 100644 --- a/pkg/lib/docker/errors.go +++ b/pkg/lib/docker/errors.go @@ -81,7 +81,7 @@ func ErrorImageInaccessible(image string, providerType types.ProviderType, cause } case types.AWSProviderType: if strings.Contains(cause.Error(), "authorized") || strings.Contains(cause.Error(), "authentication") { - message += fmt.Sprintf("\n\nif you would like to use a private docker registry, see https://docs.cortex.dev/v/%s/guides/private-docker", consts.CortexVersionMinor) + message += fmt.Sprintf("\n\nif you would like to use a private docker registry, see https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor) } } diff --git a/pkg/lib/k8s/errors.go b/pkg/lib/k8s/errors.go index d47a644f4f..0fd3ff0f69 100644 --- a/pkg/lib/k8s/errors.go +++ b/pkg/lib/k8s/errors.go @@ -63,6 +63,6 @@ func ErrorParseAnnotation(annotationName string, annotationVal string, desiredTy func ErrorParseQuantity(qtyStr string) error { return errors.WithStack(&errors.Error{ Kind: ErrParseQuantity, - Message: fmt.Sprintf("%s: invalid kubernetes quantity, some valid examples are 1, 200m, 
500Mi, 2G (see here for more information: https://docs.cortex.dev/v/%s/advanced/compute)", qtyStr, consts.CortexVersionMinor), + Message: fmt.Sprintf("%s: invalid kubernetes quantity, some valid examples are 1, 200m, 500Mi, 2G (see here for more information: https://docs.cortex.dev/v/%s/)", qtyStr, consts.CortexVersionMinor), }) } diff --git a/pkg/operator/endpoints/errors.go b/pkg/operator/endpoints/errors.go index 061df2e0bf..b4ba8ccfaa 100644 --- a/pkg/operator/endpoints/errors.go +++ b/pkg/operator/endpoints/errors.go @@ -42,7 +42,7 @@ const ( func ErrorAPIVersionMismatch(operatorVersion string, clientVersion string) error { return errors.WithStack(&errors.Error{ Kind: ErrAPIVersionMismatch, - Message: fmt.Sprintf("your CLI version (%s) doesn't match your Cortex operator version (%s); please update your cluster by following the instructions at https://docs.cortex.dev/update, or update your CLI (pip install cortex==%s)", clientVersion, operatorVersion, operatorVersion), + Message: fmt.Sprintf("your CLI version (%s) doesn't match your Cortex operator version (%s); please update your cluster by following the instructions at https://docs.cortex.dev, or update your CLI (pip install cortex==%s)", clientVersion, operatorVersion, operatorVersion), }) } diff --git a/pkg/operator/endpoints/submit_job.go b/pkg/operator/endpoints/submit_job.go index 44bc1f606c..e3eff9bd63 100644 --- a/pkg/operator/endpoints/submit_job.go +++ b/pkg/operator/endpoints/submit_job.go @@ -60,7 +60,7 @@ func SubmitJob(w http.ResponseWriter, r *http.Request) { err = json.Unmarshal(bodyBytes, &submission) if err != nil { - respondError(w, r, errors.Append(err, fmt.Sprintf("\n\njob submission schema can be found at https://docs.cortex.dev/v/%s/deployments/batch-api/endpoints", consts.CortexVersionMinor))) + respondError(w, r, errors.Append(err, fmt.Sprintf("\n\njob submission schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor))) return } diff --git a/pkg/operator/resources/batchapi/validations.go b/pkg/operator/resources/batchapi/validations.go index 13323161b6..e1e7ebf84c 100644 --- a/pkg/operator/resources/batchapi/validations.go +++ b/pkg/operator/resources/batchapi/validations.go @@ -86,7 +86,7 @@ func validateJobSubmissionSchema(submission *schema.JobSubmission) error { func validateJobSubmission(submission *schema.JobSubmission) error { err := validateJobSubmissionSchema(submission) if err != nil { - return errors.Append(err, fmt.Sprintf("\n\njob submission schema can be found at https://docs.cortex.dev/v/%s/deployments/batch-api/endpoints", consts.CortexVersionMinor)) + return errors.Append(err, fmt.Sprintf("\n\njob submission schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } if submission.FilePathLister != nil { diff --git a/pkg/operator/resources/resources.go b/pkg/operator/resources/resources.go index fb98a2244e..13c0bfe750 100644 --- a/pkg/operator/resources/resources.go +++ b/pkg/operator/resources/resources.go @@ -101,7 +101,7 @@ func Deploy(projectBytes []byte, configFileName string, configBytes []byte, forc err = ValidateClusterAPIs(apiConfigs, projectFiles) if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\napi configuration schema can be found here:\n → Realtime API: https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration\n → Batch API: https://docs.cortex.dev/v/%s/deployments/batch-api/api-configuration\n → Traffic Splitter: https://docs.cortex.dev/v/%s/deployments/realtime-api/traffic-splitter", 
consts.CortexVersionMinor, consts.CortexVersionMinor, consts.CortexVersionMinor)) + err = errors.Append(err, fmt.Sprintf("\n\napi configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) return nil, err } @@ -252,7 +252,7 @@ func patchAPI(apiConfig *userconfig.API, configFileName string, force bool) (*sp err = ValidateClusterAPIs([]userconfig.API{*apiConfig}, projectFiles) if err != nil { - err = errors.Append(err, fmt.Sprintf("\n\napi configuration schema can be found here:\n → Realtime API: https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration\n → Batch API: https://docs.cortex.dev/v/%s/deployments/batch-api/api-configuration\n → Traffic Splitter: https://docs.cortex.dev/v/%s/deployments/realtime-api/traffic-splitter", consts.CortexVersionMinor, consts.CortexVersionMinor, consts.CortexVersionMinor)) + err = errors.Append(err, fmt.Sprintf("\n\napi configuration schema can be found here:\n → Realtime API: https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) return nil, "", err } diff --git a/pkg/types/spec/errors.go b/pkg/types/spec/errors.go index 937c60f6a7..db4cd568dc 100644 --- a/pkg/types/spec/errors.go +++ b/pkg/types/spec/errors.go @@ -100,14 +100,14 @@ var _modelCurrentStructure = ` func ErrorMalformedConfig() error { return errors.WithStack(&errors.Error{ Kind: ErrMalformedConfig, - Message: fmt.Sprintf("cortex YAML configuration files must contain a list of maps (see https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration for Realtime API documentation and see https://docs.cortex.dev/v/%s/deployments/batch-api/api-configuration for Batch API documentation)", consts.CortexVersionMinor, consts.CortexVersionMinor), + Message: fmt.Sprintf("cortex YAML configuration files must contain a list of maps (see https://docs.cortex.dev/v/%s/ for api configuration schema)", consts.CortexVersionMinor), }) } func ErrorNoAPIs() error { return errors.WithStack(&errors.Error{ Kind: ErrNoAPIs, - Message: fmt.Sprintf("at least one API must be configured (see https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration for Realtime API documentation and see https://docs.cortex.dev/v/%s/deployments/batch-api/api-configuration for Batch API documentation)", consts.CortexVersionMinor, consts.CortexVersionMinor), + Message: fmt.Sprintf("at least one API must be configured (see https://docs.cortex.dev/v/%s/ for api configuration schema)", consts.CortexVersionMinor), }) } diff --git a/pkg/types/spec/validations.go b/pkg/types/spec/validations.go index 46dcb5ae23..1522416d98 100644 --- a/pkg/types/spec/validations.go +++ b/pkg/types/spec/validations.go @@ -641,14 +641,7 @@ func ExtractAPIConfigs( kindString, _ := data[userconfig.KindKey].(string) kind := userconfig.KindFromString(kindString) err = errors.Wrap(errors.FirstError(errs...), userconfig.IdentifyAPI(configFileName, name, kind, i)) - switch provider { - case types.LocalProviderType: - return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Realtime APIs can be found at https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration", consts.CortexVersionMinor)) - case types.AWSProviderType: - return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema can be found here:\n → Realtime API: https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration\n → Batch API: https://docs.cortex.dev/v/%s/deployments/batch-api/api-configuration\n → Traffic Splitter: 
https://docs.cortex.dev/v/%s/deployments/realtime-api/traffic-splitter", consts.CortexVersionMinor, consts.CortexVersionMinor, consts.CortexVersionMinor)) - case types.GCPProviderType: - return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Realtime APIs can be found at https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration", consts.CortexVersionMinor)) - } + return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } if resourceStruct.Kind == userconfig.BatchAPIKind || resourceStruct.Kind == userconfig.TrafficSplitterKind { @@ -663,14 +656,7 @@ func ExtractAPIConfigs( kindString, _ := data[userconfig.KindKey].(string) kind := userconfig.KindFromString(kindString) err = errors.Wrap(errors.FirstError(errs...), userconfig.IdentifyAPI(configFileName, name, kind, i)) - switch kind { - case userconfig.RealtimeAPIKind: - return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Realtime API can be found at https://docs.cortex.dev/v/%s/deployments/realtime-api/api-configuration", consts.CortexVersionMinor)) - case userconfig.BatchAPIKind: - return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Batch API can be found at https://docs.cortex.dev/v/%s/deployments/batch-api/api-configuration", consts.CortexVersionMinor)) - case userconfig.TrafficSplitterKind: - return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema for Traffic Splitter can be found at https://docs.cortex.dev/v/%s/deployments/realtime-api/traffic-splitter", consts.CortexVersionMinor)) - } + return nil, errors.Append(err, fmt.Sprintf("\n\napi configuration schema can be found at https://docs.cortex.dev/v/%s/", consts.CortexVersionMinor)) } api.Index = i api.FileName = configFileName diff --git a/pkg/workloads/cortex/client/cortex/client.py b/pkg/workloads/cortex/client/cortex/client.py index 27722bcaa2..e1af803e5a 100644 --- a/pkg/workloads/cortex/client/cortex/client.py +++ b/pkg/workloads/cortex/client/cortex/client.py @@ -44,7 +44,7 @@ def __init__(self, env: dict): self.env = env self.env_name = env["name"] - # CORTEX_VERSION_MINOR x5 + # CORTEX_VERSION_MINOR def create_api( self, api_spec: dict, @@ -59,13 +59,8 @@ def create_api( Deploy an API. Args: - api_spec: A dictionary defining a single Cortex API. Schema can be found here: - → Realtime API: https://docs.cortex.dev/v/master/deployments/realtime-api/api-configuration - → Batch API: https://docs.cortex.dev/v/master/deployments/batch-api/api-configuration - → Traffic Splitter: https://docs.cortex.dev/v/master/deployments/realtime-api/traffic-splitter + api_spec: A dictionary defining a single Cortex API. See https://docs.cortex.dev/v/master/ for schema. predictor: A Cortex Predictor class implementation. Not required when deploying a traffic splitter. - → Realtime API: https://docs.cortex.dev/v/master/deployments/realtime-api/predictors - → Batch API: https://docs.cortex.dev/v/master/deployments/batch-api/predictors requirements: A list of PyPI dependencies that will be installed before the predictor class implementation is invoked. conda_packages: A list of Conda dependencies that will be installed before the predictor class implementation is invoked. project_dir: Path to a python project. 
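To complement the `create_api` docstring above, here is a minimal usage sketch of the two call styles that appear in the tutorials: passing a predictor class directly, or pointing at a project directory. It is illustrative only -- the API names and predictor body are stand-ins, and it assumes an `aws` environment configured as in the tutorials.

```python
import cortex

class PythonPredictor:
    def __init__(self, config):
        from transformers import pipeline
        self.model = pipeline(task="text-generation")

    def predict(self, payload):
        return self.model(payload["text"])[0]

cx = cortex.client("aws")

# Style 1: pass the predictor class and its PyPI dependencies directly.
cx.create_api(
    {"name": "text-generator", "kind": "RealtimeAPI"},
    predictor=PythonPredictor,
    requirements=["tensorflow", "transformers"],
)

# Style 2: reference a predictor file inside a project directory
# (hypothetical name "text-generator-project" used for illustration).
api_spec = {
    "name": "text-generator-project",
    "kind": "RealtimeAPI",
    "predictor": {"type": "python", "path": "predictor.py"},
}
cx.create_api(api_spec, project_dir=".")
```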
diff --git a/pkg/workloads/cortex/serve/init/bootloader.sh b/pkg/workloads/cortex/serve/init/bootloader.sh index d119fdf0ff..7ea38ff92f 100755 --- a/pkg/workloads/cortex/serve/init/bootloader.sh +++ b/pkg/workloads/cortex/serve/init/bootloader.sh @@ -21,9 +21,9 @@ export EXPECTED_CORTEX_VERSION=master if [ "$CORTEX_VERSION" != "$EXPECTED_CORTEX_VERSION" ]; then if [ "$CORTEX_PROVIDER" == "local" ]; then - echo "error: your Cortex CLI version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your CLI by following the instructions at https://docs.cortex.dev/update" + echo "error: your Cortex CLI version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your CLI by following the instructions at https://docs.cortex.dev/" else - echo "error: your Cortex operator version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your cluster by following the instructions at https://docs.cortex.dev/update" + echo "error: your Cortex operator version ($CORTEX_VERSION) doesn't match your predictor image version ($EXPECTED_CORTEX_VERSION); please update your predictor image by modifying the \`image\` field in your API configuration file (e.g. cortex.yaml) and re-running \`cortex deploy\`, or update your cluster by following the instructions at https://docs.cortex.dev/" fi exit 1 fi From 6429cb4b470ced559bb941b67e171c3118a8a0c2 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 19:17:41 -0500 Subject: [PATCH 25/36] Update generate_python_client_md.sh --- dev/generate_python_client_md.sh | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/dev/generate_python_client_md.sh b/dev/generate_python_client_md.sh index fd68e9130f..2aa4317250 100755 --- a/dev/generate_python_client_md.sh +++ b/dev/generate_python_client_md.sh @@ -30,38 +30,38 @@ cd $ROOT/pkg/workloads/cortex/client pip3 install -e . 
-pydoc-markdown -m cortex -m cortex.client --render-toc > $ROOT/docs/miscellaneous/python-client.md +pydoc-markdown -m cortex -m cortex.client --render-toc > $ROOT/docs/workloads/python-client.md # title -sed -i "s/# Table of Contents/# Python client\n\n_WARNING: you are on the master branch, please refer to the docs on the branch that matches your \`cortex version\`_/g" $ROOT/docs/miscellaneous/python-client.md +sed -i "s/# Table of Contents/# Python client\n\n_WARNING: you are on the master branch, please refer to the docs on the branch that matches your \`cortex version\`_/g" $ROOT/docs/workloads/python-client.md # delete links -sed -i "//g" $ROOT/docs/miscellaneous/python-client.md +sed -i "s/^## create\\\_api/## create\\\_api\n\n/g" $ROOT/docs/workloads/python-client.md pip3 uninstall -y cortex rm -rf $ROOT/pkg/workloads/cortex/client/cortex.egg-info From f4ed4295f4853dbd5e4477b79ce59fa409b928cc Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 19:22:42 -0500 Subject: [PATCH 26/36] Update traffic-splitter.md --- docs/tutorials/traffic-splitter.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/docs/tutorials/traffic-splitter.md b/docs/tutorials/traffic-splitter.md index be4502929b..ea403d9586 100644 --- a/docs/tutorials/traffic-splitter.md +++ b/docs/tutorials/traffic-splitter.md @@ -7,6 +7,8 @@ A Traffic Splitter can be used expose multiple APIs as a single endpoint. The pe ## Deploy APIs ```python +import cortex + class PythonPredictor: def __init__(self, config): from transformers import pipeline @@ -43,7 +45,7 @@ cx.create_api(api_spec_gpu, predictor=PythonPredictor, requirements=requirements ```python traffic_splitter_spec = { - "name": "classifier", + "name": "text-generator", "kind": "TrafficSplitter", "apis": [ {"name": "text-generator-cpu", "weight": 50}, @@ -57,11 +59,11 @@ cx.create_api(traffic_splitter_spec) ## Update the weights of the traffic splitter ```python -traffic_splitter_spec = cx.get_api("classifier")["spec"]["submitted_api_spec"] +traffic_splitter_spec = cx.get_api("text-generator")["spec"]["submitted_api_spec"] # send 99% of the traffic to text-generator-gpu -traffic_splitter_spec["api"][0]["weight"] = 1 -traffic_splitter_spec["api"][1]["weight"] = 99 +traffic_splitter_spec["apis"][0]["weight"] = 1 +traffic_splitter_spec["apis"][1]["weight"] = 99 cx.patch(traffic_splitter_spec) ``` From daed845eb05e97af22271b167bcc3e1ea41f9056 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 19:32:31 -0500 Subject: [PATCH 27/36] Update batch.md --- docs/tutorials/batch.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index 5c9941b64c..1b69532e5a 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -129,7 +129,7 @@ job_spec = { response = requests.post(batch_endpoint, json=job_spec) -print(response) +print(response.text) # > {"job_id":"69b183ed6bdf3e9b","api_name":"image-classifier", "config": {"dest_s3_dir": ...}} ``` From 48d08598f8f99274979918c727e4020141d7c88b Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 19:40:35 -0500 Subject: [PATCH 28/36] Update traffic-splitter.md --- docs/tutorials/traffic-splitter.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/tutorials/traffic-splitter.md b/docs/tutorials/traffic-splitter.md index ea403d9586..1191ab6c91 100644 --- a/docs/tutorials/traffic-splitter.md +++ b/docs/tutorials/traffic-splitter.md @@ -12,8 +12,7 @@ import cortex class PythonPredictor: def 
__init__(self, config): from transformers import pipeline - - self.model = pipeline(task="text-generation", model=config["model"]) + self.model = pipeline(task="text-generation") def predict(self, payload): return self.model(payload["text"])[0] From 2f4d0f2dfc3cc3011dd8df73e3d3355735680ac0 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 19:56:14 -0500 Subject: [PATCH 29/36] Update multi-model.md --- docs/tutorials/multi-model.md | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/tutorials/multi-model.md b/docs/tutorials/multi-model.md index 4993dfebf5..043ea6c6ad 100644 --- a/docs/tutorials/multi-model.md +++ b/docs/tutorials/multi-model.md @@ -12,19 +12,23 @@ import cortex class PythonPredictor: def __init__(self, config): from transformers import pipeline + self.analyzer = pipeline(task="sentiment-analysis") - self.analyzer = pipeline(task="sentiment-analysis", device=device) - self.summarizer = pipeline(task="summarization", device=device) + import wget + import fasttext + wget.download( + "https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin", "/tmp/model" + ) + self.language_identifier = fasttext.load_model("/tmp/model") def predict(self, query_params, payload): model = query_params.get("model") - if model == "sentiment": return self.analyzer(payload["text"])[0] - elif model == "summarizer": - return self.summarizer(payload["text"])[0]["summary_text"] + elif model == "language": + return self.language_identifier.predict(payload["text"])[0][0][-2:] -requirements = ["tensorflow", "transformers"] +requirements = ["tensorflow", "transformers", "wget", "fasttext"] api_spec = {"name": "multi-model", "kind": "RealtimeAPI"} From fe8797a85fd0f2c37df544edecda31c6aa28857f Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 20:06:29 -0500 Subject: [PATCH 30/36] Remove examples from linting and skip version check in tutorials --- build/lint.sh | 30 +----------------------------- docs/guides/contributing.md | 2 ++ 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/build/lint.sh b/build/lint.sh index a5711abfdd..94dcd54465 100755 --- a/build/lint.sh +++ b/build/lint.sh @@ -137,25 +137,12 @@ if [ "$is_release_branch" = "true" ]; then exit 1 fi - # Check for version warning comments in examples - output=$(cd "$ROOT/examples" && find . -type f \ - ! -name "README.md" \ - ! -name "*.json" \ - ! -name "*.txt" \ - ! -name ".*" \ - ! -name "*.bin" \ - -exec grep -L -e "this is an example for cortex release ${git_branch} and may not deploy correctly on other releases of cortex" {} \;) - if [[ $output ]]; then - echo "examples file(s) are missing appropriate version comment:" - echo "$output" - exit 1 - fi - else # Check for version warning comments in docs output=$(cd "$ROOT/docs" && find . -type f \ ! -path "./README.md" \ ! -name "summary.md" \ + ! -path "./tutorials/*" \ ! -name "development.md" \ ! -name "*.json" \ ! -name "*.txt" \ @@ -167,21 +154,6 @@ else echo "$output" exit 1 fi - - # Check for version warning comments in examples - output=$(cd "$ROOT/examples" && find . -type f \ - ! -path "./README.md" \ - ! -path "**/__pycache__/*" \ - ! -name "*.json" \ - ! -name "*.txt" \ - ! -name ".*" \ - ! -name "*.bin" \ - -exec grep -L "WARNING: you are on the master branch; please refer to examples on the branch corresponding to your \`cortex version\` (e\.g\. 
for version [0-9]*\.[0-9]*\.\*, run \`git checkout -b [0-9]*\.[0-9]*\` or switch to the \`[0-9]*\.[0-9]*\` branch on GitHub)" {} \;) - if [[ $output ]]; then - echo "example file(s) are missing version appropriate comment:" - echo "$output" - exit 1 - fi fi # Check for trailing whitespace diff --git a/docs/guides/contributing.md b/docs/guides/contributing.md index ebdb48d443..6eb6d76d88 100644 --- a/docs/guides/contributing.md +++ b/docs/guides/contributing.md @@ -1,5 +1,7 @@ # Contributing +_WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ + ## Remote development We recommend that you run your development environment on a cloud instance due to frequent docker registry pushing, e.g. an AWS EC2 instance or GCP VM. We've had a good experience using [Mutagen](https://mutagen.io/documentation/introduction) to synchronize local / remote file systems. Feel free to reach out to us on [gitter](https://gitter.im/cortexlabs/cortex) if you have any questions about this. From c17a49cd806c614049b053ca7375b7c681e63660 Mon Sep 17 00:00:00 2001 From: Vishal Bollu Date: Tue, 8 Dec 2020 20:16:29 -0500 Subject: [PATCH 31/36] Update install.md --- docs/aws/install.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/aws/install.md b/docs/aws/install.md index e44282dd51..bae9c28e4e 100644 --- a/docs/aws/install.md +++ b/docs/aws/install.md @@ -62,7 +62,7 @@ nat_gateway: none api_load_balancer_scheme: internet-facing # operator load balancer scheme [internet-facing | internal] -# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator (https://docs.cortex.dev/v/master/) +# note: if using "internal", you must configure VPC Peering to connect your CLI to your cluster operator operator_load_balancer_scheme: internet-facing # API Gateway [public (API Gateway will be used by default, can be disabled per API) | none (API Gateway will be disabled for all APIs)] @@ -100,8 +100,6 @@ image_istio_proxy: quay.io/cortexlabs/istio-proxy:master image_istio_pilot: quay.io/cortexlabs/istio-pilot:master ``` -The default docker images used for your Predictors are listed in the instructions for [system packages](../deployments/system-packages.md), and can be overridden in your [Realtime API configuration](../deployments/realtime-api/api-configuration.md) and in your [Batch API configuration](../deployments/batch-api/api-configuration.md). - ## Advanced * [Security](security.md) From d38a99843768ac51e4dfb8dfd01a9a980ced2ac1 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 20:18:35 -0500 Subject: [PATCH 32/36] Update gcp install docs --- docs/gcp/install.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/docs/gcp/install.md b/docs/gcp/install.md index e65520c4b4..263267fb78 100644 --- a/docs/gcp/install.md +++ b/docs/gcp/install.md @@ -19,12 +19,8 @@ cortex cluster-gcp up # or: cortex cluster-gcp up --config cluster.yaml (see co cortex env default gcp ``` - -Try the [tutorial](../../examples/pytorch/text-generator/README.md). 
- ## Configure Cortex - ```yaml # cluster.yaml @@ -62,5 +58,3 @@ image_istio_proxy: quay.io/cortexlabs/istio-proxy:master image_istio_pilot: quay.io/cortexlabs/istio-pilot:master image_pause: quay.io/cortexlabs/pause:master ``` - -The default docker images used for your Predictors are listed in the instructions for [system packages](../deployments/system-packages.md), and can be overridden in your [Realtime API configuration](../deployments/realtime-api/api-configuration.md). From e51a969c9d7c6f93ed96294e38124e310b403723 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 20:25:23 -0500 Subject: [PATCH 33/36] Fix docs links deployments -> workloads --- docs/aws/networking.md | 2 +- docs/aws/rest-api-gateway.md | 4 ++-- docs/guides/docker-hub-rate-limiting.md | 2 +- docs/guides/production.md | 18 +++++++++--------- docs/guides/self-hosted-images.md | 2 +- .../server-side-batching-errors.md | 2 +- docs/troubleshooting/tf-session-in-predict.md | 2 +- test/batch/image-classifier/README.md | 4 ++-- 8 files changed, 18 insertions(+), 18 deletions(-) diff --git a/docs/aws/networking.md b/docs/aws/networking.md index eddddb4a31..824abe0c1f 100644 --- a/docs/aws/networking.md +++ b/docs/aws/networking.md @@ -4,7 +4,7 @@ _WARNING: you are on the master branch, please refer to the docs on the branch t ![api architecture diagram](https://user-images.githubusercontent.com/808475/84695323-8507dd00-aeff-11ea-8b32-5a55cef76c79.png) -APIs are deployed with a public API Gateway by default (the API Gateway forwards requests to the API load balancer). Each API can be independently configured to not create the API Gateway endpoint by setting `api_gateway: none` in the `networking` field of the [Realtime API configuration](../deployments/realtime-api/api-configuration.md) and [Batch API configuration](../deployments/batch-api/api-configuration.md). If the API Gateway endpoint is not created, your API can still be accessed via the API load balancer; `cortex get API_NAME` will show the load balancer endpoint if API Gateway is disabled. API Gateway is enabled by default, and is generally recommended unless it doesn't support your use case due to limitations such as the 29 second request timeout, or if you are keeping your APIs private to your VPC. See below for common configurations. To disable API Gateway cluster-wide (thereby enforcing that all APIs cannot create API Gateway endpoints), set `api_gateway: none` in your [cluster configuration](install.md) file (before creating your cluster). +APIs are deployed with a public API Gateway by default (the API Gateway forwards requests to the API load balancer). Each API can be independently configured to not create the API Gateway endpoint by setting `api_gateway: none` in the `networking` field of the [Realtime API configuration](../workloads/realtime/configuration.md) and [Batch API configuration](../workloads/batch/configuration.md). If the API Gateway endpoint is not created, your API can still be accessed via the API load balancer; `cortex get API_NAME` will show the load balancer endpoint if API Gateway is disabled. API Gateway is enabled by default, and is generally recommended unless it doesn't support your use case due to limitations such as the 29 second request timeout, or if you are keeping your APIs private to your VPC. See below for common configurations. 
To disable API Gateway cluster-wide (thereby enforcing that all APIs cannot create API Gateway endpoints), set `api_gateway: none` in your [cluster configuration](install.md) file (before creating your cluster). By default, the API load balancer is public. You can configure your API load balancer to be private by setting `api_load_balancer_scheme: internal` in your [cluster configuration](install.md) file (before creating your cluster). This will force external traffic to go through your API Gateway endpoint, or if you disabled API Gateway for your API, it will make your API only accessible through VPC Peering. Note that if API Gateway is used, endpoints will be public regardless of `api_load_balancer_scheme`. See below for common configurations. diff --git a/docs/aws/rest-api-gateway.md b/docs/aws/rest-api-gateway.md index cc5a5ced37..0144df386d 100644 --- a/docs/aws/rest-api-gateway.md +++ b/docs/aws/rest-api-gateway.md @@ -17,7 +17,7 @@ If your API load balancer is internal (i.e. you set `api_load_balancer_scheme: i Disable the default API Gateway: * If you haven't created your cluster yet, you can set `api_gateway: none` in your [cluster configuration file](install.md) before creating your cluster. -* If you have already created your cluster, you can set `api_gateway: none` in the `networking` field of your [Realtime API configuration](../deployments/realtime-api/api-configuration.md) and/or [Batch API configuration](../deployments/batch-api/api-configuration.md), and then re-deploy your API. +* If you have already created your cluster, you can set `api_gateway: none` in the `networking` field of your [Realtime API configuration](../workloads/realtime/configuration.md) and/or [Batch API configuration](../workloads/batch/configuration.md), and then re-deploy your API. ### Step 2 @@ -96,7 +96,7 @@ Delete the API Gateway before spinning down your Cortex cluster: Disable the default API Gateway: * If you haven't created your cluster yet, you can set `api_gateway: none` in your [cluster configuration file](install.md) before creating your cluster. -* If you have already created your cluster, you can set `api_gateway: none` in the `networking` field of your [Realtime API configuration](../deployments/realtime-api/api-configuration.md) and/or [Batch API configuration](../deployments/batch-api/api-configuration.md), and then re-deploy your API. +* If you have already created your cluster, you can set `api_gateway: none` in the `networking` field of your [Realtime API configuration](../workloads/realtime/configuration.md) and/or [Batch API configuration](../workloads/batch/configuration.md), and then re-deploy your API. ### Step 2 diff --git a/docs/guides/docker-hub-rate-limiting.md b/docs/guides/docker-hub-rate-limiting.md index 98d9ff3570..378919686d 100644 --- a/docs/guides/docker-hub-rate-limiting.md +++ b/docs/guides/docker-hub-rate-limiting.md @@ -64,7 +64,7 @@ Once you've updated your cluster configuration file, you can spin up your cluste ### Update your API configuration file(s) -To configure your APIs to use the Quay images, you cna update your [API configuration files](../deployments/realtime-api/api-configuration.md). The image paths are specified in `predictor.image` (and `predictor.tensorflow_serving_image` for APIs with `kind: tensorflow`). Be advised that by default, the Docker Hub images are used for your predictors, so you will need to specify the Quay image paths for all of your APIs. 
+To configure your APIs to use the Quay images, you can update your [API configuration files](../workloads/realtime/configuration.md). The image paths are specified in `predictor.image` (and `predictor.tensorflow_serving_image` for APIs with `predictor.type: tensorflow`). Be advised that by default, the Docker Hub images are used for your predictors, so you will need to specify the Quay image paths for all of your APIs. Here is a list of available images (make sure to set `` to your cluster's version): diff --git a/docs/guides/production.md b/docs/guides/production.md index bd2f259826..2f3eb6c0f0 100644 --- a/docs/guides/production.md +++ b/docs/guides/production.md @@ -10,24 +10,24 @@ _WARNING: you are on the master branch, please refer to the docs on the branch t **Additional tips for realtime APIs** -* Consider tuning `processes_per_replica` and `threads_per_process` in your [Realtime API configuration](../deployments/realtime-api/api-configuration.md). Each model behaves differently, so the best way to find a good value is to run a load test on a single replica (you can set `min_replicas` to 1 to avoid autocaling). Here is [additional information](../deployments/realtime-api/parallelism.md#concurrency) about these fields. +* Consider tuning `processes_per_replica` and `threads_per_process` in your [Realtime API configuration](../workloads/realtime/configuration.md). Each model behaves differently, so the best way to find a good value is to run a load test on a single replica (you can set `min_replicas` to 1 to avoid autoscaling). Here is [additional information](../workloads/realtime/parallelism.md#concurrency) about these fields. -* You may wish to customize the autoscaler for your APIs. The [autoscaling documentation](../deployments/realtime-api/autoscaling.md) describes each of the parameters that can be configured. +* You may wish to customize the autoscaler for your APIs. The [autoscaling documentation](../workloads/realtime/autoscaling.md) describes each of the parameters that can be configured. * When creating an API that you will send large amounts of traffic to all at once, set `min_replicas` at (or slightly above) the number of replicas you expect will be necessary to handle the load at steady state. After traffic has been fully shifted to your API, `min_replicas` can be reduced to allow automatic downscaling. -* [Traffic splitters](./deployments/realtime-api/traffic-splitter.md) can be used to route a subset of traffic to an updated API. For example, you can create a traffic splitter named `my-api`, and route requests to `my-api` to any number of Realtime APIs (e.g. `my-api_v1`, `my-api_v2`, etc). The percentage of traffic that the traffic splitter routes to each API can be updated on the fly. +* [Traffic splitters](../workloads/realtime/traffic-splitter.md) can be used to route a subset of traffic to an updated API. For example, you can create a traffic splitter named `my-api`, and route requests to `my-api` to any number of Realtime APIs (e.g. `my-api_v1`, `my-api_v2`, etc). The percentage of traffic that the traffic splitter routes to each API can be updated on the fly. -* If initialization of your API replicas takes a while (e.g. 
due to downloading large models from slow hosts or installing dependencies), and responsive autoscaling is important to you, consider pre-building your API's Docker image. See [here](../workloads/system-packages.md#custom-docker-image) for instructions. -* If your API is receiving many queries per second and you are using the TensorFlow Predictor, consider enabling [server-side batching](../deployments/realtime-api/parallelism.md#server-side-batching). +* If your API is receiving many queries per second and you are using the TensorFlow Predictor, consider enabling [server-side batching](../workloads/realtime/parallelism.md#server-side-batching). -* [Overprovisioning](../deployments/realtime-api/autoscaling.md#overprovisioning) can be used to reduce the chance of large queues building up. This can be especially important when inferences take a long time. +* [Overprovisioning](../workloads/realtime/autoscaling.md#overprovisioning) can be used to reduce the chance of large queues building up. This can be especially important when inferences take a long time. **Additional tips for inferences that take a long time:** -* Consider using [GPUs](../deployments/gpus.md) or [Inferentia](../deployments/inferentia.md) to speed up inference. +* Consider using [GPUs](../aws/gpu.md) or [Inferentia](../aws/inferentia.md) to speed up inference. -* Consider setting a low value for `max_replica_concurrency`, since if there are many requests in the queue, it will take a long time until newly received requests are processed. See [autoscaling docs](../deployments/realtime-api/autoscaling.md) for more details. +* Consider setting a low value for `max_replica_concurrency`, since if there are many requests in the queue, it will take a long time until newly received requests are processed. See [autoscaling docs](../workloads/realtime/autoscaling.md) for more details. -* Keep in mind that API Gateway has a 29 second timeout; if your requests take longer (due to a long inference time and/or long request queues), you will need to disable API Gateway for your API by setting `api_gateway: none` in the `networking` config in your [Realtime API configuration](../deployments/realtime-api/api-configuration.md) and/or [Batch API configuration](../deployments/batch-api/api-configuration.md). Alternatively, you can disable API gateway for all APIs in your cluster by setting `api_gateway: none` in your [cluster configuration file](../aws/install.md) before creating your cluster. +* Keep in mind that API Gateway has a 29 second timeout; if your requests take longer (due to a long inference time and/or long request queues), you will need to disable API Gateway for your API by setting `api_gateway: none` in the `networking` config in your [Realtime API configuration](../workloads/realtime/configuration.md) and/or [Batch API configuration](../workloads/batch/configuration.md). Alternatively, you can disable API gateway for all APIs in your cluster by setting `api_gateway: none` in your [cluster configuration file](../aws/install.md) before creating your cluster. diff --git a/docs/guides/self-hosted-images.md b/docs/guides/self-hosted-images.md index 61f298eaf0..916dd4a2ca 100644 --- a/docs/guides/self-hosted-images.md +++ b/docs/guides/self-hosted-images.md @@ -131,7 +131,7 @@ echo "-----------------------------------------------" The first list of images that were printed (the cluster images) can be directly copy-pasted in your [cluster configuration file](../aws/install.md) before spinning up your cluster. 
-The second list of images that were printed (the API images) can be used in your [API configuration files](../deployments/realtime-api/api-configuration.md). The image paths are specified in `predictor.image` (and `predictor.tensorflow_serving_image` for APIs with `kind: tensorflow`). Be advised that by default, the public images offered by Cortex are used for your predictors, so you will need to specify your ECR image paths for all of your APIs. +The second list of images that were printed (the API images) can be used in your [API configuration files](../workloads/realtime/configuration.md). The image paths are specified in `predictor.image` (and `predictor.tensorflow_serving_image` for APIs with `predictor.type: tensorflow`). Be advised that by default, the public images offered by Cortex are used for your predictors, so you will need to specify your ECR image paths for all of your APIs. ## Step 5 diff --git a/docs/troubleshooting/server-side-batching-errors.md b/docs/troubleshooting/server-side-batching-errors.md index 4740d903fa..df03f75b06 100644 --- a/docs/troubleshooting/server-side-batching-errors.md +++ b/docs/troubleshooting/server-side-batching-errors.md @@ -2,7 +2,7 @@ _WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ -When `max_batch_size` and `batch_interval` fields are set for the [Realtime API TensorFlow Predictor](../deployments/realtime-api/predictors.md#tensorflow-predictor), errors can be encountered if the associated model hasn't been built for batching. +When `max_batch_size` and `batch_interval` fields are set for the [Realtime API TensorFlow Predictor](../workloads/realtime/predictors.md#tensorflow-predictor), errors can be encountered if the associated model hasn't been built for batching. The following error is an example of what happens when the input shape doesn't accommodate batching - e.g. when its shape is `[height, width, 3]` instead of `[batch_size, height, width, 3]`: diff --git a/docs/troubleshooting/tf-session-in-predict.md b/docs/troubleshooting/tf-session-in-predict.md index c8e1d56218..fa0f2d6b49 100644 --- a/docs/troubleshooting/tf-session-in-predict.md +++ b/docs/troubleshooting/tf-session-in-predict.md @@ -2,7 +2,7 @@ _WARNING: you are on the master branch, please refer to the docs on the branch that matches your `cortex version`_ -When doing inferences with TensorFlow using the [Realtime API Python Predictor](../deployments/realtime-api/predictors.md#python-predictor) or [Batch API Python Predictor](../deployments/batch-api/predictors.md#python-predictor), it should be noted that your Python Predictor's `__init__()` constructor is only called on one thread, whereas its `predict()` method can run on any of the available threads (which is configured via the `threads_per_process` field in the API's `predictor` configuration). If `threads_per_process` is set to `1` (the default value), then there is no concern, since `__init__()` and `predict()` will run on the same thread. However, if `threads_per_process` is greater than `1`, then only one of the inference threads will have executed the `__init__()` function. This can cause issues with TensorFlow because the default graph is a property of the current thread, so if `__init__()` initializes the TensorFlow graph, only the thread that executed `__init__()` will have the default graph set. 
+When doing inferences with TensorFlow using the [Realtime API Python Predictor](../workloads/realtime/predictors.md#python-predictor) or [Batch API Python Predictor](../workloads/batch/predictors.md#python-predictor), it should be noted that your Python Predictor's `__init__()` constructor is only called on one thread, whereas its `predict()` method can run on any of the available threads (which is configured via the `threads_per_process` field in the API's `predictor` configuration). If `threads_per_process` is set to `1` (the default value), then there is no concern, since `__init__()` and `predict()` will run on the same thread. However, if `threads_per_process` is greater than `1`, then only one of the inference threads will have executed the `__init__()` function. This can cause issues with TensorFlow because the default graph is a property of the current thread, so if `__init__()` initializes the TensorFlow graph, only the thread that executed `__init__()` will have the default graph set. The error you may see if the default graph is not set (as a consequence of `__init__()` and `predict()` running in separate threads) is: diff --git a/test/batch/image-classifier/README.md b/test/batch/image-classifier/README.md index 03cc827d35..3d62908e52 100644 --- a/test/batch/image-classifier/README.md +++ b/test/batch/image-classifier/README.md @@ -105,7 +105,7 @@ class PythonPredictor: ) ``` -Here are the complete [Predictor docs](../../../docs/deployments/batch-api/predictors.md). +Here are the complete [Predictor docs](../../../docs/workloads/batch/predictors.md).
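For quick reference, here is a minimal sketch of the Batch API Predictor interface that the walkthrough above relies on. The `__init__(config, job_spec)`, `predict(payload, batch_id)`, and `on_job_complete()` signatures follow the Predictor docs linked above (exact details can vary between Cortex versions), while the placeholder computation and the `dest_s3_bucket` config key are hypothetical and stand in for the real image classification logic:

```python
# Minimal Batch API Predictor sketch (placeholder logic, not the image classifier example itself).
import json
import os

import boto3


class PythonPredictor:
    def __init__(self, config, job_spec):
        # config comes from the `predictor.config` field of the API configuration;
        # job_spec describes the submitted job (e.g. its ID)
        self.s3 = boto3.client("s3")
        self.bucket = config["dest_s3_bucket"]  # hypothetical config key
        self.key_prefix = os.path.join("results", job_spec["job_id"])

    def predict(self, payload, batch_id):
        # payload is one batch of items from the submitted job; batch_id identifies the batch
        results = [{"item": item, "prediction": str(item)[::-1]} for item in payload]  # placeholder "inference"
        self.s3.put_object(
            Bucket=self.bucket,
            Key=os.path.join(self.key_prefix, f"{batch_id}.json"),
            Body=json.dumps(results),
        )

    def on_job_complete(self):
        # optional hook that runs once after all batches have been processed
        pass
```

The actual example in this directory downloads and classifies images; only the interface shown here is meant to carry over.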
@@ -140,7 +140,7 @@ Create a `cortex.yaml` file and add the configuration below. An `api` with `kind cpu: 1 ``` -Here are the complete [API configuration docs](../../../docs/deployments/batch-api/api-configuration.md). +Here are the complete [API configuration docs](../../../docs/workloads/batch/configuration.md).
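For reference, a minimal `cortex.yaml` sketch for a Batch API along the lines of the one above. The field names (`kind: BatchAPI`, `predictor.type`, `predictor.path`, `predictor.config`, `compute.cpu`) follow the API configuration docs linked above, while the API name and the `dest_s3_bucket` value are placeholders:

```yaml
# minimal Batch API configuration sketch (names and values are placeholders)
- name: image-classifier
  kind: BatchAPI
  predictor:
    type: python
    path: predictor.py
    config:
      dest_s3_bucket: my-results-bucket  # hypothetical bucket, matching the predictor sketch above
  compute:
    cpu: 1
```

Additional fields (such as `compute.gpu` or `predictor.image`) can be added as described in the configuration docs.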
From 685194f17513933a7e3fa2637b419900b7abef3a Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 20:31:13 -0500 Subject: [PATCH 34/36] Update single-node-deployment.md --- docs/guides/single-node-deployment.md | 24 +----------------------- 1 file changed, 1 insertion(+), 23 deletions(-) diff --git a/docs/guides/single-node-deployment.md b/docs/guides/single-node-deployment.md index c9973ebe68..6a949bb48c 100644 --- a/docs/guides/single-node-deployment.md +++ b/docs/guides/single-node-deployment.md @@ -114,26 +114,4 @@ $ bash -c "$(curl -sS https://raw.githubusercontent.com/cortexlabs/cortex/master ### Step 13 -You can now use Cortex to deploy your model: - - -```bash -$ git clone -b master https://github.com/cortexlabs/cortex.git - -$ cd cortex/docs/tutorials/realtime - -$ cortex deploy - -# take note of the curl command -$ cortex get text-generator -``` - -### Step 14 - -Make requests by replacing "localhost" in the curl command with your instance's public DNS: - -```bash -$ curl : \ - -X POST -H "Content-Type: application/json" \ - -d '{"text": "machine learning is"}' -``` +You can now use Cortex to deploy your model. From 80d77cb7704d72e8bdab80146c125a8639ce7364 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 20:33:23 -0500 Subject: [PATCH 35/36] Update summary.md --- docs/summary.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/summary.md b/docs/summary.md index 64cf145e9d..1aac625cd9 100644 --- a/docs/summary.md +++ b/docs/summary.md @@ -17,7 +17,7 @@ * [Credentials](aws/credentials.md) * [Security](aws/security.md) * [Spot instances](aws/spot.md) -* [GPUs](aws/gpus.md) +* [GPUs](aws/gpu.md) * [Inferentia](aws/inferentia.md) * [Networking](aws/networking.md) * [VPC peering](aws/vpc-peering.md) From 0cbbd7cee5d59354c09ac5dac0639bea7c578fd3 Mon Sep 17 00:00:00 2001 From: vishal Date: Tue, 8 Dec 2020 20:48:21 -0500 Subject: [PATCH 36/36] PR review fixes --- docs/tutorials/advanced.md | 119 ------------------------------------- docs/tutorials/batch.md | 2 +- docs/tutorials/project.md | 2 - 3 files changed, 1 insertion(+), 122 deletions(-) delete mode 100644 docs/tutorials/advanced.md diff --git a/docs/tutorials/advanced.md b/docs/tutorials/advanced.md deleted file mode 100644 index 347d45f9aa..0000000000 --- a/docs/tutorials/advanced.md +++ /dev/null @@ -1,119 +0,0 @@ -# Advanced deployments - -## Install cortex - -```bash -$ pip install cortex -``` - -## Create a directory - -```bash -$ mkdir text-generator && cd text-generator - -$ touch predictor.py requirements.txt text-generator.yaml -``` - -## Define a Predictor in `predictor.py` - -```python -class PythonPredictor: - def __init__(self, config): - from transformers import pipeline - - self.model = pipeline(task="text-generation") - - def predict(self, payload): - return self.model(payload["text"])[0] -``` - -## Specify Python dependencies in `requirements.txt` - -```text -tensorflow -transformers -``` - -## Configure 2 realtime APIs and a traffic splitter in `text-generator.yaml` - -```yaml -- name: text-generator-cpu - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - cpu: 1 - -- name: text-generator-gpu - kind: RealtimeAPI - predictor: - type: python - path: predictor.py - compute: - gpu: 1 - -- name: text-generator - kind: TrafficSplitter - apis: - - name: text-generator-cpu - weight: 80 - - name: text-generator-gpu - weight: 20 -``` - -## Test locally (requires Docker) - -```bash -$ cortex deploy text-generator.yaml -``` - -## Monitor - -```bash -$ 
cortex get text-generator --watch -``` - -## Make a request - -```bash -$ curl http://localhost:8889 -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' -``` - -## Stream logs - -```bash -$ cortex logs text-generator -``` - -## Spin up a cluster on AWS - -```bash -$ cortex cluster up -``` - -## Deploy to AWS - -```bash -$ cortex deploy text-generator.yaml --env aws -``` - -## Monitor - -```bash -$ cortex get text-generator --env aws --watch -``` - -## Make a request - -```bash -$ curl https://***.execute-api.us-west-2.amazonaws.com/text-generator -X POST -H "Content-Type: application/json" -d '{"text": "hello world"}' -``` - -## Delete the APIs - -```bash -$ cortex delete text-generator --env local - -$ cortex delete text-generator --env aws -``` diff --git a/docs/tutorials/batch.md b/docs/tutorials/batch.md index 1b69532e5a..b10691f806 100644 --- a/docs/tutorials/batch.md +++ b/docs/tutorials/batch.md @@ -152,5 +152,5 @@ Once the job is complete, you should be able to find the results of the batch jo ### Delete the Batch API ```bash -$ cortex delete image-classifier --env local +$ cortex delete image-classifier --env aws ``` diff --git a/docs/tutorials/project.md b/docs/tutorials/project.md index be51aea5fb..dc512bfb49 100644 --- a/docs/tutorials/project.md +++ b/docs/tutorials/project.md @@ -46,8 +46,6 @@ cx.create_api(api_spec, project_dir=".") ## Deploy using the CLI -Navigate to your project directory and define a yaml with the api specification: - ```yaml # api.yaml