diff --git a/adaptive_low_rank/data.py b/adaptive_low_rank/data.py index d96dd8f4fb3..608f620e71f 100644 --- a/adaptive_low_rank/data.py +++ b/adaptive_low_rank/data.py @@ -172,6 +172,7 @@ def get_test_and_validation_dataset( target_key, validation_set_size = 256, test_set_size = 1024, + download_data = True, ): """Retrieves specified test and validation datasets.""" try: @@ -179,7 +180,7 @@ def get_test_and_validation_dataset( dataset_name, split="test", shuffle_files=False, - download=False, + download=download_data, data_dir=_DATA_DIR, ) except ValueError: @@ -187,7 +188,7 @@ def get_test_and_validation_dataset( dataset_name, split="train", shuffle_files=False, - download=False, + download=download_data, data_dir=_DATA_DIR, ) test_ds = _carve_test_dataset( @@ -233,13 +234,14 @@ def get_dataset( text_key, target_key, test_set_size = 1024, + download_data = True, ): """Retrieves specified train and test datasets.""" train_ds = tfds.load( dataset_name, split="train", shuffle_files=True, - download=False, + download=download_data, data_dir=_DATA_DIR, ) try: @@ -247,7 +249,7 @@ def get_dataset( dataset_name, split="test", shuffle_files=False, - download=False, + download=download_data, data_dir=_DATA_DIR, ) except ValueError: diff --git a/adaptive_low_rank/model.py b/adaptive_low_rank/model.py index 75a4d27aed0..4ef071bbb6e 100644 --- a/adaptive_low_rank/model.py +++ b/adaptive_low_rank/model.py @@ -43,15 +43,26 @@ _TOKENIZER_BASE_PATH = "" +def get_pretrained_tokenizer( + tokenizer_path, + local_files_only = False, +): + """Obtains pretrained tokenizer.""" + return transformers.AutoTokenizer.from_pretrained( + tokenizer_path, + local_files_only=local_files_only, + ) + + def get_model_tokenizer_path_from_name( model_name, get_tokenizer = False ): """Gets model or tokenizer path from model name.""" base_path = _TOKENIZER_BASE_PATH if get_tokenizer else _MODEL_BASE_PATH if model_name == "bert": - return base_path + "bert_base_cased" + return base_path + 
"bert-base-cased" elif model_name == "roberta": - return base_path + "roberta_base" + return base_path + "roberta-base" else: raise ValueError(f"Unsupported model: {model_name}") diff --git a/adaptive_low_rank/requirements.txt b/adaptive_low_rank/requirements.txt index 7a352a13428..43e80ecbdc7 100644 --- a/adaptive_low_rank/requirements.txt +++ b/adaptive_low_rank/requirements.txt @@ -1,5 +1,7 @@ -tensorflow_text>=2.16.1 -tensorflow>=2.16.1 -transformers>=4.34.1 -numpy>=1.23.5 +tensorflow_text>=2.15,<2.16 +tensorflow>=2.15,<2.16 +transformers>=4.34.1,<=4.37 +numpy>=1.23.5,<=1.26 absl-py>=1.2.0 +clu>=0.0.12,<0.1 +tensorflow-datasets>=4.8,<4.10 diff --git a/adaptive_low_rank/train.py b/adaptive_low_rank/train.py index 3be75589539..1cdb0366a7d 100644 --- a/adaptive_low_rank/train.py +++ b/adaptive_low_rank/train.py @@ -221,9 +221,8 @@ def train(strategy): trainable_weights = prediction_model.trainable_variables logging.info( - "***Model trainable weights***: %d, %s", + "***Number of Model trainable weights***: %d", len(trainable_weights), - trainable_weights, ) @tf.function diff --git a/automated_feature_engineering/GithubAutomatedFeatureEngineering_Demo.ipynb b/automated_feature_engineering/GithubAutomatedFeatureEngineering_Demo.ipynb new file mode 100644 index 00000000000..b7b26212a01 --- /dev/null +++ b/automated_feature_engineering/GithubAutomatedFeatureEngineering_Demo.ipynb @@ -0,0 +1,648 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "# Automated Feature Engineering\n", + "\n", + "This is a demo of the automated feature engineering framework, which has been shown to have **state-of-the-art downstream task perfromance** in experiments.\n", + "\n", + "## TL;DR on AFE\n", + "\n", + "AFE uses a set of **feature importance masks** to 
learn which features best benefit from which transformations. It uses **local** and **global masking** to determine the best transform functions for each feature, as well as which transformed features lead to an **optimal performance** with respect to the **downstream task**.\n", + "\n", + "## Information required to run this notebook\n", + "\n", + "What you need:\n", + "* GCP project ID\n", + "* BigQuery dataset and table names\n", + "* Training target column name (e.g. sales)\n", + "* Task type, can be classification or regression\n", + "* A GCS bucket to store AFE output. Used for displaying features in colab.\n", + "\n", + "The authenticating user should have the following IAM roles:\n", + "* BigQuery Admin\n", + "* BigQuery Data Editor\n", + "* Storage Admin\n", + "\n", + "A service account (can be created under “IAM & Admin”) to run the container is also needed, and the service account should have the following roles:\n", + "* Vertex AI Custom Code Service Agent\n", + "* BigQuery User\n", + "* BigQuery Data Editor\n", + "\n" + ], + "metadata": { + "id": "g7q5_x7_DQ0b" + } + }, + { + "cell_type": "code", + "source": [ + "# @title # Install the required dependencies\n", + "!pip install google-cloud-aiplatform --upgrade\n", + "\n", + "## Import libraries\n", + "from google.cloud import storage\n", + "import json\n", + "import pandas as pd\n", + "from IPython.display import display, HTML\n", + "from google.colab import auth as google_auth\n", + "from google.api_core import exceptions\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "TAD-RuvbCNpK", + "outputId": "3ee0d3a4-b983-4d84-fd6b-87f4be45f31e", + "collapsed": true, + "cellView": "form" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Requirement already satisfied: google-cloud-aiplatform in /usr/local/lib/python3.10/dist-packages (1.48.0)\n", + "Requirement already satisfied: 
google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (2.11.1)\n", + "Requirement already satisfied: google-auth<3.0.0dev,>=2.14.1 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (2.27.0)\n", + "Requirement already satisfied: proto-plus<2.0.0dev,>=1.22.0 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (1.23.0)\n", + "Requirement already satisfied: protobuf!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.19.5 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (3.20.3)\n", + "Requirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (24.0)\n", + "Requirement already satisfied: google-cloud-storage<3.0.0dev,>=1.32.0 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (2.8.0)\n", + "Requirement already satisfied: google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (3.12.0)\n", + "Requirement already satisfied: google-cloud-resource-manager<3.0.0dev,>=1.3.3 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (1.12.3)\n", + "Requirement already satisfied: shapely<3.0.0dev in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (2.0.4)\n", + "Requirement already satisfied: pydantic<3 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (2.7.0)\n", + "Requirement already satisfied: docstring-parser<1 in /usr/local/lib/python3.10/dist-packages (from google-cloud-aiplatform) (0.16)\n", + "Requirement already satisfied: googleapis-common-protos<2.0.dev0,>=1.56.2 in /usr/local/lib/python3.10/dist-packages (from 
google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (1.63.0)\n", + "Requirement already satisfied: requests<3.0.0.dev0,>=2.18.0 in /usr/local/lib/python3.10/dist-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (2.31.0)\n", + "Requirement already satisfied: grpcio<2.0dev,>=1.33.2 in /usr/local/lib/python3.10/dist-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (1.62.1)\n", + "Requirement already satisfied: grpcio-status<2.0.dev0,>=1.33.2 in /usr/local/lib/python3.10/dist-packages (from google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (1.48.2)\n", + "Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform) (5.3.3)\n", + "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform) (0.4.0)\n", + "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.10/dist-packages (from google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform) (4.9)\n", + "Requirement already satisfied: google-cloud-core<3.0.0dev,>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.3.3)\n", + "Requirement already satisfied: google-resumable-media<3.0dev,>=0.6.0 in /usr/local/lib/python3.10/dist-packages (from google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.7.0)\n", + "Requirement already satisfied: python-dateutil<3.0dev,>=2.7.2 in /usr/local/lib/python3.10/dist-packages (from 
google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform) (2.8.2)\n", + "Requirement already satisfied: grpc-google-iam-v1<1.0.0dev,>=0.12.4 in /usr/local/lib/python3.10/dist-packages (from google-cloud-resource-manager<3.0.0dev,>=1.3.3->google-cloud-aiplatform) (0.13.0)\n", + "Requirement already satisfied: annotated-types>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from pydantic<3->google-cloud-aiplatform) (0.6.0)\n", + "Requirement already satisfied: pydantic-core==2.18.1 in /usr/local/lib/python3.10/dist-packages (from pydantic<3->google-cloud-aiplatform) (2.18.1)\n", + "Requirement already satisfied: typing-extensions>=4.6.1 in /usr/local/lib/python3.10/dist-packages (from pydantic<3->google-cloud-aiplatform) (4.11.0)\n", + "Requirement already satisfied: numpy<3,>=1.14 in /usr/local/lib/python3.10/dist-packages (from shapely<3.0.0dev->google-cloud-aiplatform) (1.25.2)\n", + "Requirement already satisfied: google-crc32c<2.0dev,>=1.0 in /usr/local/lib/python3.10/dist-packages (from google-resumable-media<3.0dev,>=0.6.0->google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.5.0)\n", + "Requirement already satisfied: pyasn1<0.7.0,>=0.4.6 in /usr/local/lib/python3.10/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3.0.0dev,>=2.14.1->google-cloud-aiplatform) (0.6.0)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.10/dist-packages (from python-dateutil<3.0dev,>=2.7.2->google-cloud-bigquery!=3.20.0,<4.0.0dev,>=1.15.0->google-cloud-aiplatform) (1.16.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (3.3.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from 
requests<3.0.0.dev0,>=2.18.0->google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (3.7)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (2.0.7)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3.0.0.dev0,>=2.18.0->google-api-core[grpc]!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,<3.0.0dev,>=1.34.1->google-cloud-aiplatform) (2024.2.2)\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "# Copyright 2024 Google LLC\n", + "#\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# http://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License.\n", + "\n", + "# @title # Authentication. 
{ display-mode: \"form\"}\n", + "\n", + "# @markdown The user should have the following IAM roles:\n", + "# @markdown * BigQuery Admin\n", + "# @markdown * BigQuery Data Editor\n", + "# @markdown * Storage Admin\n", + "from google.cloud import aiplatform\n", + "from google.cloud import bigquery\n", + "from google.colab import auth as google_auth\n", + "\n", + "google_auth.authenticate_user()\n", + "print('Authenticated')" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "tdG245WUDmgy", + "outputId": "f89b7c35-3a38-4e1e-e67d-1c5a19491faa" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Authenticated\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "# @title # Setting project parameters\n", + "\n", + "# @markdown Specify the following BigQuery meta information:
\n", + "\n", + "PROJECT_ID=\"sungyong-internship\" # @param {type:\"string\"}\n", + "# @markdown SERVICE_ACCOUNT: the service account should have the following roles:\n", + "# @markdown * Vertex AI Custom Code Service Agent\n", + "# @markdown * BigQuery User\n", + "# @markdown * BigQuery Data Editor\n", + "SERVICE_ACCOUNT=\"feature-selection@sungyong-internship.iam.gserviceaccount.com\"# @param {type:\"string\"}\n", + "# @markdown * DATA_LOCATION refers to the region the data are located.\n", + "DATA_LOCATION=\"us-central1\"# @param {type:\"string\"}\n", + "\n", + "# @markdown The GCS bucket to hold AFE results.\n", + "# @markdown Must specify if want to display features below.\n", + "GCS_OUTPUT_BUCKET=\"feature_selection_0318\"# @param {type:\"string\"}\n", + "\n", + "\n" + ], + "metadata": { + "id": "QqBXSNPoOdns" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# @title # Setting parameters for training.\n", + "\n", + "# @markdown Specify the following model parameters:
\n", + "\n", + "TASK_TYPE = \"regression\" # @param [\"regression\", \"classification\"]\n", + "FEATURE_DISCOVERY_FUNCTIONALITY = \"discovery\" # @param [\"selection\", \"discovery\"]\n", + "\n", + "# @markdown * DATASET_NAME refers to the BigQuery dataset containing the data table.\n", + "DATASET_NAME = \"kaggle_data\" # @param {type:\"string\"}\n", + "LOGGING_FILENAME=f\"{DATASET_NAME}_features.json\"\n", + "\n", + "# @markdown * TABLE_NAME refers to the data table to discover features on.\n", + "TABLE_NAME=\"CO2_emission\"# @param {type:\"string\"}\n", + "\n", + "# @markdown * TARGET refers to prediction target name.\n", + "TARGET=\"CO2_Emissions\"# @param {type:\"string\"}\n", + "# @markdown * Number of learning steps:\n", + "NUM_STEPS=20 # @param {type:\"integer\"}\n", + "\n", + "BATCH_SIZE=512\n", + "FEATURE_DIM=128\n", + "LEARNING_RATE=0.005\n", + "NUM_SELECTED_FEATURES=30\n" + ], + "metadata": { + "id": "O448B_BnN20_", + "cellView": "form" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# @title # Deploy the training job\n", + "\n", + "# @markdown The following functions deploys the training job defined above.\n", + "\n", + "def delete_gcs_feature_file():\n", + " try:\n", + " storage_client = storage.Client(PROJECT_ID)\n", + " bucket = storage_client.get_bucket(GCS_OUTPUT_BUCKET)\n", + " blob = bucket.blob(LOGGING_FILENAME)\n", + " blob.delete()\n", + " print(f\"Existing {LOGGING_FILENAME} deleted.\")\n", + " except exceptions.NotFound as e:\n", + " print(f\"{LOGGING_FILENAME} does not exist. 
Nothing to delete.\")\n", + "\n", + "def deploy_training_job():\n", + " aiplatform.init(project=PROJECT_ID, location=DATA_LOCATION, staging_bucket=f\"gs://{GCS_OUTPUT_BUCKET}\")\n", + " training_job = {\n", + " \"display_name\":\"feature_selection\",\n", + " \"worker_pool_specs\": [\n", + " {\n", + " \"machine_spec\": {\n", + " \"machine_type\": \"n1-standard-16\",\n", + " \"accelerator_type\": \"NVIDIA_TESLA_T4\",\n", + " \"accelerator_count\": 1\n", + " },\n", + " \"replica_count\": \"1\",\n", + " \"disk_spec\": {\n", + " \"boot_disk_type\": \"pd-ssd\",\n", + " \"boot_disk_size_gb\": 100\n", + " },\n", + " \"container_spec\": {\n", + " \"image_uri\": \"us-docker.pkg.dev/vertex-ai-restricted/builtin-algorithm/afe:latest\",\n", + " \"args\": [f'--config={{\"project_id\":\"{PROJECT_ID}\", \"dataset_name\":\"{DATASET_NAME}\", \"train_table_name\":\"{TABLE_NAME}\", \"target\":\"{TARGET}\", \"task_type\":\"{TASK_TYPE}\",\"batch_size\":{BATCH_SIZE}, \"feature_dim\":{FEATURE_DIM}, \"learning_rate\":{LEARNING_RATE}, \"num_selected_features\":{NUM_SELECTED_FEATURES}, \"num_steps\":{NUM_STEPS}, \"model_type\":\"{FEATURE_DISCOVERY_FUNCTIONALITY}\", \"gcs_output_path\":\"gs://{GCS_OUTPUT_BUCKET}\", \"logging_filename\":\"{LOGGING_FILENAME}\" }}']\n", + "\n", + " }\n", + " }\n", + " ]\n", + " }\n", + " job = aiplatform.CustomJob(**training_job)\n", + " job.run(sync=True, service_account=SERVICE_ACCOUNT)\n", + " print(\"Job done!\")\n", + " return job\n", + "\n", + "# delete_gcs_feature_file()\n", + "job = deploy_training_job()\n" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "iC0s4eVeEtWK", + "outputId": "2a0f3645-6bf4-48c7-d197-66b9d7ea241b", + "collapsed": true + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "INFO:google.cloud.aiplatform.jobs:Creating CustomJob\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "kaggle_data_features.json does not 
exist. Nothing to delete.\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "INFO:google.cloud.aiplatform.jobs:CustomJob created. Resource name: projects/712758616004/locations/us-central1/customJobs/4493634787847700480\n", + "INFO:google.cloud.aiplatform.jobs:To use this CustomJob in another session:\n", + "INFO:google.cloud.aiplatform.jobs:custom_job = aiplatform.CustomJob.get('projects/712758616004/locations/us-central1/customJobs/4493634787847700480')\n", + "INFO:google.cloud.aiplatform.jobs:View Custom Job:\n", + "https://console.cloud.google.com/ai/platform/locations/us-central1/training/4493634787847700480?project=712758616004\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob 
projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 
current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_PENDING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + 
"INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_RUNNING\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob projects/712758616004/locations/us-central1/customJobs/4493634787847700480 current state:\n", + "JobState.JOB_STATE_SUCCEEDED\n", + "INFO:google.cloud.aiplatform.jobs:CustomJob run completed. 
Resource name: projects/712758616004/locations/us-central1/customJobs/4493634787847700480\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Job submitted.\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "# @title Retrieve Engineered Features\n", + "\n", + "# @markdown The following functions retrieves and displayed engineered features.\n", + "\n", + "def retrieve_features():\n", + " # Initialise a client\n", + " storage_client = storage.Client(PROJECT_ID)\n", + " # Get the bucket\n", + " bucket = storage_client.get_bucket(GCS_OUTPUT_BUCKET)\n", + " # Get the blob\n", + " blob = bucket.blob(LOGGING_FILENAME)\n", + " # Download the blob as a string\n", + " features = json.loads(blob.download_as_string())\n", + " # Display the features\n", + " if isinstance(features, dict):\n", + " features_df = pd.DataFrame(features.items())\n", + " features_df.columns = ['feature', 'score']\n", + " html_table = features_df.to_html()\n", + " display(HTML(html_table))\n", + " else:\n", + " print(features)\n", + "\n" + ], + "metadata": { + "id": "r_YegiQdQuMs" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "# Retrieving the engineered features\n", + "\n", + "In addition to outputing the names of the engineered features, the pipeline also uploads the engineered features into a new BigQuery table, under the same project and dataset as the original training table. 
The name of the new table is a combination of the input table name, along with a time stamp when the new table is created.\n", + "\n", + "An example is as follows:\n", + "\n", + "\n", + "![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAACPAAAAIGCAIAAABfoHStAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAI8KADAAQAAAABAAACBgAAAABIBYAxAABAAElEQVR4AeydB5gT1dfGpWzvlWULvfcmTUSKIAhSFEQUK6IURbB8qPi3N0RFFBRURAVFEASkK0WUIiBFeoeFZWF7Zzt8L85yczNJZpNsEpLdN88+cOfOuWV+mbT73nNOpatXr97EBwmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAk4K4HKzjoxzosESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAErhGgoMX7gARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKkJUNBy6qeHkyMBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEqCgxXuABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAqQlQ0HLqp4eTIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESoKDFe4AESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMCpCVR16tlxciRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAuWXwLaDOXPXJp+Oz8+8XHxDrjLQt2qrut4vPxgR5FvlhkyAg2oTSEoveueHSwdP52bkFGlb4myAT9VW9bwn89kslZTLGny5Mhlzf6J/qMteASdOAiYJ5BVeORqbfyQ279DZ3PjkguSM4vTsa+974YFVw4PdwgLdqgW5NYj2aFbbMyLYzWQvpZ2Ysyp5ZD8XfgVVunr1amnXyPMkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkYGMCULOemXHOxp1a1V31YPf5r9T292YoI6vw2a1RUkbRA2+dSftvSdf8QSKC3Oa/UifAh8+m+cxcwxJq1lcrkzDXUf3DqGm5xnPGWZpBYPP+7D/3Zh2KzT0Vn2+G+TWT0AC35nW8Ojf1HdQlwMwmitm3a1JmLk/cNauxRa2cyrjK66+/7lQT4mRIgARIgARIgARIgARIgARIgARIgARIgAQqAoHXv7uYkFboDFeanVuck3ulS3NfZ5gM5yAIvDv/IjwVxKGZhey8K9m5V25twWfTTGCuYSbULEx3z/HLN91UqW0Db9eYOmdJAsYIwM9o076s/82NX7A+9VhcXlqWBW7Kl/OvnL2U/9f+rOV/ZVSqUql+tEfVKpWMDaJX993aa2oWqp7oH6Z3wqUOGHLQpZ4uTpYESIAESIAESIAESIAESIAESIAESIAEyguBk3F5znMpcBdznslwJgqBf67pFtY8th3MtqYZ2zgrAVnNUuaouGrRT8tZnzHOS4vAlas3rd+d+c3qZPNdskx1l5hROG3Rpbmrkx/oGTykW6Cvl8nYuVCzZiy7pma5+oMhB139GeT8SYAEbEmgoKDAzc2tUqXSNzXYclT2RQIkQAIkQAIkQAIkQAIkQAIVksDNo4841XW7dBQmpyKpPRk+79p8eFZFwFDNEgaMPShQsOAqBE7GF7z0VdzZi+ZGFzT/uhCHcOqY6Ga1PA2bqNQsl/6wo4eW4fPLGuME1q1bt337dnHuvvvua9SokTh0zoIrztk5SZbjWUHBmjt37p49e06cOH
H8+PELFy5Urlw5ODi4du3aderUadKkSbP/HvXq1UN9OebASyMBEiABEiABEiABEiABEiABEiABEiABZyOgoWZhqvTTcrbni/PRJrBhb9br38TnFV7RNrPubHJG4RMfnv3fg9X7dtBLrLVgQ1r58M1SsFDQsu72qIitIA5NmzZNXHnLli1dQtByuTkLwiw4gMDatWufeeYZ6FjyWFeuXEn+77Fr1y5R7+vr26pVq9atW7/55puBgYGingUSIAESIAESIAESIAESIAESIAESIAESIAF7ENBWs5QRqWnZgzz7tDmBwqKrH/+cuHhzqs17ljvEKK/OjT9wOu+5YeFVKl8LQAU16+OfL8k2rl6moOXqz6De/NPT01944YU//vgDi+8PP/zwhAkT9E7zwM4E4OWzadOmLVu2xMfHp6SkFBcX161bt0GDBvXr18e/Xbt29fPzs24K+fn5GzduXLlyJbyIEhIS0Dm6CgsLi46O7tWrV79+/cLDw63rOS0tDT1j2ocOHUK3qamp6FbMGRJOixYtrOs5Ly/v77//PnLkyOHDh/HvyZMnMUn0rDzatm0LLNb1bKtWjz/++Jw5c8zsLTs7G88sHuPHj6egZSY0mpVLAsfj8ncczjlwOhfZR5G+GylMg/yqhgVWbdPAp18Hv5oRHuXyqnlRJEACJEACJGArAskZRdOXJOLDNC27qOx9NqvtNXdSrbL3wx5IgARIgARIwAkJmKNmKdOmpuWETx+nJBNISi+a+Pn5Y+cclDby582pR8/nfTQ2eu2OzHKmZoGqLQUthOeSnycHl0+fPu3gEZ1wuA8++ODrr79WJrZv374OHTp06tTJCedZzqZ09erVxYsXv/fee3v37lVd2tmzZzds2KBURkREvPvuuxAaLYpcV1RU9Omnn77xxhuZmZmqzo8ePYqaBQsWoMMHHngAzz6GUNloHJ4/f37q1Km4YXJzc2UzxNzDzSNqhgwZArNatWqJGnMK0N7g+aR6VcbGxgqfJ8wZetI777wTGhpqToc2t1myZIn5apbNR2eHJOCKBDbuzVqwIXXfSXVO5tyUgviUgn9PXZ67JqlhDc+htwUPvEXPt90VL5ZzJgESIAESIAE7Efj454Tf/1F/sbfTWOyWBFyagL3TezhbDieXfrI4eRKwBwHz1SxldGpa9ngW2KdNCKRnFz8+NRYrJ+b0FuBTtX1j76Y1vaLC3KuHuEWHuRUVX41PKcTfxeTCI7G5u45eNmdf1IHTl4e+djojxwY7qMyZtiNtbCloOXLeHMsogR9++EGu//777yloyUDsUYZL04MPPrh69epSO7906dJjjz02c+bM6dOn33LLLaXaw+DixYt33nmnLC8ZbYUQefPmzVu+fDnELdgbtVFV/vzzzyNHjszKylLVGx5Cq4M69eyzz06ePNnb29vQQFUDEQtSFpqo6lWHmPOXX36JaSCC35gxY6pUqaIysOsh/NKeeuopeQhFYOvZs2fDhg19fHwgH8JfDdeCB9jiiZCNWSaBikbgSGze5K8vnE8q/bsXdhu9PS/+xw0pE+6p1qmpT0UDxeslARIgARIggVIJbD2QXaqN+QbFV823pSUJkAAJkAAJuAwBS9Us5cKoabnME1zBJvrcF3GlqllhgW73dgvq2NS3UYyRyDcBPlUa1/AU2BA755c/05b8mSZqjBbKpZqFK6WgZfTpdtXKoKCgc+fOidmHhISIMgv2IBAXF3frrbfCDcv8znfv3g3VZOfOnaWG8oPo0rt374MHD5rZOTSYe+65B0oS+tduAr+oV155RdtGPovggfAtO3PmzI8//ijXG5b/+uuvO+64Q+XyZWgmanCNTz/9NKS+t99+W1Q6oPDJJ59gUDGQv7//b7/9Bo9GUSMXEEmSgpYMhOWKRmDJX+nv/2CZpns6Pn/8Z+d6t/N/5/GoioaL10sCJKAiAEV874ncY3F5p+LzYi8W1K7uXifSs0ENzzb1vY3+VFM15yEJlD8Cl/Ptkg
O8/IHiFZEACZAACVRYAtapWQoualoV9rZx2gt/ZU78/lPqUDfybGPC3Ef2C+3X0YJQNw2iPV68P+KJ/mHfrUvBlmK5t4pQpqBVrp5lOADBOUa5pKpVqz700EPl6vKc7GJycnLuuusulZqFpFYjRoxo/N8D+iISUx04cACh7WQzJMS6//77//nnH09PnbRueHF4+lRqVsuWLXv06NGxY0ektoK8tGPHju3bt69btw4xD5XmUJ4QIRB5tjTi+M2fP99QzYISBmc+zBoJrpKTkzHnzZs3Q74SPaN/uH/17dsX7miGU1VqkChrwIABKjULKb5AqVGjRvXq1UtKSsLc/v33388//xz0RD9TpkwZNGhQu3btRI29C6rgkHA+M6Vm2Xsm7J8EnJzAlB8TFv9pZcLS3/7JxHvTu9S0nPw55vRIwG4EcvKufLQoYcW2dHmEI+fy8Lfq72t193QNevrucB/PyrIByyRAAiRAAiRAAiRAAhWZQFnULIUbNa2KfP8427XPWZW8bleGxqwG3RL0wn3h7m7W/CYK9q8ycWj44K6Bb39/EWkgNEYpZ6coaJWrJ3T8+PHdu3eHYhEVFTV8+PCwsLBydXlOdjGTJk2SgwFWqlRp7NixyKTl5+cnZgoVZ+DAgVAZn3vuua+++krUQ+h64YUXPvvsM1GjKmzcuFEVtQ9Zsr799lvolIollCf4QqH83XffIXhgcXGxUp+env7aa68hsKGqQ+UQebNGjRoln6pbt+7s2bNlpy7MGZoZzB555BFIpGgi7BGmr0uXLrVr1xY1cuHJJ5/E6KKmevXqCK44dOhQUYOG7du3x4XAK2vcuHErVqxQTiFPGFKLwRHKw8OIU61obsMCtDfRG6Id4oUjDlkgARIQBBZtSrdazVI6QY4Qt6qVX3+4eqVKolcWSIAEKgSBXUdzXp97MTGjUONqESLjr3+zX3+0+s2NGKFUgxNPkQAJkAAJmCRgKg+WvfNvmZwQT5hNgM+R2agqlmHZ1SyFFzWtinXfOOvV/rEve9aKJI3ZIaoNYttoGJhzqlY1969fqPnN6pQvfk00x74c2FDQKgdPot4lNG/eHP4uelU8sAMBpFZCCii541mzZj3xxBNyjShD4oIxsjRBOhKVM2bMePHFFyE9ihq58PLLL8uHkJfQP3qQK5UypCBfX1/ol4WFJWtGGOWll16Cr5ih8RtvvAEvLlEP7QqaHFJGiRq5cPvtt8NPq2nTpsLpClENEXtQVuaEPTJ4Id6gOITwBkGuTZs2okYuxMTEIHsWej516pRSf/jwYbhtTZw4UTazUxkE8PSJzgFK21VOWLJAAhWKwP7TuVMXGo802CDGs0Mj3xZ1vZrXueZmeuB03v5TudsPZ528kG+IaPXf6e0beVvkO2/YCWtIgARci8Ce45fHfqILgq0xeShesPxiYs12DUvP06nRD0+RgOsS8HKv/OenDV13/pw5CZAACZAACdiKgK3ULGU+1LRs9bywH6sJTFmgS3ei6sTTrfLUsTEdG9vsR9Bjd4Y0q+s5bppZv8JUk3G5Q/sKWvKqsW3R1KlTx7YdsjcSsIgA1BchIKFh586dVZ5Phr29//77y5YtS0hIEKcQ+M6ooIUXDsIJCrPAwED4chlVsxQbBAzE6JiScghvrcWLF0+YMEH0oBTgPgV3LrkSopopNUsxg0/Vq6++CnlMtFIF6xP1CEgoyihAkDOlZilmcMaC/1b//v1Fq127domyXQvIM3flii51gSmHM7vOgZ2TgJMTQBzTN76NN5ykW9VKYwaEP9AruLLkcdWtlS/+nro7bN66lNkrkwqL1OnpZy1P6tvev7LcxrBr1pAACZQXAtl5xZPnGHkD0bi+176JX/xWHSzra9jwFAmQAAmQAAmQAAmQQDkmYFs1SwFFTasc3zDOf2mIeZNsIl4Ffvh8Or5Gq3peNrwKLOOs25lpww6duSv+bnTmZ4dzc14Ca9eulScHwQkhB+UawzJ0KcQklOuRTU
o+FOWlS5eKMgr33ntvqbH4nn32WbnJokWL5EOlvH79egT3E/VIbaUELRQ1RgsIliiPjmCJIryhbL9mzRr5EMEJ5UOj5TvvvFN2jVIlDDPaxCaVspqFDuU52KR/dkIC5YDAnweyzyUWqC6kVnWPeS/XebC3npolbCBXPdwn5PuX69QMdxeVSuFSWuEvW7RiRqvsbX6YV3gFuXxs3i07JAESMErgw58STf1yM2qPSvhpfbhQt+PHlFlZ6iGz5RbwfaAsCNmWBNQEsnOLi4rVu1jURuYdYzdMVm5JBHXzWtCKBEiABEigXBGwh5qlAIKmhc7LFSxejCsQwHebuWtN3niTH6xuWzULSN6ef/HXrbpEMK4Ayfo52tdDy/p5VeyWBQUF7u7qBUFHIoHvkZubm3UjIp6bnRQC6CgQjTQclaybMFpZOufk5GToOmI4RBTU9kYSlggIKcoomBK0Vq9eLZs9+OCD8qHRMlJhNWrU6OjRo8rZv//+Ozs7G6EIZWPED5QPb7vtNvnQVBl3QsOGDffv368YgNWxY8eaNGki28P3C9EIRU21atXMyd+GZxOulgg2qDREt9DbRJIw0RsLJEACjifwzSojX7w+HBNtKFap5lYv0v3DcTFDXyuJJirOfrcmeUjXQHHogEJSetHiP9L/OZFzIakwJfNaRNYAnypRoe6Na3oO6RaMeWrP4fVvLyZnluwA8PWs8v4Tkdr2L3xxIbewZK28WlDV/z1YXdjHpxS++4PJOAPCTFWICXWfdH81pVK7hyqVK1ULrFo9xA1/UaFuTWt7qXzhLv43AbHiiM0Xk0dERASX8in/x7/ZizeniVmF+FdlLjRBo6IVDNOTaCS9yMgpXvW3Nb+j8OsLCY3xcrMV3ivYorgrc+uB7AvJBecTCzNyrr2ifTwr4+a/pZlfz7Z+TWpeC5pq9DFp9oWc/JJXdLBflTcfLeUdAJ18vjz5cGyu6O39UZG+XqVfC2S2l7+Kx1SVR5+b/ft3ChCdaBTkGarM8BrHnK+9J4S6RYW4N6nl6e1hcgsjklTvPaWbtqorU4dP9A9tUceW+0lNDVQB63FLzFyavGF3Zlq2bheaFRyCfKv2aucP52mbOz4ePJu3ZHPa8bi8+ORCCFr4DIoMcasV4VEj3P3OTgENoi1IiIu3i6V/pm87nB2fVJiUUYgXAl41kaFu9aPwSR3UrJbJV6gCRPU5ZUjJx6NyTLh7TDU3zK1elIefwUvyypWr//smPuOyTuoe1j341ubGg7GL/i8kF773o+5jHa+41x6uHhrAdRVBiAUSsIaA4ZcNa3qxvI3GVxrLO2ML6wnYT81S5kQ/LeufG7a0lsCyLRmmNvl1buZ3x81lzZulmteUBQkVR83CtfOLl+oGuDGHEANWrVqFeHRwUrl48WJaWhoCwUGiQIojPOBG06NHD3NmBr8cOSLck08+WatWLcOGcCeKjy8JBQPx6bXXXoMNtARM4JtvvkEPCIvn7+/fsmVLlQRi2BVqcnJyfv31VzgVIWTcpUuXIHhg8pGRkbfeeitci5CHqUqVUn7PHzly5PvvvxedjxgxAtmVlMPY2FhcFCZ25swZzAo6H8SVxv89HnnkkRo1aohWFhXKOGc8R/JwDRo0kA81yuK6FBvEvjNqLPsqRURE3HLLLUbNVJXdu3cXgtbVq1chFLVv3162EU+6UgmS8lmNMqYtBC2YYdoqQQtJtgYPHix6UJ0V9YYF3ORC0IKOe/LkSchyhmZlr/nxxx8PHDig9JOSkiJ3CCFNjqmIU7h7n376admGZRKoUAT2nrwsL8sq1/7csIhS1SzFEvlIJw6JmLZYt9aDejhpnbpYULd6KTKSTTinZhZPW5zw+z+ZxWKR+L9+sXCWkZOLS1vyZ9rNjXwmDKmmse6278TlCyklPmpQwkqd2M6j2ZevL3+rQME5bMfh7FJ7UBmkxegW8izqoW6kx6i7wnq08hNuw1jXRrzHbQezxBDzfkt94b4StUxUyg
WQm/lLwtkEnZfeC8Oqiw5lS5ZJQEVg70mL1RHRw94TeaUuJQtjjQKibWzalzV7RdLpeHViP7yaTsXn4+/735I7NfV96f4IvEAMu/rnWE7m5RJnkYggIwaGTY6fy5Nf6UXmuZps2J29/ZDu/eFScoGZgpY8Q8PJyDWBvlUf6h0ypFugUWHjWFy+PG25oUZ5yG1BGmd5qiwEZvyShA+psvSgtIUetuiP1KpVKkEnLntvSg//nsqdvjjhwBm91zg+as8nFeAPNgs2pgy8JWj0wDBIqtqD4pWIK125PR0u1LIlFLLj5/GXB128eW2v8fdU09i8nJBSaP7dC7H80b6hw3sGIXiyGBEfjo1qeH36i849FAtPXZrV0f68+25tijxuzzYBVLMEUhZIgARIwAoC9lazlClR07LiqWETqwnAPeubNUZ2CaNDfBX534MRVvdstCHUrMWbU42eKq+VFLRu8DMLKeuNN96YOXNmfr7eT24oLpAQFBXhgw8+6Nu379SpU1VyiOHUV65cOW/ePFGPkG5GBa25c+cK3cvLywuC1oULF4YNG7Z161bRNiMjY9u2beLQVOHnn3+eOHEimssGmPyJ/x6Qx+C6NGfOnFatWskGqjJskV9KVLZo0QJXik7+97//ffrpp3KAO6hl8GpSHJs++uijjz/+eOTIkaKhmYWyzxkOSaGhoWK4tm3birJ2QRWW0Gj2JsgtiYmJoh+IdqpW4pSqoJKCoIqpBC043snThiyo6sHUoWoChtOuXr36L7/8Yqq5Rj20W/ms/dyzoInieZfHEmVopfLth3rcrhS0BB8WKiCBXUcvq666ZV3vYd0sWL7EgtGmfZn7Tur1s/1gdt3qwaqebX545lL+hM/i4q9rUab633U0Z9TUs28/Hm2T1XNTo9yQeqzUvzg7rkGM57RxMeGBJV/zxg0K234oC6v8ymPZ1vRH+4ZoLMBt3JMlq1lw8hh8q1leIzfkkjmoUxHYXwZB6+CpXJu8JN+df2nZVr0vGEYRQUka9sbpj8ZGQ+E2auCAyl+36XmzxSYWQDNoWdeWzk/p2UVYr/9hfepbj1W/gVfqAJjlY4jfduliHpT9iuDpZStBa83OzLe+jzdMkylPEpshlm5J+3135ruPR3VqavJlBRfqCTPPQ7WS2xqWoZyN/SR28ojq/Tra4AMIrm+fLU2AWDhpeETnZrq5De0e+NOGVEQ9VSZw6kL+5v3Z3VrqRbmQ55aQVrRSckKFJDZmQIhswDIJkAAJkIBFBByjZilToqZl0VND47IQwKKKKfesR/qEavwSt2LQCqhmgZLJABRWEGQTSwkg7RCce6DKqNQsw35gCX8pxZXK8GxZauAWA/mkY8eOspplTofwAYKUBR8slZqlartnz56bb7554cKFqnqNQ8TKg8YAWWvatGmymqVqAi3w8ccfHzRoEGaiOmXq0FZzhvNZkvSYPXu2qRFV9bguucaoj5TwslIs4aElN9EoI3yffFbVD07BkU6adRK8o2R7jbI8bWhOqoE0GmqfwtOhSLaKGZwFDaUy7R54lgRIwB4EsJqj6nZYj2Dt3coqexjffas6wOA2yQtBZW+rwyOxeSOnxBqqWdii7u6m/sIDh6rnPz+3eseNTO5lqws37AcLha/MuXDluo8afNF6t9MtCBYUXpn/u8kNXPhQVe0me3JgmLyl3XA41pCAIPDvaT0lW9SbU9h3yvq2ov8fN6ap1CwE4QwPcGtex7tOpIfqrQBZtf5v1oUTBm96oje7FiAb7ze45JXb7PKmhMirL38dDyHBrlfEzstOwLZ5pBLSS3SaMk7sp01pr35zwVDNwnaHaoFuqm8I8LJCHL9EEzcb4vU9/P5ZQzULr1NDJ0KM+Pq38RofWJZeF74hvDA77mS8zv/Y060y3JrlfuauNr6fWrGBi7PMYUCnwJoRFkRZlAdimQRIgARIwJFqlkKb+bR41zmGwO7jxn/XhAa4PdrHllthpv2cWNF8s5Rn0L4eWr
Za+HbM3ebgUeBNdc8990BPUo2LAH1wo4HwcOWKXgQGSDtvvvkmHJ4GDhyoalKWQ3Q7ZMiQuLg4SzvBZBRPqVIbIpghskAh0RQ8xko1hgEiEL777rtnz541x3j58uULFiy4//77zTG235zNGR0227dvly1VHlTKKdk9CzXIRyU30SirpC+V85NGQ+1TcJUTwfpg2bp1a6vzq6kGgpoFVVJUgkap0SmFMQskQAL2I3DygnrTdHSYWRG35ClFh6mjC567pP68k+3LXkZe+je+i5eXArFyPbJvaNuG3g1reFStXAkLWAfP5CJhjNgqBbnngwWX2jbwQdarsk/AzB5ua+n/9GCdj6+pVqpld9lM1QO+KySkFccm5C/fkn4iTvfc7T1x+csVyQj9pLQdMyB0w55MUFIOf/kz7ZE+IYG+RqJC/XkgW+4HeUdsHuBbvhyWyxkBQ4XG/As8dl4vlJn5DYUlXuPTf9aLd3pvt+CH7ggRr3HotVsO5sBj6ezFEuUefhtvf3/xu5dqiU4cVli1XfctSAz6++6MZ4eFG67sCwPDQoi/2+xnY+T69JwrZxPyt/yb88e/uiHgqvXKnPgvJsbAp0Q2lsszn6lZLcjI24Jsg3KYeWEYVa146KIEYi/lf7pEF5QPV9G4ltfYAeHISRngc22zCDZJ7Dh67aUHF0PlGpGyDpsqZk2sYXizvTf/UpIks+FmHNErtENT76Y1vbw8Kp+5eO2Tev5vybKX8MxliR2b+paa/HLswPAerfU8q3ILbzpzMR8xhOGXLHZ4YLaTv47DSx5SljLbuzr7z/st+dz1ySM08fZDOUY9zFIyi+CCprTCv/ikfrx/6R/owp6FMhKwNMeSKXvmTCrjE8HmJGBDAsjKiT/tDk29lk214mvcFBnWO5LAnhM5Roe7v2ewDbeKQs36cYNeUhWjg5bLSset4JRLfFZfFBQClZoVEBAwYcIECDNwUoFgAKHr1KlTCOMGaefyZZ2uiwh77dq1i4qKsnpow4Z//fWXqISWBhkSEeQgYyCwnqg3LAg1y9vb++GHH0ZQRIQ3hJPN7t27d+zYsW7dOuhSohWC3Y0aNer06dMeHqVvYYNjlmgI7y5kAkNwvOjoaLiCITghkiGhc2GAAiITQpNDei250mjZfnM2OpyqEgolYjCKypo1ayI7mjgUBZAXZRRUMpV8SlVG9Ei5RtaK5HpLyz/99BN85kQrPB2iXJYC7vDHHntM7mHy5MnyoW3LCOwpoggiMRsUVtE/hLQPP/xQHKLg66v3Y1g+xTIJVAQCWVJ2dOV6Y8ItF7TC1e/JRXqbNGwPEpumEW1P9BvgU/XjcdEt6ujeGBvFeODv1ua+E2eeF4IN0nhA00LMMdHQ3gUfr8pl3M1t2EPtiJs6NvYe2jVw1oqUuWuSxCV8uy5lZL9Q5RtzVJj7oC5BYvcWHFN+3JA2dqCRH5Bz9Denjx1UzfTqtxiKBRIoIYBdh0IzthSKt2fpUop2n1v2Z1/3S7xm+EifsHGD9G5yuJIgqmHr+jUfeOus8ObE+jXCkDo4HB+W11dvN/JNG29Km/Zm3dlB51Kpfck461blJtW7Ss2bbkLcwoGdA/86EPj8F+fFUj5+Wv9z4nL7hrp4a6rOo8LcokItfsNXdcLD8kQAGvDb8y/JPkm3twl47dEIoQbhYqHr4GXVsXGdkR+cPXKuZF8FNlUs+TNjaDc9d214Re84qvtZAeH2rcejbmuh++JdN9Idf91a+eG+FYGLsRXjnXnxc/6vlvaHEd58VC8EzA2f+33b+9/dNQgf/UJIQ3a9r1akPH13yYaPKggbODD8pa90+zvxOWhU0Prh9zToYeL5HXpbkBDLRSULJEACJEACJEACFZwAgsEcPmtkox5+idzR3t9WcCqymgWGFLRsdSNZ1s9zzz0n+2ZBo/rtt9+CgnQZSiDPQMXBEv+IESO6desm3JWQYGnSpEnz58+3bDwzrBFG76WXXurTp48qYZJ20969e8
NBKjg4WJjVr1//vvvuQ7IraAaLFy8W9fHx8d9++635cghUPWhXUKpED8gm1alTp4ceegiXj39FpEHoZEjTNWbMGGGpXbDfnLXHRdzFc+fOCZvRo0cbdUiS1SMYh4WV/NYSDU0VoCzKp2wiaEGJlPVF3KLDhw+XR7GujJv/mWeeQThK0bxr1654XsShzQtywjAVUty9uPltPiI7JAHXJQC9JE233HTtOvILrvp6WnZBYv1UNEPeeFG2eSEj58rXkgyDLeGzn6uJRTHDgbD29NXzNYe9floEYvpzf9bek5db19N7CzVs6Pw1uOrRA0J3Hsk+dP3bM5ifvljQMKZkK8nIO0NWbU+HlKVcy8+bUh/sHeTnpSch/H348pHrzWHWqp63TXIaqeh9vTJ59kqd8KY6a+bhk/3DuDXeTFYWmZmzDdbQRmyGhRxitaAVY+DZadHMYbz/jO6nI34xPtxH99Va7srXs8rEodVemHVeVG7Yk+VgQWvHkcsibQ+W6VvW84YAoMxnxbYMiwQtcRWGBbx+h/cI/mG9bufmyQsFGoKWYQ+sueEExIvLnJkYvjbNaaVh8/fhHCEswQwJGt8dFYkXl+EDmydeeSjyoXfPiI/7rQeyZUELXwymL9HlCUYPH4yJwW4Mw67g+PX5hBqPTNFFJoTbFpI73t7Wz9DYnBp8Dr4yovozM3Q/xHYdxw5C3Y+snm38GtfwFGrcv6cuI1JQ2wZ6c8vIKRabQjAo3kaQjdKc0WlDAiRAAiRQoQjwl06FerqNXuz+U7lGFz/qR3uKLNdGG5pfOXNZcoX1zVIolTjam4+MlmUnsG/fvt9//130A3er1atXy2qWOIUC/HhUKZosTXYl92aq/Oijj27cuBFeVhapWXfffTcCJ8pqlugfrlqLFi2Cz5moQWHKlCkIPyjXaJRnzpwpq1myJUQ+POSabdu2yYcaZbvOWWPc5ORkSDjCAD5AcLYTh3JBJWghZ5V8VqOsslR5emk01Dj13nvvHTp0SBg88cQTKtlMnDKzABly2bJlTZo0mTVrlmiCbF4WZVkTDVkgARKwBwE/L/V3gxNxFkcLPHZO5yylTNLHQ92tDSe/5UCWvGm6Ryt/o2pWyUw8KyMrmDz6ht1Z8qHrlrEs3qut3p6v41IASeSeHd5Dt/qGSGuLNupCJylXPWe1ns40dpBuvc+GWCBEQY4qS4dUs8pCz65tOzUx6f1T6ridm1rfVun8aKwu6ibyZmHF2dSgsvsmbOISbZNqyNRwhvUrJPes1g18Huile23uPp4Tn2Kz+aj2gZ44r0NkOCvWkICKwPo9uqiVOPXA7Vo5NZGvsUktnWM0PAJFnFu03XMiNzVT90uwWW0vo2qWMgHIYyOkFwUqy/hJ3bmZT91IXaQQpAsVwhs6h0Q3dnA1ZWjl369XJcuHKC/YkCp2hOBwRK/gAB+T7zCqtjwkARIgARKoOAT4S6fiPNemrnT3cd0eO9mmVV29vTLyKYvKs1ckf7tW72e7Rc3Lh7G5a+Xl42qd5CoQlE+eCXJiqVxG5LMow3MFkfd27dql1MNbC35aISG6370qe0sP4R/29ddfV65s2VJjeHj4V199pZFOCdrY1KlTlyxZcv58yQbYM2fOIH6dSosyOttnn30WIQqNnlIqEWZw3rx5wgDhGUVZo2DXOWuMi1NPPfUU8qIJG0SSNPWkI6uZMEPBIolRbig82ORKi8rIcfXOO++IJtBWX3nlFXFoTgGqlYg8idxgR48ePX78uBxCE53ExMSsX7/e/MiK5oxLGxIggbIQMIz6dfRcbscmln33Ono96JCYSZPaFjp5iZZmFLBxW7Z6oJdxtwxhM7BLIBICi5WpjXuznru3mtEt56KJqxRqVtct2GHOZ+P1xMgRdwQt/jM183LJB82CjanDbw/2vq417jl+Wd6J37mZn/0c1xTnKuv8tKhmOfPd2KOt36wVui88Fk3VavcLMQocO0QiPfg/waPC1IpzsH+Vtx6LEl
+WfPRdFUWHdirAqfSPfbp3rT43B3Ru4gN3SWXyCPK2cntGqSklzJxbrQg9X9Wzl9S7Dczsh2YVkAB8qjbv07lsQyTu3U5vz4QhE6zUHDhd4muID9lDZ/MQ/VIxU39S317Kj9nb2/h9ukTn8bn1YFZe4RU51KHh6No1ELREaGJsgjl7qVDe+wJ1rV1Dn3+O5SidoLD/dK5QvrNzixdu0m0BCfarik9P7eF4lgRIgARIoMIS4C+dCvvUKxd+8Iwuc5CMomU93b4fud7S8pN3heLP0lblzN4yDaOcXfyNupw2bdogl5J4jB8/vtSZNGvWTLZRSWLyKSvKn3zyiaVqFkZ59dVXjfpmyROAz5BIXKTUYyzZwGgZIRahhBk9JSrr1asnp4xC1EFxSqNgvzlrDIpTuGrZAwmBE8eNG6fd5IafhWg6ePBgOTAm1ClLk0stX7780+sPaJnwTZTVLKihzz///MGDB2vVqnXDr5cTIAESEAQa11R/zVr2V7qcP0NYmirAePlWdW6YZnUsk8RMdW5YjxWu7Yd1K241wt2x79vQTK7x9658S3Nd0g7k1TgWV04WeS/n6e2K8PPW+6aHFfNH+ui++2K5f8lm3TP1zWpdXDLIe+Ps454lngjrdi9SzRIAnbNQO8KjryX5n8RV3NM1CJnexKF1hfoxutc+ZKH3f0gwGu5D6bxPe39MVfnrKmXxsW5oi1r9titTvKlWrVKpe2s/+KN0b6MLp7Zqewbmb5NHTq4u5Q869HWsdGeTS2AnN4rAkXP5GTk6n6qW9b1wu2pP5q5b/J+/N0L8eXvqPoP+/Fcn4iJ7Vo/Wuk9ho33iRSHrZ5DH/jlmfL+z0eaGlZ7Xd28op/KlbFhKzbjB4XKrOdJn4qJN6dC0xNnH+oaKvSCikgUSIAESIAESEAT4S0egqICF3Hy9r9+CQM1qentPRT0LVhCwr4eWocxQp04d1SxtZaPq1pkPW//3sGiG0G9kezMdkuQmGmXEf9M4a+pUZGSkqVNyPRyt3njjDRH+DvHrsBlW2+uoRYsWpQps6KF27dqHDx9WxkpISMAQPj4+8tCGZfvN2XAsUYPsaJBtxCGyo1nhDyeaO6aAyJBDhw6VX5vwq0N+NduO3r9//4kTJ/r7l7LT07aDsjcSIIFSCfRs4zv/d71IOxdSCr5alTJ2oE4I0e5k1q/J8Sl6jkGwb17LXh5ayelFYmkYA5m5Jl49WG/pPD65AKnjta/LJc6qgj0a0hjaPfCnDakiec/831NQgz3vB07n7jiq0wV7twtA8Ch7X7KluxepZtn7GbFJ/08NDt+yP1t4SpnTZ2iA25iBZYpCqYxyd9egTXt1EdLW78lI/bjo6XvCm9nt/cecqzO0WSnFG+zc1BeOZbBBbMBfr28FwFsoUvi0a2iDfQDHz+up9dGhem99hnNjDQkIAknpOjULldWCSr95IGnjT/QgCvCIEqkrURkR7Ia8j+KsqUJkiJt86mJymUJxnrmo+2aCwesYJNrEG0X31v7iPWTbwSxk1UJuLaR2/3FDqphJZIj73V0DxSELJEACJEACJGCUAH/pGMVSESqLTGypC/RlsGKbPf+6PVM265Id2YGAKrKfCJBih6Fs3GVgYGCXLl1Ep3l5eQg9Jw7LUkD8QLm5HNBPrreibMM5I8LesGHD5CiC06ZNs05BtOJCrG4Cr8FNmzaJ5g0aNICflTi0VWHp0qVQJZGXKy1NF8HDVp2zHxIgAasJwL0pxF9vCQldfb8u+ZR+8DpT/Z+6WDB/vc7RRzGLCXNvWprXlKkOS61PztBtmoZxRJB68kZ7CAvU29OTnK7XidEmNqnMyik+Hpev/We1Z0ZcUoGcsr5K5UrNDSI9QrsadZdOOUjNKlq2JQOXNneN7lnDHvwxA8zVL8uIxfzdi1SzyojaYc2R6/jNkVHmDwd3wPefiDIVG9D8fmCJoGG3twmQmyCLz6Pvnxn98Tnc5wj0J58ys1xQfFX7Bauczck39z
0E76WHY3WOJr1vLtnZ066+t/zeu2KbznvSzKkamkHsn7lc74t36wY6JzZD+9MXC7QvNjlDT+Ew7IE15YlAqv7THRGk97lp0ZWmZum9QKAJmdM8TH/ElAzrBS3c24ifLAatEeFhNHrhmIGhstI29z8nrV/+TJc91Z4cEArvMdEVCyRAAiRAAiRgigB/6ZgiU77ri4uNR1oI9qOgZbNn3vpvpTabAjvSJ4CYbHH/PeTgbCIRkb6taxxFR0fLE42Nja1WrZpcY5OybUU+m8z50qVL8GpKT9etRyDS4NixY21yvfbrZMqUKV988YXoPygoaMWKFfhX1JhfQOawAQMGKPbJycknTpw4efIkvPQyM0t2TyOkITKx7dix4/fff1cplOaPQksSIAGbExh5Z+gHP12Uu0Xu9Be/intvVHQ9gx3NshlWQl+cHYesG3Ilyo/2CzVjK7aqkbmHKVKSebSprr+h21Qv4frLZMn6nZhqVfb6vw5k4U+7n20zGlmxWLbzyOUPFlzERnLReZ/2AdgFLw5F4a7O/vN+Sz6XWLJX/ft1KdiWLs9q0C02CP4mhiu1YM7uRapZpWK0lcGuWY1VXd08+oiqxtBGZdClmc+7o6Jfm3tB9p5U2SiHiDz2zqhokWXHqI1Fla8+EpGSVbj3hF7k+t3Hc/D3wU+VOjf1gyNUl+Y+GNfMblMzix54+7SZxuaYyUoVptG1ZUngNSyj92rn/9PGEml5096sF4YX+3pa/7s3NrHgo4UJx8/niVlFh7ohL5E4NCw8O/OcYaVc81ifsDGDHKR2y+OyfEMIJOl/MlYz9oFi5sSSM/UErWrBZq1CQB2X+1fNRz6lXcb3hNf1346aGERXVnqAe1m/jgHiRfrHv5mHY0Pm/a7b8IFEXPhs1R6OZ+1EwNRHj+GHlDIBU/Z2mh67JQESMJPAjXrN3qhx+UvHzBujPJkV633r0V0ZPLfcbuKeGB2QspT0viOWpSO2tZoAlJhVq1bNnTsXEd7Onz+P3EWldmVb8abU4cpoYCgOtW/fvox92rt52ecMzaZv375nzpwRU+3Vq5c5KcSEfRkL2nEdTXX+/fffv/jii+IssqD9/PPP8NASNRYV+vXrZ2iflZX1wQcfwFNNBKLcv39/165dt23bVmpWNsPeWEMCJGAPAkNuC1y6Je1EnG4ZFKOcvZj/0LunR98VPqJ3sKE6BQ3r+7UpX65KMly/RnC/O9vbMbhohv6+7yA/s77bBPvrLRNnSmlC7IHUhn1ij/n0JUmiQ8iHl1ILYxPyRa575RRcPUylioXn1piB4S99FadYIoXYszNLyqjBCvvIfiGif8cUtH/pUc1yzLNg21F6tfWrFVF76k+XVNqSPEr3Vv7j7wmLLnPqLLlP3MBfTKzx2S9JCzakqLR1vDtt/jcTfz6elYd2CxnRK8gmbmHy6KWWsTlgzY5rPpHKo2tLP1lag9gmBC1kDNqwO3vgLaUvnSO6o/yegJ5TMgvjEgsQLa1I2h+KF/6zw6rL3ifXZ8H/ScA4gdw83Q4JWHiaLQMbdpem/qTW+wg2tFdqgv31PtAzs/Xmo2qVmFYAH3G5Mi//yoXkAkTi/fmPVLygxKkAn6oaSSKfvCts7c4M5csMHKYnfHY+LVvnmDhmULjhVyDRMwskQAIkQAIkYEiAv3QMmZTvGnzhN3qB6VnFXiHmbqoz2gMrBQG974iilgXHEMjPz583b95HH3109OhRx4x4Q0YxFIduyDQsGrSMc4bj0eDBg/ft2ycGbdiw4aJFi6APiRpTBZXNlSu6X1+mmhit9/LSCiljtMmaNWtGjhwpn/rss8969uwp15S97Ofn99Zbb40ZM6Zjx45QcJUOjx07NnXq1Pfee6/s/bMHEiCBshNABLDJI6o/8r5Oklf6xPrOZ0sT1uxMh5dDi3pezWt7oeZIbB7SL209mKUSVMQ0Av2rFBRd9XK3114kacH22piYvDmPyvp2Vkf5M2cs29qcjs/Hn3afgb
5VZ06soeGs1rONH/ani7hn8mrdsB4hoQGlf1ppT8CKs6Z+6VHNsgKmkzSpH+Xx5XM1dx3N+fvw5QNnLscnFSKJDhTuGuHuzet63dLc106praDcTBgSPrhLAJL//f5PhuEvypy8K9+uTfr5j5QJQyIGdSldMbIhz60HcxDnU3R4h76rB4DAiSrueqIguImYI2jhclSJD0X/ooAl+Dcfi7y1eSnpZoU9CyRgWwKq7ZhmbrxTSUfGF4euT3TWiiT8XT/S+v+1RyJVvl+ydbWgqvfeFvzDhpI9pvLnY4u63re1KHGplJuwTAIkQAIkQALaBPhLR5tPOTtrykMrLbtY4xd6OYNg78u5AQsW9r4kV+kfakf//v0vXLjgKhO2ep5hYbpcHehEaBhWd+iAhmWZM36wPfTQQxs3bhTzjIqKWrduHVJziRqNQkCA3sKKHHlSoxVOQR+VDXx9Lfu5tXPnzqFDhxYV6RZZJk+ePHr0aLlPG5YjIyPhDQa1TCh2M2bMeO6550JDGcfGhpjZFQlYT6BpLc8He4XO+z3ZsIuTF/Lxd9NvhmeM1xw5mztxRtwnT0cbzVdhvI0ltd6eehJWRrYJD3/9PjMv620X8ClDXC/9jks5gjNKm3re2kZVyrBtC8t//ToFYu95iP7GdtWIkPOwzfzp6bGqen/vKg/dYU2MWVU/1h0a/tKjmmUdSadqdXMjH/w5fko1IzzeHhk5/p7wdTsz1+7KkCPvKZOBDvTO/Hgoba+MqK4vcOtNFm9cnZuV/p1q36nLCE6o19LYwcptOvcsvNyQ9EtldcfNAXPWlLzx/nvq8tmEglrVzMo2pOpHPmxd3/vZeyMaxXjIlUbLPdsE+HjovaOqzBrWLL0TVRMeui6BSvqCUkGR3uemRdfl7aH3wZaZY90ntdbNac58EMNz9ICwUpXdR/qGLN+anp2nnqSGX5c5o9OGBEiABEigIhPgL52K8+yHB1e9kKLnNa5cuyqlaMUBYo8rta+gVadOnVInbSubUgdyKoPdu3f36NFDZBJS5oal/Hvuuadp06ZIJgRBBQ83t5LUF3PmzPnwww+d6hLMnwySJ8nGnp6e8qFzlssy54kTJy5cuFBcF8LoQc2qWbOmqNEuqASt3Fxd+mLthgjlJxtYJGghuxXCA4oYgOgHUtbbb78td2jzcrdu3R5++GEE21R6zs7OhhOb8+cYszkHdkgCTksAocCwerVwU2rZZ4jsNU99cn7GhBh7aFohAXppohLTzUoarzILclSCVuzv/t/D1cuO1FQPnZr5vfqQWf1jJb1dQ59/juXIXT3UO8TPy6xIUHIrG5blX3pUs2wItsJ2BT+MB3sH4w+xyH7bcU3Zitf/efnr1vTIYLeR/Uzupwn0rTLlyahSAU74LG7rIb1vYoZNsCtTTlbXs42/Yba8Xu11ghZ6WLU9c1zZclYhutqM8THubnpyguHclJqn7w6LCtV7RzVlyfqKQECVujwlXS3wmA8hLEDvkyUxrXT1F50npuuZIZSu+SOqLPESuLdb8CN9gs0JNIpX/YhewSp/r05NfdvUVyvQqlF4SAIkQAIkQAIaBPhLRwNOeTrVoo630aDrJ87nIdOwg6/0k8WJP6w3kttoyG3Bk4ZXc/BkbDicfQUtG060PHUFTxp48MhqVv369ZFdqXfv3qpwc+KqIXGJMgpmRmmQm9zAcnx8vDx67dq15UPnLFs95ylTpkyfPl1clI+PDxKkQaQUNaUWVI5c6enppTZRDOQ7CjWI7Gdmw0uXLt1xxx2yhnfvvffOnDnTzOZlMevcubMQtNDP8ePHy9Ib25IACdicwPPDqvl5V/l6lVkxfLRHh6uBnTStMP34eGYukyWl6eleiIuoPX/nOdu2gc99PYLl+Uz7OUGs0W87mHU4Nq9JTbP2jmCz+aNTdIIWspXc2+OGuWeJK1J+6eFQFMQpFkjAagJ1q7uPGRQ6emDohj1ZM5cmiLB+6PDLlUl9OwZEhli/Vm7mrN
buyJSTWvl4Vfl9txENDCFD06/n7Fm9PX3MgBDtxFeQrF55UE/GRqzCP/eX9JyRU7Toj3Sszps5SZqRgCAQFqi3VpCYofe5KczMKYSoPqnN23qSkqEnaKmSX6rGbRDjGRGk9yr29qwMgRaO0Xh114n0gEylaqJxOPz24AUb0/DyETZjBuoFHRH1LJAACZAACZCA+QTEDxxRML8tLV2FQEtEZFlnZLJbDmY/2jfEyAl7Vm25/qNANUi7Rq69TUfvS6rq2nhoJwLffffd4cOHReeNGzfesGFD9ep6P0TF2XJQUIlDtWrVcv6Lsm7OiKH34osviqtzd3f/5ZdfkClK1JhTUKXvOn36tDmtYBMbGytb1q1bVz40VYYM1rdv3zNndJlyevXqhdRulSubtZPXVLdm1jdr1ky2PHnypHzIMgmQgDMQePKu0NYNvKYvTjQM2GVqeqEBbrUi3FWuPzCGpgUnBpvHHlStuCXoK1WmJpmkv0wWob9sV6mSLlWHOem1ZBuN2GWmJmNRfbVgt26tfOUmWXnFb35bsncEM/nsl8QvJtaQDUyVa1fXix4WFeLm5e6IN39T8xH1/IEnUDhDYdesxlZM41Jq4fG4/FMX8o+eyzt2Lk8VdiMqxL1RTa+GMR71oj3rR7lHBOstQ1sxnJlN8PK8va3fLc19Hp8aK97TkGTr1y3po+2/Wr1iu94upVITX+GiICHsOHK5U1OtvZzIUKh6T6gb6b71YLbIR/3t2uSBXQJurPOlmU8QzZyKQHiQ3lqBSl4yOlW8moqlzJaIoKvIsb5eVRB18HJ+SdBCvD8Yba6qVH2gq+ajMr6ve8hdnf1VlVYfYraRIVVlQQuiuNW9sSEJkAAJkAAJCAL8pSNQlNdC89rGd5ceOH05O7cYX4ocduEXUwpjE40EP8QE2lPQctjTUG4GWrBggXwt33zzTTlWs3ClKnGovHporVmzZuTIkeKZhSAEWQhed6LGzEKDBg08PDxEQqxTp06Z2VDl3gShtNSGBQUFgwcPRjo3YdmhQ4elS5dCihM15hTgRiZ7kiF4ppkBD2vU0FtyTUqygReIOROmDQmQgEUE2jf0+WFybXgSYFVULAEb7QEpYR7tG3pvt0DE9pk0+8LGvZkqM8QefHr6+U/Hx9hQOPH3rty4hueRc3nKWEfP5+F7m3a21StXru46clnMDYEQm9f1EocoRIS4CweOrNzigsIrGgG7YJBbULJIp7SVu3JAuV97/+/XJCPXjjIWpMS/D+d0bKK1Au6AWXGICkggr/DKlv058A3afihH+BgZ5QB9C38b9pScDPKt2qmZb7dWft31xVqjbU1V/rk/WyTmuVrppv4dAkypy3j/mTQ8YuQHZ0VXB2NL3kBEjc0LR8/nn4izZpRft6VrC1qGU40Jdx/QOXDpljTlVEZO8XdrU58aTP8SQ1Ss0SKATQ+Qo/CJqRgdO1/6Dfzj+tTpSxJEp++NioaErBy2b+T7x78l3wpSs4oOns1rVsv4co9ovuOIzocY+bzaNeDnmmDDAgmQAAmQAAmQgJMSgFN4zXB3QyUJe0+X/pWBWOgOm/emf7ONjgW/dlff66a368roRbLS5gT+/vtv0SdC0rVr104clr/ClStX9u/fL64LwRLNzyYlWjm4YMWcd+7cOXTo0KIiXVQKhOxD4D4rZl6lSpUmTZrs3btXaQtnPmhFqjiERrvds+f6stB/pxs1amTUTFRevXoVoS83btwoajDu6tWrcU+KGjMLX3755aRJk4TxG2+88eqrr4pDjYLsGQazkBBH+95qzI2nSIAEVAR6tfXDH3LA7Dqac+B0HpQtLM5Cy4FZ8zrezWt7Narp2aW5j/hihKwzL35ZacOeDFU/+05enjgjbtpT0TbUtHq08ReCFpbeFm5KmzBEL1Svag6b9+dcTNXtVLq5kY8quVfdSE/hYYbvneeTizS2Zl9I1r35Y6D6UaWs0KkmU/ZDLDg+OSD8pa/iRFefLU3s0Li2qd
V8YcYCCdiKQHxKIRayV25Pz8nTibvmd56WXbT673T8QRTv1zHwwTuCVaFEzenql83pchar1nW9EGrMVEOo4PJKfZJ5AdBM9WZO/cpt6jdDc1rBZvO/WRk5VwJ8LPOeRFawlX+nFxaVSBELN6YO6x6k8mc1cwI0q7AEgnyrIGuU+DQ8HJt7JDavsWZI23+O6zaL4DOobUNdNJuebf2EoAWkC9anvPO4VnY6jLX/lK43ZKDUDjlYYZ8mpcrDXwAAQABJREFUXrh1bsTkRgIkQAIkQAL2I9C6vo+hoIXh5v2WMrR7oGrxwX7TWLrZeEJ0bDOy36CO6ZmClmM460ZJS0vLy9PtboM7jqm8Wbo2TlmCHGLOvJBB6uzZs8IyIiLC09PRK31idDvN+cSJE/369cvJ0W0hfOutt0aPHi3GtbTQvn17IWgVFxdDc7r77ru1O4FH1+bNm4VNZGRkqcLhxIkTFy5cKJrA/rfffgsOtmanAJ5W0Q8K//77r3yoUT527Jh8VtWPfIplEiABJyGA5a3e7fzxZ8583nm8esEXV/46oE4So/hpzZgQY6tvctgAPnNZopjS8q3po/qH+niaXP/F2q4wRuG2Vuqkg/Wj9WLx/XMkR0PQ2iVtIUdv9aJNrqHLg9q23LONH7ZZCf85FNbtyuzT3qynybYzYW8VjQAk5Hm/pX69KhnuWWW/9szLxQs2pvy6LW3swGpDuwVaJMpGh7vfdEg3BSST0xC0EPpM+J2gDQKl6lraoQRhae1OnaAVFujW5+YAjXHOJRZsvu7Lgrbrdmbe2z1Qw97wVLWgqkO6IglQinIKz85XK5NfHqH3hc2wFWscQ8CF1t97tfEXghbgLP4j7X8Pm4yTjz0u+07ofhDVi/LEdwaBtEsLH7eqlYTIimx2T6cWaoQb/Un/k7pbS/UnteiZBRIgARIgARIgARJwKgKDuwYu21oSLEGeGLbxLf8rY5hDElcv/TNDBHGR54CyvOVIdcpVDk2u9djkApD+R/Uw7FZlgEPrbAxbOWeNm5ubnJ0IWkipKgv8fhYvXuxsl4NIieZM6dNPP5XNhgwZIh86uGyPOV+6dOmOO+5ITk4W1/LMM8+88sor4tCKwj333CO3mjNnjnxotIw4gZcv6/YwDhgwAM5wRi2VyilTpkyfPl0YhIWFQc2KitLaJimMDQtNmzaVK//55x/ZWU0+pSpv375drunSpYt8yDIJkICrE6hSudLUMVG3NjeyCIV8Wk99ct4mK+CghGXr29voVocRmXrCjPPwaTAEiM0Yny5JgqImTsWEud/ZQS381IvSE7S+WpWEmF2iiVxIzy6eu0b3EYBTDWP0ohfKxvYr4y1/zEA9p7QvlieJdUP7jcueKzgBhCL7v9nxM5Yl2uq1rPCEm9fUhRff/P6iRXi7tNDbaTjjF7xsjbwJKH1u2qsXf6N5aaHPLJqJofGfB7LlZDwDOgeMvydM4+/lByKUzENKV6v+1ku+Zdi/0ZpH+oTIjrDLt6WfuZRv1JKVJGCKQM92fvCbFGfX7MrYsFe9SUWc/fCnBNlHE/GKxSkUfD2r3NtNt3MOOd7wSZ2QpufiLOx/3Jgma8DBflXvusUyTVd0xQIJkAAJkAAJkAAJOJhAk5qerevr/NTl0b9cmZSsn89bPmurMpImzFqh2/Ird1st0K1jY+Nzk82cvGxfQcvJL/6GTA+5heCVJYbOzs5WremLU6KA6G2IaCcOUSgsNCuPrtzE5mW4Xs2fP1+72927d69fv17YIDPT//3f/4lDxxdsPufMzMy+ffvKcfMefPDBadOmlfHSevToAYVJdIIwgFu3bhWHhgXcD++++65cj8xY8qGq/P3337/44oui0t/ff+3atfJtKU6ZWWjbtm10dLQwPnfu3Ntvvy0OTRUQI3Hu3LniLBQ4wBSHLJAACZQPAg7TtF64r5q86IbAhqOmnj0ely+7E6dkFr389YV5v+vpTxPvjcCecRXtprU8u7fSqVxQs8
Z+cg4OHyoz1Iybfk6Ju6ic6t8psG7kDfDQwuhdmvkgIpOYYXxKwZK/rFkEFz2wQAKlEvh2TYpwJCrV2FIDBDBctkXn1VRqc/wwQ+xTYYawohNmnPv3VK6oUQp4T9i0L3vqwktyfdM6uteOXG+r8ir9eIO9Nd2zMChCq7VvpBMDEOrtZLwuSqqZs0Inw3vogjnDI+2LZXrvfmb2Q7OKTCDAp8rEodUEAeyTePnLuIUb067n1So5k5pZPOXHhNU7dC9YSFAjDFJEPDkgNCpE9xF5Kj7/sffP7j15WXaXhA794cLEaYsuyUM8Nwwf8Vy4EM8DCyRAAiRAAiRAAs5O4MHeoUaniIgUr8yJN3rKhpXv/ZiAlKVGO3xyUFjVKuoFEKOWzlzJkIM34NlB0qyjR4+KgZHHCCv7EBVEjSjk5uaOGTPmu+++EzVKAXELVTU35PDxxx+Hw9mwYcOMjv7HH3+oZJVHH31Ulj2MtrJ3pQ3nXFBQgAvct2+fmHP//v3hBKbtGiWMNQpIo/Xkk0/KmhBCGq5bt65Dhw6GrRDBEn5vBw4cEKeQCqtnz57iUFWAdjVy5EhRiQiQy5cvb9OmjaixrgCwr7/+umiLyd92223du3cXNaoCtMAnnngCARVFPVKOVa9uMoaJMHPmApzkfvnll7i4uIYNGw4cOFD2xRTTRjBJBIfEC6dXr15GRcQdO3ZAv/Ty8urTp0/t2rVFQxZIwHUJKJrWC19cMIw9qPhp2ST2IJZunx9W7dW5uq+G8EV44O3TULma1fH2dq986GyunDdL4TnglsBbm+tWjWXIL42IwPTEt0AE8Xv0/TPIFlYzwj080C0hrfBcQsGB05flFbfIEPfnhum5SckdKuU/92UNfU3nUGtooNR4eVT+/mWL3wHGDgwb/XGs6HPOquT+nfyxKV7UsEACtiWwfk+mbTtU9QbPpEFddM6XqrOGh5Purz76o9jsvJJvFwfP5D4+9SxetrUi3KNC3PANDa/c/acvn7yg56jUoYnvLU3tKGhhD+bWQzqHsPrRnnWq69b0Da9CqUHI0L8P61qt3Jo+YWgpby+GXT3QO3jR5lQ4rSqnNu3L3H86uEUdneynajJu2jk3M34dPtQ39K6OFjwvqlF46FoE+ncKWLsrc8f1uxGfeh8uuvTFr4lNank1iPbEogxS6B0+k5tboHOIrFzpprcejwwNUN9McBl8+aGI8dPPwz1LgZCYUfjEh7HeHpXRW4h/1SOxueeTCuSdKDDr2sLPzEDHrgWWs7WUwO+7sz5fmhCXbP3W3uhQt9EDw++42cjCi6WToT0JkIDDCLhQnF6HMeFALkEA+01rVXM3GvQPAWN+2pR2X/cgO13Iok3p2BpotHNMqV+H8vA1nhudjD6/9q2888475QFOnTrVunXrn3/+WY49mJqaOnXqVCgThmoW2uKs3MONKiNv0/DhwyFLyPocJpOQkPD5558jEF96uu71gxV82SvI1eeMJwtKJLJbiQtBuLxFixbZKiPapEmT5IRSGRkZ0D+gRckKkIIaXk3wPBPTQOH999+HJCbXiPKuXbugfol4gDBbsGBBt27dhIHVBfje1ahRQzTHPG+//XbIcnIwRnH2xx9/bNSoERz4RI2Hh4fKyUyccpUCcnHcfPPNcNF76aWXkPNMFTdSuQrEDoWejexlTz31VIsWLbZs2aK6OrisderU6bnnnhs7dmyzZs1EKjWVGQ9JwOUIOMZPq2+HgDcfi1K5W2GtbdvBrPV7MgzVrLs6B04eYVJHR+aPVx6KlKN+Yf0NEtevW9O/XpW0Yls6ytdX5K49IRj3jUcjS1WPsNSOL7Wl/iGDjhXPctsG3h2k/K7p2UXzf3OKLwxWXAubuASBE3Fqt0XbThuulhZ12DDGY/r4GCyOy60gPOMFO2tFEpbgf/krTaVm1Qx3f2+U3itdbmuT8podmbIDyh3m5SC8rZWvu5vuQuD7UlRsVv5aec5wanmwl85JC6c++yVRNl
CVL6SU/u6Et6+sbJ10oeqBh+WSwPtPRHZqqhfSE6EFdx3N+WF9Cl5cu4/lyGoWCDzRP0wVb1BgQf0nT9dQfVYipx0yda3blYHPPpWadcfNAe8/YWVQdDEoC+WDwGdLEsuiZgECms9cmlQ+aPAqSIAESIAEnJwAkgI83Me4kxZm/tHCS9h7Z49L2HcyF8HbTfU8dnA1bDwqBw/dz6RycDGucgkQgfCQZ4vMYXBP8fHxwRr3rbfeGhMTg4hzUAjOnj0rm4lySkpJhmdR4/gCZotBoet89dVXEN4aN24MoW7o0KHwKYESM27cODgwybMaPXp0rVq15BrHl204548//njhwoXyJSB0JNzsoNtZ+gAruR+ljNCUSD8mO3tlZWVBuwoKCurdu/fkyZMhp9WvXx+o4QknNx80aNBdd90l14gyfLng6ZWTo8scA9kJT5mlE1bsIVuKnlGARxFEWUxbVELg+fLLL/Gkd+zY8eGHH37jjTeg4kDmhO71wAMPXLyoe3vFZSIKYp06dURbVyxs2rTp8OHDYubwezN8/X7xxRfAothAD541a5awVwozZ84Uwjb8vb7++muVAQ9JwHUJOEjTau//+YSaEUFupYLCfPp1DND+MgfnrfmTa7fVzwJitOfbWvr//HrdVvVM+j0YbWWPytGDdBFr0f+P61MRaNEeA7FPEnBOAnA/WvBqHTlkqKl54kcmzD59poafl/FtQKYaWlqPFX+5SW/znAOw4i8nIEQG6S0HdV/h5A61y/f1DAry1TnKQCP864A1/WiPwrPlmwDuxk/GRQ/rHqL9oQkIdSI9Pn26xsh+JpdvYIPooF//X63aER7a0LBN5PF+YW89Fqnap6LdimfLMYEieRuRtddZ6j1sbcdsRwIkQAIkQAJqAnd2DJAjoqtOj/n43M5jNv5aju/54z89pxpIHCJTePdWumVbUe+KBd3PG1ecvevOefbs2f/888+JEyfkS0CAQTlwnDgFSQDuGnL2Kdm1RZg5uPDrr7/ef//9iqqBJXg4aan8tOT5QGWBAiTX3JCyDeeMsHKqS1D5TqnOahyaagip6dKlS+PHj5fbQtb6/b+HXCnKUEPhcSUOVQUoKElJ6i1pwltLZVzqodBdhGX79u1BGGKtrHVBP0MMPTyEmarg5+f3ySefQNBV1bvc4Z9//inPGXxQI4u4eKK3bdsm26jESGTUQ/RRDQP5FMsk4IoEoCF9PC560uwLG/eqw5TB22nCjLhZE3WOnlZfIFSlZe/U3bg3e8GGVHhmmOoH8Y7GfXLuqcHhI3rpctQbGteP8sCsNuzNgjJ0Jj5fTpcF4wCfqvhS+Eif0I5N7BivzHBWGjXNanlCXRNpjbBrfu6aFARj1GjCUyRQzghEhrh9MDpq38ngdbsyEeQTMc1UF4jV+Y5NfR67MxQvcNUpmx8ePJuH8KeiWyS6qx5SuuKu2N/R3n/DHl1SopXbMrq1tPgnKPzVHrkzFBmJxBxmLku4tblr7yIS11IRCjePPuIMlwl/5eeHhQ/vEbjoj3R4KovAnsrcEEswppr73V2DBt8SIHs2m5o5kk0ufK3O9kM5P25M3XkkW+WVhfxb6GrIbYEIQmiqB9ZXQAJIpTbjl8S4JL1dsxZxgEvuqLv09v1Y1JzGJEACJEACJGARAeyieHdU1LA3TsMZ3bBhXuEVhPv+YHSMrUSmWcuT5qxJNhxI1Pzf8AhRdvWCfb8jmuNyYSsb13omsIi/fv16RB5Dxh3tmcMdB84r8NWQBa1jx47FxsbWrFlTu61dz3bu3BkuKSNGjFAtwasGhfMNYhJOnz7dVrH4VP1bdOhyc3766acDAwOfffZZo4H75GsH51GjRiFMJXJiyfUOLiNpFmRaxDyEfgmHMO3RMWcE6IOxq6fOUi4TjnFvvvmmuGT4scGjThyigACPSIu1bNkyUYk8W6KMAvzbAFCOY6kykI1ZJgHXJfDuqMgXvriqyqcFrWuEflysslwgeuvV1g9/kH
Pik4uQ3iO/4AqSeVStWum9Hy4iG5bSOTSt6UsSDp3Je+WhCB/Pyhoj9mzthz8YpGUXI3VWYnpRtaCqNau5B/iU7tiBFfMyRn63tIcPxyA6k1kBmnDVZZybBjSeIoEbSwDaNv7+775qeAdAFqvkzKIqlSpBTMIfAvFpz23Dxw20DQzPfvJ0tGElaqAxW/0qw49bU20tmuH9PYLwZ3R6Hzxp7tuF0easrGgEosLcJw4Nf2ZIeFJ60cWUwsT0Qny2xoS7hxmkyyqVDFwkOzfzwV9B4RV0FZ9ShDCGESFukcFuSIpZanMYDOsRhD9zLK2zsSKTpXUDsZWZBMSXMTPtaUYCJOBaBJxkA4drQeNsnZ9ARLDba49ETpqtdooQM39x9vmh3ULGDQ7F9iBRaWnhfGLBa99e1NjOiw6fHRrRup6zbMO19AIN7a2HZdgXaywigMBrS5Ys2bBhQ8uWLQ0bVq5cGTmZVqxYsW7dumrVqiGOX7169WQzlW+HfMoBZW9vbwgnCDO4c+fOGTNmIFKi4aBYvkcWJVwg4qohQ5KhgYNrXHHOQATJB/rlCy+8YEr6hQqCtFh//fUX3P4Q89DBVA2Hg1j7zjvvYM4QO+U0YLIlAif27NkT7kpIEVc+1CxcHZJjibxZ0OrwlCFwqHzVKKMyJKQklQXeASBYqgyQfysgoCQ9I17ySEKmMuAhCZQDAlCbpo6JkkNpoebDsTFImmrzq8OXQmwDR+TA29v6YWkbK8tz/q/mnfpJUJFe65EpZ2UXCo1pILFWy7pekMoQ1swcNUujK54iARJwAAEsmkeFuuFli5XQbq18kWSrVDXLAbPiECTg0gSw3Ri7OvCp2rudf5v63laoWfLlI1dczQiPTk2vfVLjY9pMNUvugWUSIAESIAESIAEScFoCPVr7De5icgsO4uku3JRy72undx4xGV1G49KwhXfOquS7Xz2lrWbd0zVoeE+Tc9Do32lPVTKMG2b1XE0tuFvdoUUNkYbKInunMs7IyPgvYt9R+F3BIycqKgqx48LDw51qktqTOXXqFAIhIkReamoqluxxCbfccgukOO1WN/asy8350KFDyNKUmJiIyIHQsYA3OjoabmfOoBeaeiozMzOP//fAKxQTbtiwYaNGjcy5t5E+Cj5npro1v75r166bN282376MlnBQi4+PhxaFl4DRrhBX8ODBg/Dfatq0qVF3OhCDAXKSwcDd3d1oJ6wkgXJAAN5RL3xxAX5a9lOzNCgt3Jg2bXEC5qDYVA92nz4+utR8Hhod8hQJkAAJkAAJlJ1Az2ePZ14uLns/Sg+hAW5rpujtibRVz+yHBGxLwJRnhikvVVuNfqPGtdX8y1M/pp4Le1+jve8xe8/fFfu/Uc+1KVa8B0yRYb0NCcAf/f63zsQmlhI1t2ktr/6dA7FhyJwdeEkZRT9tSFu2Ja3Ur45tG/h8PiHGnKDQNrxke3dl35CD9p59uekfPhkd/nu47hXV/e/hWvN3uTlD4cDDtSDDaQyuS3i41rStnm39/x4azaFEduzYUcMAxCBSahjwFAmUDwLQsZBP6+WvL/RpH2AP3yxtSghS1LCG56Qv41Izi+pGeXz2dI2wQH4j0mbGsyRAAiRAAnYncHMjXzlxWhnHG9QlsIw9sDkJkAAJkAAJkAAJkEAZCcAffcqYmNEfxaZnF2l0dehsLv6m/HixU1NfiFtNank1hfO6X0koZiQ+SM8uvphauPf45T3Hc46cKyXPizIQ8kciJUE5U7NwaVy+0biReIoESIAESIAESMCOBN593Lgvox2HvN41YiXNn1z7m9UpYweF+nmZla7jelP+TwIkQAIkQAJ2IfD8sHC3qjf9fThHe72j1LHDAt2G9wh+sHdwqZY0IAFnJuBsnhzOzMrV54bErkik5+Cr8PXkTwAHI+dwJFBxCdSt7o71h2dmnDt1Ib9UCtsPZeOvVLNSDeCb9d4TUb7lcbmDglapzz4NSIAEbgABpKGqWtUGb1DI5X
YDZs8hSYAEXIEA0n5MGu7UoXFdgSLnSAIkQAIkYDMCoQFV33os0mbdsSMSIAEScBEC1UPcTpqxyGvbq4kJd7Nth+yNBEiABDQIIAXp3Em1XvoqfuuBLA0zm5xCIuGH7wgbMzAUqU/L5cMG68WCi0tnsRJXwQIJkIAzEBj538MZZsI5kAAJkAAJkAAJkAAJkAAJkAAJkAAJ2IlA63o+jhe0Ojf1tdPlsFsSIAESMErAy73ytLHRny9P/nZtklEDm1TCJQuxcDo19bFJb87ZSWXnnBZnRQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUL4J9O3g7/gLHMhEg46HzhFJoMITgO/UuEGhc1+s3aqetz1gIFn4gv/VLt9qFrjZ0kPLHk8D+yQBEiABEiABEiABEiABEiABEiABEiABEiiXBMID3BIzCp3k0gJ8uEp2A56K5nW8+nUMXPV3usPGHnpbMOIcOmw4DiQI7JrVWJRZIIEKS6BZLc+vnq+JtKmfLU08fj7PJhwa1/C8//aQXu38qpTXOIMSJn5USzBYJAESIAESIAESIAESIAESIAESIAESIAEScBSBjk18ft3uOCVD+7LqRnpoG/CsnQhMur/a4bO5Zy7l26l/uVss+04YEibXsEwCJEACjieAj78OjWuv35P1xbLE80kF1k0ALl+dm/mNuD24XUO7uHxZNyt7t6p09epVe4/B/kmABEiABEiABEiABEiABEiABEiABEiABEhARSA9u3j4W2eSncNJ67PxNTs2qUBrgqrn4sYeZucWT/oyfueRbLtO4/Y2Af97OMLbgxlY7IqZnZMACVhG4I992TuP5OANMDbRAmXr7luD7u8ZVDOiwm3FoKBl2e1FaxIgARIgARIgARIgARIgARIgARIgARIgAVsRSM4oemvepYOnL2deLrZVnxb14+9dpU6kx2N9Q8t93hGLsNwQ4z/+zf5rf9a5hIKTcfnZeba5HxBJsn60x63N/Xrd7BcWwFBVN+SJ5aAkQAJmEUjKKNp1NOdkXEFiemFSelFSeiE+InMLriiNm9T0ahDt2bCmZ8Noj/oxHp5uFVSbp6Bl1s1EIxIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggRtFoILqeDcKN8clARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlAAFLUuJ0Z4ESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMChBChoORQ3ByMBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABErCUAAUtS4nRngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKEEKGg5FDcHIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESsJQABS1LidGeBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAoQQoaDkUNwcjARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlAAFLUuJ0Z4ESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMChBChoORQ3ByMBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABErCUAAUtS4nRngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKEEKGg5FDcHIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESsJQABS1LidGeBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAoQQoaDkUNwcjARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlAAFLUuJ0Z4ESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMChBChoORQ3ByMBEiABEiABEiABEi
ABEiABEiABEiABEiABEiABEiABEiABErCUAAUtS4nRngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKEEKGg5FDcHIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESsJQABS1LidGeBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAoQQoaDkUNwcjARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlAAFLUuJ0Z4ESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMChBChoORQ3ByMBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABErCUAAUtS4nRngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKEEKGg5FDcHIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESsJQABS1LidGeBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAoQQoaDkUNwcjARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlAAFLUuJ0Z4ESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMChBChoORQ3ByMBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABErCUAAUtS4nRngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKEEKGg5FDcHIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESsJQABS1LidGeBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAoQQoaDkUNwcjARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlAAFLUuJ0Z4ESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMChBChoORQ3ByMBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABErCUAAUtS4nRngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKEEKGg5FDcHIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESsJQABS1LidGeBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAoQQoaDkUNwcjARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlAAFLUuJ0Z4ESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESMChBChoORQ3ByMBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABErCUAAUtS4nRngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIwKEEKGg5FDcHIwESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESIAESsJQABS1LidGeBEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABEjAoQQoaDkUNwcjARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARKwlEDV2NhYS9vQngRIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgAQcRoAeWg5DzYFIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgASsIUBByxpqbEMCJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJOAwApWuXr3qsMHKzUC5ubmVKlXC5eBfpVC5sk4aVGqUs/Ili3q5UilrnDI0Zg0JkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkAAJkIDLEdBQZFSnxKEo4GKvXL
mCf1GjVOJfLy8vl4Ng9YR1MozVXVTMhuJ2UQrKbaRCoZwSlapDUc8CCZAACZAACZAACZAACZAACZAACZAACZAACZAACZAACZBAhSWgkg9UhwoWQzWrouGioGXNM477BveTckuJgtC0lHqlX7lszUhsQwIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUGEIyLKCKBuqWTglVIkKwoaCljVPNG4U+V5BWelF3D2iRtW7pfWq5jwkARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARJwRQKWCgTC3lB6QA3OCgNXpGHFnCloWQHtWpxKPNCyuLhYuWOUQ9QY3kCGNdYMyTYkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQALlmoChoCBqFBkChxAmwOCaSvGfTlGueehdHAUtPRxmHuAuwU2DB+zlguGhYYdKK/PrDS1ZQwIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIk4EIELJUGShUgYFDRBK2qLvR8O89UcZdUqlQJ/1aurKcIolKZJO4kUXaeaXMmJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACLkFA1sBQlh+K101FE7T09BiXeAqdZJK4UZSHcg9hVnJBmSRqVAUnmTynQQIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIk4FQEDDUFpUZWH1C+rk5cy4tUoR4UtKx5uhGhEneMcg+Jf9ERyqI7uSwqlYLGKZUlD0mABEiABEiABEiABEiABEiABEiABEiABEiABEiABEiABMolAQ2xQD6llPGv/IBIoSTTKpdkjF4UBS2jWEqpFPon7h6UhbVyM+EQBVHJAgmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAmQAAlYREARGoTuoLRVnG2ESGFRh65uTEHLmmdQuWPQUlGzcD+JGlnKku8zud7UkObYmGrLehIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIgASckYM7iv7BBQZRxLUoZ/woZQhYmnPBi7TclClrWsFVCDirefNfurP/8seR/xU1mTe9sQwIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkUIEJKIoDAMjSA8rKoSxSVBxIFLSsea5xx+B2wb8QQvFQCqobS+lXubcMxzBVb2jJGhIgARIgARIgARIgARIgARIgARIgARIgARIgARIgARIggXJGwJRMINcrZeVfIUYoBUWkKGdMtC+HgpY2H+NnxT2EgvKAnVyQm6FeORQF+SzLJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACJEACFZmAkA9EQaGhHOJfuSAfVihoFLSseboVbz7VTaPcT9Z0xzYkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkYIKAELRwXtEm4KcFqcKEefmspqBlzfMqbhfcMUp7pUbVl3KHqSp5SAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAIkQAKmCBiKC7IGAWECD7nGVD/lrJ6CljVPqHyvKGXRi3KIf0WN4Z0nTrFAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAiRAAgoBWVAwKjco6oOiZslKREUASEHLmmcZ94pyJyk3DbrQvm/ELSgKpkYt1cBUQ9aTAAmQAAmQAAn8P3vnAffXdP/xJiQSiYhNjCA2RYgELYraq2gpWqOooDVam1JUaavLKi2latWs0Ro1y792Qs3EaKzEii2JLP83J77P9zn3/u5zf/e3n9/n98rrybnnnnXf99wzvt9zvkcEREAEREAEREAEREAEREAEREAEREAERKDZCHQp9rcA5kh9BK/KImRQUqSG7K6eUmgVebPUFX7EDI7gDpf5k7NYUZRS/lEwXYqACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACD
QzgVIC/1L+qc9igXGEH8HMkRqlW3pKoVXktVJRiGa7soIitA1rTxF2iiMCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIpCbQNA+8NdrJYgdVBW5k2n5gFJoFXyFoaKkVhd/KzVAwSwVTQREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREoFsT6FLF4AN0axLxw0mhFRPJeW2aKnPkjKhgIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIlCAgKkkzFEgkRaNIoVWkRcXtvWF6mKVJnLYZZEMFEcEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERKBdCeRRQJgFwjaBJIVWkRdtyqqoShVJS3FEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREoCsCkUrCVBVdxesm96XQKvgiS9WbdqtABfEpmgiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIQFcETOkQOeyyqwS6z30ptIq/y2LVpVis4qVUTBEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgaYkUExlUCxWUwIoo1BSaJUBy4IWqCsFolh2coiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACHRjAgWUCAWitDRAKbQKvr6oomRfFsxD0URABERABERABERABERABERABERABERABERABERABERABNqSQLbeIbrbDoSk0KrCWy5cbwpHrEKhlYQIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiEDdCRRWDRSOWPdHrEmGUmgVwZpRafwt7y6SjeKIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAi0GQGvXPDuCEPGrShk97iUQqua7zG19qR6VjNXpSUCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACItD6BFIVCqmerf+sZT+BFFplI7MIqkOGQg4REAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREIH6EGhP9YQUWpXWrvasN5VSU3wREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREIEyCbSzSkIKrTIri4KLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAjUl4AUWlXj3c560apBVEIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAKdCUgBAQ8ptDpXCl2JgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAg0GQEptCp6IVKKVoRPkUVABERABERABERABERABERABERABERABERABERABERABMoh0LaKiRZQaD32+a+ct6mwIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiAC9SPw+89/tctv9tolXZWU//vf/44ePZqkevbsueqqq1YlTSVSmMC0adOmTp3ar1+/wilYxEmTJs0xxxyzzTab+cghAiIgAq1C4MMPP+zbt+/sszd7H9oqPFVOERABERABEegGBKZMmdKjRw/mON3gWfQIIiAClRNg4Txyj969e/fq1avy1JSCCIhAqxOgTfjoo4/69+/PaKHVn0XlF4EMAn/605/QZxGAUfHIkSMzQha+1dTCOLRZjz76aHi24JBOq/CbLhxxzJgxd9999xtvvPH666+/++67tL+MxuaZZ56VVlppxIgRvJH8Sqnnn3/+X//617hx49566y0GdkRcYIEFFvn8t/766y+55JKlCvnmm2/yMdjdL3/5y9tuu61dlnL88pe/RAMX7i644IL77ruvD/nwww/fdttt3iePe/jw4ZtsskkImUyhT58+P/rRj/L3TE8//fR1110X5Xv00Uejvo08k5eTJ0/+3e9+N3PmzHDrK1/5yte+9rVkMPm0OoHf/OY3vOvkUzAvmm+++eaff3
7q9mqrrYZyJRkm8knW2ChA6mWo83z755xzTmqAnJ6UNqMbO//882lkQlKLLbbYHnvskSdZtu/+4x//sJBbbrnl0KFD7RLHNddc8+yzz5rPgQceOHDgQLssy8FboPl67rnnKCe/Tz75hC+dkSgNGIhoDGnNykpQgZuTwPiJ035+6esZZZtj9h6LzNd7iQV7L7FQ76UH9V5wYMlxVHZSs/XssdDA2ReZrxf/Fp2/18pL9e1ZelJz5HmvffzJrNY+o2zRrV+NXHS2Hl86/LzxM2Z+arcO/dZCQxbpbZelHEQ54tzXPpneEXHktgussmQfwr/13vQTL55gEYctN+eem89nlzjufvyjq+951/vkcW84dK4d1xv43Guf/P6aNy38ZsMHbLP23HbpHZOnzqSEVr7jvrPwwvOmi4pueeiDmx543+Ie+I0FVlziswfRr1UINGcHVKp3hiqjOMbJ9Hr86BoYMGcMlYsNce3dRdHNP9ux8MIL77333tlhwt1KRg6k8Oc//3nChI7mgqH+brvtlpEv2qAzzzyTxXMWxo9voz7dwgQHnfKAAQN4NPpl/g4ePDhj0UmUVCXDA3IfP378rbfeyjSH18F6F3zmnntuXv2KK6642WabMVSLihpdZtQlHiokRWqM94YMGcJcI4pulxW+LNLJToGRJ88SSrLo5z/LWo6WJhA1Izkn2v6Rr7/++ieffNJ89t9//3nnndcucfhKTgtJgC4H5FdcccULL7xgiRx22GFeT5xdVy1W5PAT+eiWXSL6uPrqq+1yl112WXrppe0yp2PGjBmPPPLIqFGjnnrqKbowLvmW6RdomhZaaKE111xz2LBhGUlFbyQjpL+Vv2H3seRuBgIZb3zOOeek1aXtHTRo0CqrrJJHTJT6RPkrdjS/Tk0t23ONNdbYYostUsPUQoTVZWuAnMTGBksssUTG6vzspCrpBKdPn37//fffd999iBHefvttLpGp8mYZsSAIXWuttbLfbEYNSeUcPDfddFNSDu7otVZdbBINqzJK5W9961vfWm655byP3N2GAAL8U089NTwOknkcGcLAwk9dUhBTOMVqRfTarJCmdFrVYpsznRdffPFvf/vb448/HoVHS0STyg9FF00wiiLm6lGY6JIe9K9//SszPe/P2A4lGT824d18880bbbTRzjvvzFzUhwlu5rdPPPGE+Xc5Ag4hiWIKLUTkFj04Jk6c6NOM7pa69OmkpkCa+dWuzH6TZUBlWCp37/+f//zHvxr6RSm0PJ9u42Yi9PHHH2c/DtINJD5f//rXl1pqqYyQqTU2I3y4Feo8+ptkXe0yrg9AW+EvvRtp1+23324+TIm33npr5IDmU8oRPRFapSgk6nNfbC8ji0JmXBIL5Tdz9SClspB8qvjwGzt27KWXXrr55pt/+9vf9pNtCylHCxH4eMrMB5/+KGeBUUFtu+48I7ebf74BKaOpspIaMmiOfbdZYKPV50pdq/fImI8/mDQjZ6ks2PQZn/btO1uPT7/kn+hfD/cZsm3XytdRYyf9+7+fiWXDr1+fnsssOksNNmVqJ0QD+8U7rd+YOM3n+EUaXfy/9MKfbWhYZN7ZH37mI1PAoT4spdB64oUpD7g39eCzH2+3brq6+p7HPvLl+dn3BnVRFN1uMgLN2QHl6Z0DSPQQdGoM0hCFJNEWG+JaOlF08892fPDBB9kB7G7Uz5p/tsNGy8htbTZLFHpk1p1kzBpY5mVrGQk/11xzHXTQQZZX1Kebf6oD7CjPkgODEDhKqtjwgKRQZTHBQU4Ujd7f//zHxIdlN4iTWKYTyfd9mfPXJYCwpA8RVepgo8KXRZHKSmH11VffaaedCsj6/bPL3QwEomYk50Tbl/zll1/2420S9HdxR5X8L3/5y8EHHxyFiS7/97//+TSR//pqX1ZdtZStaTKfpIMxv8+XlqTcSo5kg2aBxsEnThPxzuc/1rPeddddyHB33XXXFVZYwYcxd/RGzD/bkb9hz05Hd+tPIO
cbpx9BYsYvo0MpVfj8FZt66j+BUglm+PPVlLpbCxFWWa0Byq3tt98efVvqjsmyksrZCfLt//Of/7zxxhvfe+89jwVBJa0EP4Y9KCwp1cYbb+wDeHfOGuKj4GYMZj7RoyVHRxWOi6Lolm+2g/FMdgDdbVECXpsVHqFGOq2uN4I0hCCSej+fsTKgFzEVhXnKUQsC99577zHHHONVJqm5IIk++eST//73v6feDZ4kRZhImxWFp6G/4447DjnkEOaE0a3WukTJl7PAbDROreQ5o99zzz0+JC+Cr8P7yN0+BBji8PnwwbKYMRKptASEqDLzCP/+97+bpOSw/clPfnLJJZeguMooEmVGK3/44YczB8gIplvdjAB6l7//37s7/OSFv95W6Xt/YfwnR5336nd+/r8335teXUqbj+i0TOSux7JqsmV956hOWr0Nhw7o06seI8b+fWdbetBnmq3we2pcLBT74s6XHh0zydw4RnW+9Leefqljk+uSC/Ue2D/WwPnAcrcbgTp0QKwAY6PSoYceGsky2gE1O8gjoQmCbLMuEBFg2TJCH++JwBcVjvfJ7wb7b3/725/+9KdIi/LHKivkM888c/zxxyO8zhh68bAPPvjgsccei3S+rMRTAzMUYQENmoAKpY2piZfryZSNkSfGZFihWG5chW9zAuxUaM4pPyZkHnroIf92kL+XpfBGK/+LX/yiy2aHxXC0TldeeaXPS24RyCbANJPtgxgEolvJDhndrbxiRwkWvowGXfUXYbFF7LLLLoNhONem8IMQMU8niCb+rLPOQsOdPQJkbToKAMaKpQZIlZRTcUWgzgRocKjzyUzRaZ199tlJ/0p86iGeKLd87M1ig3YyFsad2BqZqktPBpZPJQQYY/3xj3+MUoA/pkLYAhLt0mUWhxi9lCIHIS+1lqY8So2FGywtYfe996fqE7ilJcLsU+5yP014ZMbHSSyeRoabUTLvKApQ6hVEwXTZXQnwJaJa/vnPf46utIWekWIn1VfRYLdRj0PZGIO+9NJLvgBYjqL5YmUlfyPjAIjPTjvttFQTkT4FubsZgUmfzDzj2jeu/78Oo3aFH3DsK1OOu+C1mbZBqXBCLuIGq/fv27tjsPfi+E/GvdFhzssF7HCS/52PddrAsWVnrVhH0Bq4Vh/ScU7nxA+mldLwPTy209bVSL9l5XrvoxnjJ3Y876rLzGm35BCBenZArI3FUnQbiv7ZnOSt5NGlsgQnte5dfPHFftki/WzltgcwO/yrX/2KTX6pOVbiyaK0U045JTniYqKatDCJzTGE1yjAKsnR4iIXO+OMM5pkuoRm4qqrrrKyySECOQkguq3Fh5kz91LBmJ77VohgjOrzKw9uuOEGzNskE2fKsPjii/sdZiEM2i9EB8nw8hGBDAKstmS5BqstGcNkBPO3KqzYPqlK3M0jwuIAFOygsrW0kscJcTM6QYZ8SGb+7//+L5lL6mkR7KL79a9/nf+1JpOVjwg0AwEMpbJcI3VLNDX83HPPrWIhU4zkVDH1AkklLQ2GRII2K1KlFEhfUbokQB9JPfODOUxpYwxwmWWWsbi0/qwQ9Pu3GJViLTcyGEinRTCLhYM9+ySFCXheKJfkwhunP2Z1RgjG2kMmaSx4jCTFPpFauClVtIY0NZdQ7NRbwZMnolfD+FtGmHCrEv1Tqrj/gQce2HPPPZNj5S5LogCtQgAbIHwaobTMr1555RUEQ+zMwzqoPQKLdlkty5rZSFtsAcxRVp3HxDPDPovrHddeey32oM2HRejM2ezSHKWOsqAFSAplsESKEKqUIQ5Ls9aOm266CQv4lgtDzx133JGv22RzfO8crAUBE2nRNmJC+jvf+Y7FkqOlCWyw2oAfbj+/f4TpM3u88ubUMa9MufKud7wZwF//7fVVh/RZ6nO7eT68uaOkOBHrjXdnvPTGJ9ff995zr3bsQxr93KQ/3vj2yO1KWgWcb0Cv836U8olZRubo1+ezrUhos742dK6bH+zQt9
09+sPo1CuLEhz/fWHyOx90LENZcO5eay7foWSKAnd5ecB2C2409LMeP/s3V79ZI9JVl+l79b87wj49bsqCq8fRMef49LiOfVeEfuO9aa+9NXXRBXp3xPzc9ezLHWzx8NqyKKQum5ZA83dAvncGIwtsMTjHeluWa2Boy8DSr7HuhH7EfKruwLxM9iFVIcfCqwPLGjmEvFi+xhkJfqkms9x11103mtNFE0BmAZzylT2SYdtTZJ2YSQRDIwTEfobCFINJCmf2VJE2nf55553n16UxQdhhhx2WX355Fv9RAdiPxetGWm1rXJDd/+EPf0C7lj1Qj+oSZcaMGI+A/swPSHhSDhtjB3nGdKnAy4oQRSnwvOyig/Att9zijZthkxkzkvktrke56LI9CbAIjL0medqrPHyiuloqSpcT+dQpNnP29dZbr1Sa5k+zzxTALnFw6BEafZN7IKfmVDB2h2B1MATDhyOKscvKAT8+YuSudcMeZafLhhPwb5wVDMwu+dFLMq6wsjFLpRvFTp35ZDjKqthrr712qTk4XZhJ7bCizDrO1EwRZ6f6pxaj6iKsZGtA//vqq6/SKWPt07ZAMYtH6ojCKdUcdCh/lFS5nSCdo33sIUGO8aMxWXbZZenreZsczg0Tv12Vvp6DGDbZZJNUgObpa4h5Jh2cwZn0rI9PcoSWmm8B45mp6ciz2QjQqTHaZ2CMWC8qW3VtD84SH0R5NOoymsxYMRh8sDcrmvnYXTmqS4CGlQGZpUl/dtRRR0WLDTlN8eijj0bvZauK2IzPZiwafYvIEI1tXn6yh8aIU5d9n8GMmuYYgyRMycwWB50N7XidDarSqTDotMJX4mDU26VCi9mgV0KUlR3dcHJHCymEFWScKllWagrcQgT4DH0tZYJE4fnQMMrMekBb941Oi4+RNjP70cqq81HWPuVocojk0RfSh0x1l9Ls4l9qMJ2aTi08vfAIsRoneXD4h8+IFgzOlJMWzPjTfDG7UIflQbWuu1/fnoMTOqohg3p/bfX+3/raPMee/xqnW4Wnmzx15kl/mXDhkUuWethkUkst/KW1V5zzW+sPPPfGiRfe/JZFvOjWiXtvNX+v2TvtYLa7vWb7UrJIdjfVseWIub1C666uFFp3jupklnCz4XNzWljh3/xz9yqrwKsN6evzembcFGh7H9yo3GYk9rE9MnZyUqGFPszH/fKQPv5S7pYg0PwdULKEYVniBhtsgAYLMwbGmQ66pgotpEhldcFWsJyOskYOliZnTCKyQR4XfFDGsKeHNVgWgA4UU4R2iYMogwcP9j5J90ILLcS+h8gflRKDcFajs8Ha5Fbkvs4663DcRRS48CVL8bxGh2IwVzLy1Ae2l/FDx4PxMbZnhYyQ4PPg2UteknWJZBlmbLjhhtgvOf300+2h2O+FpGyVVVYp9RTFXpZPLZkCE0COBOPtsN2QaXsIzECUlT1SaHl0cuchgInRr371q11+6XmSStbVPLGiMK+99hpikMiTSz40RM8ccpO85X3QZvk9ZywI5jAFPx1gKsH6YNYmoo22TRvM35F677fffj6pyF3rhj3KTpcNJ+DfOF1AOHiSqnL++edbzaGQqIRpeINAIKPM5VZscueXmqBfDUN9tl4vNXDkWTcRVmprgJUpNEkoihCm21JatFwwxLhxVFS7TCaVvxNE88eiFksKB/IBDp60lTo0KfxQHzL+YZmIhUTnjYA0W9PDCyoLviVeN0fqCK1uuSujZiAQdFrU+ZrqtDqs0DT8maXNavgrCAWIRnIcPszkKrVs3//+9/3CwGg/PqsjUU1ZREarjOq8NstusXVj5MiRPpdKDpeyZOvp8IXntLAuDWf79Smldq6UKj9fis2N6RG90N8nWyq6/LsZAeoAH+kJJ5zgh5iXX355sudowgfHPqdXGvmD4lmu5aeF9S88ynivdWYcHGmzrEhsPPULqdjk6teGWzA5uhmBeeea7aTvDZqrb0f/+NS4yewcKvcxe/bsMX
Lb+VdeskOLg6rmxQkdVvLKTTAZfvgKc847oGMBE2dKvf7OtGSw4IP9krtGd7I3GJ3CVSpitfwHzdcLHZil9tTLnXZiBf9H0k7MerSzEcIQ8oaZj8MAAEAASURBVOmXOhRaA/vPzhlalrIcbU6gPh3Qdttth8DCULOvGum/XbaJg0FytN2Kzc0IkuzxMbODxM0u55lnHjZ12WUBBzvAttpqKx/xySef9JeVuNHM+bU4CKw5KjhVtMRuLW754RkSfBvDl1sGLGFwjr2PZTpC71kHN6I0Jm7eYlKjSlKHh1UWtSOAJptjY5qnSfTz6EUWWQRZdnh2SuhvpQJh86I3poow+sgjj/TaLB9r33339btL/f4MH0xuEfAEaHJ/+MMf7r777ubJF+SXbph/5PC1t9yKHSVVyWUziLDolPn6/FMUGxvk6QRZzMQmMMvrG9/4Bkv/TZtl/viwvsdLEtBcRpowCyyHCLQWAZb3sU8rdQsyquWq2B5sFoWWtFnNUzVtp1Qo0qKLLlqqbBzU7GsnCw9t2SBRIv0WE8tkC24po+5CKGyXaMJsu4N5NrODXs2bZPQT3WSxebR7773X/Mtd0ugHJSuuuOLWW29tSbGCjLdgl3K0DwEWAvt9gYyf2FPf/I/vLXpjhOeAAw4w3TBqIXRaDXwEhGt+GJrRElJIloT7oraENtEXWO5iBBaYe/YtRnQYc0BM/fxrRU5qYf/TJmsO8GUY+1qHGsb7F3OjM9tsWEc5SeSu0R+VSuqpl6a8/m7HBGzIonMst9gcpQLXyH/1IR0rQ58ZNzkp//cHaM32xfaxUWlaLqJbIVdzyZqnHG1LoD4dEENf1vMaZOQU3mSQ+Xd7B72kPxCLkbBtyWKrE0ukPQEEdl5Z4m/ld7PWxweOjsP0t8p1R4OTjTfeOMOqD+pM9qBYFkyUzLiFeeZ3fOUrX/GBq/hQPtk8buT1zEEsJKcgcySJXcohAjkJsAwUfXbOwDUNFm0f4VtDL245MvvOVryxSdHLLjbbbLOMFauYLmdbhiWOkruB37IVQ46WIMCyBrQyVlS2AbHz2y6TjgordjLBwj5NIsJifao/2gezSf7Lzf902Z0gq3JHjx5tqSHliBbZ2K3gYPGT3yfw0EMPZTc4UXRdikDTEqi1TqspFFrSZjVV/YsMiGWvufve976HFcHwY7O89Qf0nX7jBWstozlY8pG9RJg+gAFuMkzT+lBgjIFY8dBXQcAuIwe7N8yoI5qwskygYLXfg4UqBhtt/Rc9X6o1wqgAuuyWBBgJ+R2QnGvV/CMhr/odNmwYch8MdNjb8QNf86ybI2oJs6ea2Pz5oiH87P9ytdR1eyhlVHUCSy/aSdkz5uUiCi1KNXiRTumMG1/NHVqkH+2yuuuxTnuwPJa7Otsb3HL4rBXKPkyt3ast27FfjYPKXnu7E40PJ88Y89IsNdUi8/YetsKs8704RuvVtzqFfPv96W++36Gci4wZ1voplH6TE6hbBxQt58o+QqnJoVVSPKz6+I4VGVxQ7WAzmd1yljLDAMwD2mVhB6vuvJ5p3LhxhZOKIiJpMh9W4WB/zy5THdG2qmjBX2qUUp7R2hq/ra1UlNr5R5vSGluY2j2mUq46AeatfJ6WLPsYzAKYedbfwfScw4os30ihxYJRjHza3aTDW7ihkUfPnQzjfVBoIfqwX5fGXXxcuduZACOKb37zm56AXyft/YO7woqdTLCYT1OJsPxp36xeLfz1ZXSCaLM4kMVYcSaIb/TM3xzIQDDna5eICr2lK/OXQwRakUBNdVqNV2hJm9VslTKyZI3tMm8mPiotYlzM0drPrGpgqgsT+RYYs3i28cI8IwcKIfbb2q/ytZlR+jW9pC9kYGpCClZaUbFL5ejF9H7xV6nw3t8vKAYpx5LBnOMlLUyXK8gspBzdjACWMbzaeOLEidlTr4Y/PuaGOBvZihEK7x+B8jdwxyELr/zQE9W+N29txQ4OQloziMPv14xC6rKbEZizd6
fTpT6ZVnIpQ/aDT5oywweYa84qD89WGtxn8IId1vYef37SOx90ytFyv3NUxyGaPXp8abPhnbaOWbCaOlZbukOhRUbebCCXjz0/xc7PWmvFOYcvP0uhxa1Hx3bsx0pGXLXz6VwE0K9tCdStA2Kll+/pULEMGNCAb6oZXjQdZXRSxV//+texY8feeeedVjz2NLBUzi4rdNiwnHQQLVVllQ+GxbyxROZBjBayy8kWPb+gnrENG/Wyo5S6G0VkVVypkHXwb6rC1OF5lUW1CLA/yZ8kR0W66KKLqpV44XT8EgfMxmCZjcE8x8BYgj6AeQYHbYvfoUh0r7yPAofLZZdd9kT3q4oWPzUjeXY/AqwB9UaSWBqSYaXf19tyK3YV0TWVCIsmyD+a1zx5/y7dGZ1gdH5KnkPuESD4HKMU/C25RaDlCNROp1VliUm5ZFkykPqtsnJnyy23tH0n5Sar8JUQoDH1M0DWLBx33HGcP1nWPDAyEO/tRJcqG6sOWeRoP2+hpVSU5vEHDpPwSLGUWjz0fL7OY4ClrE7UD0rYmxWGy14HwHgaw4OpWcuz2xPwO+h52OxNRQ2n4SszNZn6TJEYo9s+Mz4rH6bOBWYFXLS+EvtImPpFU1jnkii7Zibw4oSODUCUc9nFO82R8pc82tq16AIdyqf8iWSH9NYRUQjd81iK1cGxr37y6tsdT7TGcv0Wmqfj8K3s9Kt4d7nF5+jbu2OA+vT/OhlgfPTZjs0cI1bst9YKHSLdR8Z03KI8z7gDtHrN3gOtXhULqaRamoDvXGraAbEszK/633TTTVuaW4WFZ+0aFpItEdas/PznP/fzC+wEIke2AJU4GF17ETPL9aKtcsUSj0wKL7jggnnSWWCBBSwYJhwKDyQis/Be2m7p180RbXprbGHq9tTKqCoENthgg5VXXtmSYtejnx2bf90cbB/xBbD1pn6Kzd7KSH5txUPu4a2Us9nCbskhArUg4Hfros2KOibLscKKbelU7vCDroaLsPyqFAYGfsNWWU+a0QlGbyRP/xgNJ/wApqxSKbAINCeBoNNKHeRznha/YsVugJzCCsoWllGjRtmlOaTNMhQNccCfFZQXXnih5c6E88wzz7zmmmuwAs/ALk+L7GfvpFPdgR19cx5JvZ8h27OUcjC3zE4TJZ9fC5NMh5kzE3Xb9I1hQCyoJJWyrE+ZPn16iE73yeIv1LrJ1FJ9MPLL1je7ZYPslVZaid05xpwRg58kWHg5uj2B6EPLtuBReZ2vhCeL1u1jIR32GoZNnGzN5NhzO6ACE5qcDF8VIVSB0m6zzTaUxIakQcGGdh+tG18fxkIz7OMXyE5RWo7AlGkz73L7mSj/cp0tEOZ8IgzlXX3POxaYQ6G+vFRJ1cvUGZ+idrLAqY5F5+/Vr0+HQiiE2XzE3Ofe2HHGyZ2PfbD9+p0O1iLYnaM6mSLcYkQVtpJMeGdqdoHnnKPHYp0VeBBYeam+pp166gsDg+FB7AAtNpANW77fwP6zzdV3NuwQcvfRsZNCmPDXb+1aaXBfdFr+rtxtS6DqHRC9gxd0orFAvsmCsJtvvtlvlUanwnK9mmLPM0JmxVjhXrXCkQP5sgHrmGOOMbvcnJdpQJDmcGq6XVboiJbiRfYnCidug+2QgtdUZaQZ7eKiekQrkDLi2i2mDxhns0scGMDwl5G7wpcVpRZd3n///X7rIRIKb+AxCqxLEUgS2GeffY444gjTA/35z39m9hrtnEjGKuVTYW2nubDpOc2UKbRwcDhWyBS1AZMCf8SAFcZmCsEnz0Jei5vHUeuGPU8ZFKapCCSn/Kl9XIUVu1qP3FQiLLRZfmkInZctpS3rebM7QT9UwJxSnq35HNHiC+BT8P6t4oazHxgni82YAQFm0l8+3ZgAQ1/MjO+0005Rp8kjs2qcz2TkyJHlPn7DFFqlLA0GbVaX27TLfU6FL4sAB5liUuOf//ynj8XM/MrPf+hg0GyxNT5j6hI1wdUd2GGUlp8vW+Vuzq
OOjqSO0mRJ6UknnRR5+kuGuRyPzGw8GEljgM4YIrkU19sbDOdjZ2wS9+nj9mtbULAhVQ8BOEMSg4dmD41lbsgLWstmY/SkuixGIBKsZCu0Kq/zxQoZYj322GN2khw+pp0NblNovf32208++aQ/WKuSTMuNS390+OGHn3LKKZ4knzZDWH7cRQ9HydEoFxYOllskhW8eAlOnzfzVFW+89GaHhfSF5+k174DZyi3hQ89M+uXlEyZ90mGrcPPhcy88b69S6bzzwfTdftaxsiE12JkHD157xY59SyEMWq5Vh8z53xdmqXxQF30waeaAzrYN73QHaPXu1XPjoR2nXKRmlMfzTze9xb+MkJTqgsMHRwE478oUWmNenjJz5qc9e36mjnr/4xnPvTpLAr7sYn3mnesz4Ki1wqlgb7037ZU3py7+hXHFZ8Z12PVabUgMJMpRl+1DoOodEH3EXnvtlQ2QYeSPf/zjWo/N8oyQL7nkksKrMSofOWB8D2MM0RQjoINhMblSkjzLvy6++GLvj6klf1nY7YcuJJJzghMptKJZUp7CTJgwAbNsfu0dM45sM2WVv6zUgqEPxkrkZZdd5u9yjKu/lFsEuiSAHBkF9lVXXRVCopFCzLD77rt3GTE1QIW13U+xmc7bB4v0jRUAdqA4wVIVWpGB9JzNQuqDpHrWumFPzVSezUwg55S/wopdLQK+GI0VYdH5nn322aa95gGHDBlS7mPm6QR9Lx9pH0tlh1lmVF+m44/sXUWxOLTv/PPPjzz9JZlWcYWQTzmnu8sNNxTv29/+ds7UFKzbEAj7tFJ1WqHOlKvTaoxCS9qs5q+RDCiXWmopVkslVevPf/5joogwl80T0XGI4dGiWNWaoDYzN3ZoIdRGR8VwPJQT3VWk0GJAbFus2I8STOXmNDlI33nfffcZgTXXXNNbhkTFaAotNGTsA4uspVlEOboxAZuAhWeMxC5N9eB+aEuxmT1a8dj5xLEQkybNErsTslEKLYqESYfTTjvtrLPOSp6KxxZMZDr8wpCRKW6XJwXaM8rRKgQ+mjTjhQkdKiuKPWPGpxPemfby61Ovuvtd9h7Zg7Bb6OjdsixlPfvy5N9f06HXQUPz+jvTXnrjkxfGd9puNd+AXvttUytjNVgdNIXW9Bmf3vfEh1uO6Nik9eKEqf97vaMw66/av3/fsvVzBqRCx+rLdOifJk+d+eLr05YZ9JkZxtHPTf7001lpD1+hf3CZQovLUc9NDgot8L770az90Pivtkync7lmJaH/2pJA/Tsgjk/Yf//9tfQhVDfmDiwKiYQ1HIc+dOjQsuojOp5oCI1ZbyTL9NcMg72dBrSJeU6wyJN7dK5wxto+n1oUzB8z7IPhZvCDxtF7IhdjKSsrypkImD/jDU4dZkGb+dTCwWt67bXXLGVog5e5jF/lw102ikWHf1gUOUQggwB6UD5Vq2PMZJnPVkv3nJFvdIsq7Xds2PasEIwimfp2zJgxfIxJky1eeE2s1IULiE9sZ2pUAC7ZnBGpKJJh5CMCRiDazZM65a+8Ylt2lTjqL8JidOGNClJ49oLTefGZ33bbbX4pOTqkXXbZJePpinWCZGd6KRKPBgAZ2RGStbwhAPsyM0LSEPHLCEBD2liFVkbZdKvNCVRXp9UAhZa0Wa1Sg5mcMLe88cYbb731Vm8SJJSfMRlNPBspNtlkk+9+97ssKGiV56pROUPviE1w1pqFWTS2OOhN+WItR+ynmRu7amH3se9W7W7SkbGgmMBsm7PNYVyiS5NCK8mw2/tEoo2MiVNjUSDK8fZmWWLsxXw0JhxHZwJHjrpFvtPAk8/5TrGPRM/Fp816qCQ6hp4skrrhhht+8IMf+NNBkiHl03IE/v3fD/mXp9i7fX3+dVfplxHyxfGf8C8jALcG9p/97EOXWGS+WvWnX19jrl//7XVUWaEYd436yCu07h7d6Un9mVvZxa7F3VWW6sOOLM76Cr9nX5ocFFqPjOkwKjjii11oa63Yoa
xiX9d2X/lMS+ftDXK56tIdYWYlqv/akkBDOiCGf6xn2nvvvf3qjbbE/9lDM1ZJrv8osGns1FNPzcOQ7dQHHXRQMsc8cZNhopGVH70kA5tPzmCEZ0XgTTfdZBFTHaR2wAEHMI9IvVtFT0Y+/LITZAJy5JFHVgtvdl66280I8NXvu+++J554Ypg483H96U9/wi5CNJ2p9VPbjIOMqMks2PU5ot/iKERTkBM4uavA7oaIqd875wXaWj2ffnCzbxUVddJfPiKQSiD6RqKOKUSpvGKnZl2uZ/1FWMGmVJ5yshckWg0cxSrWCUYNQpRmxqVvOgonkpG+bolAkxDI1mmxGQZ7YzmL2gCFVs6SKVgzEMD2I8sWUO9jxQ4Detj+ivpLmlpWOqC5Oeqoo1jjYGWOelm/sdfCFHawkT/P7mDk4Pl7guWXXz653sqXMPsuIYNeirKxm8Smf4wkvvOd74R0WJ/iDw0ykwXR8lKfqXf7QQmTc47T9HdxY/rsuuuuC55jx47FRGTq5rkoli67E4FoLU+27dbK63xhdOw19AuNvb3BkCY+VuH5QNCdN1xBu+rnP05/pfCsJ42WJ1NsVn4xJ0deudFGGxUmo4itSABTeFuvM/eB36hoWxXKm63WGXjgNxaYb0AXA7O+vXtusmYXR1stMDA9EY6bWnfl/qaie+Dpj9j8RIIB+x2jOw7Qmrvf7OuunKWfy/+m2Gu1ROcjsqK4i31hIdD7szls6UFzPP/aLBXgU+OmAJkAj4ydtWKRA7Fs09VSC88x/9y93n5/GgFGfaHxeualDvXhkgv15tl9+nK3LYFadECsx+fMRY+U0TIHeqPE4uiI4M8KJxQwWB1Mjt98xArdHHObfa4S6UdD9LJyrMrIAeNgtgbZcqejZ2BMT2s+VXGw2Gu//fbLafAnT47RGT/RuKtUCmzp9rdSN3D4ABlu3u8ee+yBFY2MMOFWVV5WRi6gYIa41VZbaVFjBiXdyiZAfebDx9pBCMYWCvZpFThrsHBtj7aP0AR5gQalovVgsRp7s0IJWZ2w8847e7kz/pFCN0NxFRIp92+tG/Zyy6PwDScQdT3JKX9VKnZVHtNm9KTWPCIsemE+ZCweVfKMpTpBxPE0ESaHjF5WRo5+qFDJOCEji7rdYmVANF6Kss4zjImi6LI7EWCixKrx1I2GOcXjgUa6yKOmpMJc5dFHH41y4QPGojojmGSLHIXUZZ0J0J6y8Ygf25mR5CIL8OcAUxguzzjjjKOPPtomyWHvkZUzsiti/sUcnFhz4IEHdhmXfWN+t292eEwFmoYpO2Spu/bhkY4ptGCFRjAMcx9//HHbD84+ceyqhaTy7NDCwonf0UIPkVzK6hVapMwmrew91KUeRP6tS4Cj73zhs9ccVV7nfV5luf3QFlVx0roI50Kz6d6+F8I3XKEVHpDDP/jttttuTz/9NF83mn4/9GTywFYtpp2UvywgCty6BDYcOmD/7eZHoVLhI6yzylzH775InkTm7jfbT/bIFTI1tc1HzG0KrSnTZv7nqY/DQVmvvTV17CuzzqYi4ibDBsw+22dnVlX+23bdebZZtwsNXGouqw/p16HQeumz07De+XCG7XJbdek5TRXHrbVWmPPmB9/H8eb70zjYbPCCvf0BWqs6A4apecmzfQjUogNiD/EOO+yQyhDp53nnnRfWcDBQPOecc84888zaGeJG8Iptw9SSVMWz8pEDGr7UA7QoHnbOf/WrX1VLO8Jh47wUBg+RrLlCDpGhJw7+yZNgtAim8EHoiNqPPfbYnIgqf1nZj7bjjjtGetzs8LorAqkEGFQjF7IxP7srsNNQrhK6cG1nfu3tiCbX2FFmPE2hxbfMND9al1CsWUilkepZ64Y9NVN5NjOBLqf8VanYlRNoQhEWHehmm22GvdNIdV3gYUt1gow6kIVamxYNAEplhEjQ68KzxwksoN9+++1LJYV/w/VhNOysKMoooW61MwEW+bHXOVWbdcQRR5R1jFYDFF
q8Oem0WrT6ImLe4vMfa06x8I5I1x7kiSeeYPsRSq/gE0nSq6vQskybymF6qWHDhqGUDWsxsKnNqDecCoCGyQqMKX9T/llEu5t0RAuKEZ2wlDUZjI7Z7PKHFWSWSzKwfLofgS5Ht83wyGxy8qeao9BKrczsL7SBIIb+mmrHIauuUFnx4wR7BHPXX3+9nRrIYhNkl7///e+jxZvNQF5lKEBgoYG9ll+ij4/Ye/aeC8/Xa9EFei02f68lFuo9KLd5wDWX6/ftjeb1Sf32qjfGT5x1Ctd/nvwQE3krDe6Ulw9cLfd6q/br16fnx1NmhgTvGvVhUGjdObqTrfYtRhRRQVWrkCGdVZfpe/UXZnqff3XKtOmfjn5ukjtAq9MGsuEr9AsKLeKOHjuZPWFPf64DC0mhG6tu2ZRaixKofwfEeI8TYugmAjH6tbvuugthSosCrLDYLFi+4IILgnovJIXcxy6Z2QLqm9/8Zs5c6IWj9b+IkBCg8GNlyRJLLFELxWEkuc4pqIrmQRknajCS//73v+8JUGFsTRuDfOzAb7311j5A7dycBOwPMUXWxgjHbHVgGpEA/kDf2pVEKXdjAmza4OhuNP3hGTnm4MILLzz88MPr88h+ek6OrIVNzkqszocisSoiUmhhoMWXNrkDlbu77rqrt1iDXYdSqn2flNwikEqgyyl/VSp2atZleTZEhDV48OBII85QgVFBGB4svvji+VVZhTtBhgomx6DrpFmLhitJjPnHCcRF3cUS/2Qi8hGB5ieANmunnXaqijaLh22MQouMpdNq/qqWUUJ2VBx//PF//OMfzUQAgXFXotBilmtzWlJDE9NayhjbocW6D1ZyMeEMABn1otBiCup3JbKOLNzlb55tZH5BMVG6NK9PGDpFdGm2D8yyk6MbE4g6BsZtTfiwUWXGsja/LstJLKaCXQarcwCEZdjb4Sj74447znRazFGxzuplQHUulbKrIoE1V+h34p7Ft0P5kiw0b6+vrd7f+3w4ZcZJF40PPuhpzrz2zT8cuoQPUAt3n149Nx464Ib73wuJ3/fEhyiKMN93p7M3iK6uGU6cWm1Ix6lXFPK51z7xB2gN/+IArfAgw5af03BxjNaw5ft+OHmG+Xx5SM01hZaXHM1MoCEdEEM+U2gB59lnn21bhRYiNh7fagh6HZQ37MoyH0B99atf7dLKdwiPLcH6j3OiFXs5d2hFgqooEXt8HIwrGFR4H6Rv2HiwKdLf//53TEGgA/BhauTGIlBUGNYympwUaR3zEZao1yh3Jds+BJg4sxCTeh4emSnzgw8+WIfHZ/vI6NGjfUaobP1lqvuRRx7BPIP/BvMotL7+9a/71DgdQAotD0Tusgh4hRZrKKOusFoVu6wipQaOBl31EWFxIl2FhpfsWQp3gii0WEFl6TBUWHTRRe0y1RGtj8kYJ6RGl6cItASB6mqzeORZByc05OHRaaXaLQ22B/MbG21I4btrps8//zwdj/3YYJTxpJzV5tcasG3LbMVyzptXR/kGvVSC//jHPzhuyn71GciWKkwBf796y3eiDMrRZmGq0ZZlYS58kUU6JKQ2Ry2VKfbEX3755VJ3M/x5jxl3daubEWCr3wMPPGAPxZrZJlRn8hVwGp8VMr+Dia7/xPJHLBaScloziCNb5cYIFcMFPqPIKKu/JbcIGIGthg/gbCe7RA3zwNOdzlmxW9V1bLZ2x+4rtmo9+OykN96d/uT/PrPpF36bjxj4hbOR/7P7jZOxrATsYHtk7Cw+nLAV7WZbeN5ei31xUtfosZMIbBEH9p/dczZ/OdqNQKM6ICRNfkjs5VBt9QoYDF966aX+kVmnwkxwnXXWMU/WeGF40C6b0MHSb7+4GwMVXRpaYGLrRwVMkbJNCUVPjXrP1gtyiwmy149GgWt9ifrKm3BEOunNtdU6d6XfjQlwBq3fUnnRRRfZQrHaPTXWZbqchidzp5mK5jILLLCAP22BmTvNXTKifESgKgSoYHY8JwkiWf
LVD59qVewKS9stRVg5O8FoYatf116Kqh1ZEgJEKZSKJX8RaCECNFxV3JsVHryRCi1KIJ1Ws9U/Vt6d635m4yK1nBzjxHEydovhnY3e6FZXXHFFu4Wui59dpjqeeuop8w8Wveyy5Ryfn7OzZCg2AhS0WbaeEU+/PSvPo/m4ecJbGPpOKYaNRrd33HzzzV4DzaJar29uksenSbFWoqwi8Wi2crOsiMUCI6/B4Im1hexGzU5nmWWW8QGiNVb+ltwiYAR69uyx37adtlGeed2bZlLPglXdMWy5fgs6RdFdoz+467FOkpfNm8DeYHjq1Yd07Lv69+MfjpvwSfBfa/l+0IvIYHUw+HCM1i0PfmB3V3OJmKccbUigUR0QO/j9goyGH2zQqFd/2WWX+QEA/Sb2GCkMS9m82TpkOkmTX40qczJfhgd+0xLDbBbcJIN5H2wGeqWXj+6DZbiRoPlzs2655ZZGDTPQzm600UZWVFQO1157rV3KIQKFCVC1vLlRNjWyh6lwajkjFp5iR2tGWbKw9tprW6Z870zK7FIOEagugcsvv9wWkZPyeuutF6VfrYodJVvuZeFiNLMIK2cnyIH3/vQBeu1s3Tly1DvuuMMI0+M34bpkK54cIlCAAHuzdt5558igVEin3HOzfO4NMzlohci2PbjVVlv5Dd0WS44aEYisfLCi0M9bkpmy6tA8WVflXxYDO6+jwgRfxjnVpOONkGBeI1ppYrm0igOtFYvLQmkxD2LGRtAx+CFvl48TLShm9/G6666bEWvChAm2BiSsIGtbyzYZlLrfLVbI3njjjf65/Hpe799Yt58BMs7jSD6/bj0qG0LA2267zTyJG46jM5/aOSgbtrZtZySSIzRqGUuqfUtIqaIzNmpXTqXc6gQ2XmOu5RbvM/aVWduJcNz68AebD+/YQVWLB0QTtOnwuS/519sh8Xse+2jc61Mto1WW6jt4wY59Y+bfEMeqQ/rePur9kPX9T3Wc8rXWF7orX6phK/S79t53g899T3ao6LzpQh9e7nYj0KgOKFrRxdlO7Uae5x0zZkzEn0Mog7gHa13scr7yyisNy8UXX4wop2k1fwiqvMV1JNcYEwvPYo9gDnSZSLLsEkcBhRaIyMJE5IyOrr766uioLZ9FTd077LADYkoztH777bdvueWWyPhqmqkSbwcCiH04ccfG3rV+ZFpmn9eyyy67/PLLZ2SKOXEzOUNcVpojr7DwTM/9nAW5xzbbbNO0jZgVW46WI8CaD35WbJaDRJKl6lZsy6hcRzcWYeXpBOm1WbXDKeCBG5IE7OhgW7UURpo+v+KH7VlNuC65VOHlLwJdEqi6pUHLsfEKLYqSrdNilOzVJFZ0OWpBgG1VtJ6cWxgSZ/KJWHy55ZZLzYtjrvnZLYzMejMUdK5XXHGFyXnZm7/GGmswCbTw3hEZFlhllVX83VZ0cwYAllVQKVF402bhxrJKWf0T2infvaEnY59mBhAs2qM4tLXAzDml0MrA1T1uoTk+++yz7VvjoZhWNeFG9WiXFfNGToHOfgXPPPOMGVUI34I39ZMdt8K7tFd+ossmLU6oLiWxeuihh3x20YYtf0tuEfAEevT40v7bLXjoWR1GZf9w/VtouTjUygerunuLEQNMofX+x9P/+8J0y2LzEXObu+GO1ZbpOEbLF2atzgdohVscowXPsMXNb3RDK+bjyt2eBBrVATEORD3jmbehQouFyRdccIFfUc5odsiQIYYFyS/bmN56663gw7AZ/dYee+xhAZrKwfiKwpsVwfHjx59xxhkHHHCA30QVCszbP+ecc95+e9bqATyJyznEBR6HAzvRotlOLyZoSP+7PJOjQEZdRmHJzqabbmpHoSC1/Nvf/vbDH/6wy4gKIALZBBAj7LvvvpzS7duK7CiV3PUqdtJh5Xi2/IHTEH77299ajkyx2V1ql0xq+LRt+QKTst/85jeHHHJIKSlW9nYNS1YOEfAEWB4RWe797ne/O+ecHcYMCFzdiu
1zL8vdjUVYOTtBem1/RCijIJbGrrzyykmMmMi65JJLzB9pw7bbbmuXcohAqxOonTYLMj2bhE4p24NYcuDATC+obZICd9diICxmlmJPx2CL0RhLBpIjy5deeok22hQnRIlkuCRFF2tJMeH53e9+RzccJYUChvYdU78WkjOiu0Ej3r9//2HDhtlDmaNce4PRoCR7exa5QM8Px6NFOlYMOboBARb7YIXvL3/5y89+9jNvfAZL7vvss08TPiAtiZ+/dVmZeQS/lCla6lXrB0RU5I0gYasKraGJ2yx3xFVXXXWVN6bPMNTL6SykHCKQSuCrq/Rb1dnEGz9x6jX3Zp1emZpIuZ7LLTbHkEFzJGPN1rPHJmvWdn9YMtMMn+UXn6Nv73ikuvA8vVLPxJqn/2zLLNonSg3VYHTaVhRAl21CoP4dEL0Dy6h/8pOfMGA2yHQrfpBm/t3bwdYiv0AE6dsuu+ziHxlVULTAhf0NthnCh2wGNzvL99tvP7+GDxuJp5xySnQ62ptvvomnN5+IKQvk9cUegeE9m9otLvMvVg3aZZ0d7KjzW0+wrN60L6vOZJRdhQTYJrXJJptUmEie6LTP1FsLiaB5pZVWsstUBwvdfLWPjili8B9ptTlg79hjj0WQF6WGJARlGFKRyF+XIpBKgAqDAR70qb/4xS9YAk7VtWDs92Xzrl3iqHrF9omX5e7eIqw8nWB0ROikSZNOPfVU1u7Y/ubwvvD5+c9/7sXd2MfiXLSyaCuwCDQtgZpqs3jq2ZvnyUvt0wo6Le3TqtubooF+7LHHbObJmtazzjoL0xaIaBGUMwtFdM6eCW9OkLJhCo/1lVEhUd4g52U8F/zpj+mGWcfHCiZOmaLhZrLHCkdbb0gwhoOs8ssw7RVlUa1LShVZbEtNmVUVHFqbeivpyeP7eSwBBg0alG3NIEokWlDMql6Oko7CJC/RAfit6IwnIjFBMop8WoIA9eFHP/pRKCo2BlMPSEPkceihh0ZrtVKfrhZ1PjUj8/RDWyRBkYUEC+YdVGYvsiGFzTff3Acoy43mj5P/sqPwhSKoIgwq+T333PO8886z8MhD+aKRRWJah8aQnawTJ06ktUQrb2FwcAZAHv4+itxtTuCA7RYY+ZsOkfcF/3h763UG9O8zWyqWt96f/q0TXki9FXn+YuTiSy9S0njg5sPnPvvvb0ZR1l65/7xzpecbhcx/SS4X3zpr40VGrGHL9z9y14WiACjYVl6q7yNjOowbE8DOyooCc8nZWs+9OmuLebi70uC+td7uliyGfJqQQE07IN87h2eng2CbUbSEi1t0K3QfGXweeeQR6+gzgh100EEMpJMBckZHr4ZgJRm9S58CIwc6SpZ9+JTpJZN2xRHMsXvJpgwobM4///yTTz6ZeYGPWyN3WcMDysCAfPvtt2dyZOXBcPrBBx/MGAyzFrz3559/nlphd4MDEwuVmOZjnoVNM4RiIbWHH36YQ4ZKWdEgTIGXFRW41CUDJCbm11xzTQjA83JA2jHHHFMqvPxbi0DOZqRUK1Thw6Ltpm576yZ5Eiy3trN9xE+jmJJkmEAPBUDvPnz4cHRR4ZLBP1MAZNZWPCbpbPPyOy04LOSwww7D+BhSFO4yd0PtjXLC79oM0QcPHmzpJB0530jhhj2Zo3waS8DeOK0rfahXgVjBqFRhxmo+OGpRsX36Od3dXoSVsxPEtDK7r0xQwNpcBAuMbfje2WCNpSsWPPnFvuCludh1111zcq5psHLHRVFh8kQnCpLn5jwmI3ocXRYjUGttFqXqQrRXrNyFY0mnVRhdFSOyNf6444776U9/ig0NS5YBGT+7jBwsOWS4lqqFYlrOEiT2kVgUDhBGGRbpw+wus9yGrF2l3+VnxSjlWGSRRUrdSvpTn9Hz+a0z5W7P4qBpvwfO71ZJZmc+jLbpKW39DlL43Xbbza8ktZBytBYBRjz+q0wWHpOhyFNSv8Rk4FrU+WQu5oNwx69S5DNPyrMssDkQ/CGssX
OhWQLMyC971mdxkw406EnPyGf++ec3nw033BChJHvgzIdX4Fsz8zcHNkU5vN0u5RCBPATWXG7OESv0f/DZWWdEvffR9Etue2fktulS7xkzPx33RseRVxnpT53+acZdTuo65/o3vWk+Am85ovrbsyZ+MG3iBxkFmXVriYU61pz60JyAFSm01lqxnw/g3RyjddkdE73Pam73m/eXu60I1LoD6rJ3DrQxRk23kk2eQTK/7DDcTRVv4Z8zut9n0GVePkCBkQNL2fzCNaS6pUxhY2PwyCOPNOEOb40z0qPl574wVXSXOzwgayYsLJFBjO7VlsitkMUnC4ZaDqsVKIGSt/L7MEHbeuut/WFjqJGYr5VKocDLKpVU0p+SsIvOVAIspOOEoYbM4JJlk0+FBHI2I6VaoQpzp3VC8e+N++VJsNza7pc4kH7OKTZtuCm0iEUiXqGFD98FHztG4fz8HYUEv8g4uT0U4Wn6sruGnG+kcMNuhZGjSQh0+caxqMR65eQyzRpV7HKxtIMIK08niKyDdTlsrfOntDDIwYQSvyRV1sowCiplpzQZvqY+BcZFvjx5ohPeRhE+rtzdg0AdtFmAig25NJydbA82/BVQABpftr5i9y+PFoQlorS8pSzCM7TiLpsqulxiyRT3qKOO6k6yYB7Za7CAuf7665f1fv2gmYh5TLQRDOZYRbCMWA6GtTS7lKNbEmDzH+NaVNE5tVn1h1CsMlPOaJIZjdRr/SBY+DnhhBPyHHmCXp/AnGBX6yIp/W5JYOQ3OqmvLrv9nYkfdJxrVYtHXnjeXkOX6aQZ6ten53qr9q9FXpWkufoyc0bR11oh9rEAQ5ftw6Yuu8SxaolTuHwYubs9gYZ3QPTRLDc58MADuz3q6AEZf0YKHkTVpSYXSV3X5ZdfziA2SrN5Ljkhg9eKjfHsIrGUm+V9FWqzQhYk4hcDsS2M9fjZudfoLnONyD482jWv26tRvkq2HQhw5HakKKruU7P9y9syYd8kpg7zZIGlFnZhWkjat2QDhcXyo48+Oud0jE02mCWsxPiEFUaONiGAbUwOdf7e976X1GbVtGKXhbfYoKu1RFg5O0Gal5NOOilPg8Y+9RNPPJEdWmWhVmARaE4C9dFm8exNp9CiTOi0vDje3hD6W87TYqum+chROwJ9+vRhu+uvf/3rHXbYYfHFF09mxL57dk4cccQRp512WurxhhaFLfxMX9mnxTSMpt/8g4Pd8ajEsOP3y1/+cvXVV4/utvolW2hNk7faaqvlHN2Gp2Zpql/NAe1sMzWeVWN1AL4kcteIAJ8Soh8qFUu0WJ/LWXd8X6WERDUqQ/5kI4vewWpHzujsefJmQKJzUHImUkkw9r3RymFZBYMkNIzJpJjcskrrzDPPZIklaq1kAPmIQJcEVlmyzwardeyOmjx15oU3d9pp1GUKBQJsuXaHXIboX1t9QPLAqgLJVjfKykv18SqqZRadY74BJa0LYKcRE4W+AKst3enS35K7TQg0pANicEsfPXToUProH/zgB6effjp9mQ0I24Q8G7MuvPBC/7DIqbM38bDtycuLMU7u7Xf5pJrEzcDgnHPO4WQsXneySHhyiwM4EVQl7xbwYRCCFs1H9GaZvX8d3Oy08/Ma1puX2oNSh8Ioi25GAFNdqUPuqjwmx1/5HVQ5F4ySNfMRWnIrAzstmJXYpTmwnsp5DUwcmEGYp3dgDYItWUhROGYvu0n0seRuTwIsiUBWxtHs2LlFnnb88ceX0o7UumLn5N8+IqycnSA7rtBBsk+Lle7IQCKM+NAaIAj98Y9/nJSURoF1KQItQQDFDQZ4Uw28Uc9HjhxZxafo0bRrqVg4k1x0xnygGXp9jCogtWRZRPghRLYfk1XGOvxwhB9vi0v+chnenDm8Z7iVDOD9G+hmIz+b5fmLeQEWDrDWgAmnf5CcZaO+YYLvrbfe4i+zICz4zTPPPDnjKpgIiIAINJYAglF20AfDJgxP0TEzKUVw2dhSKXcREA
EREAEREIHGEuB0K0YIzHEoBhMlfhJONfaNKHcRaDgB9PqIUGgWPvzwQyYOKCcQfWgTRsPfiwogAg0hgCodKShDBbbT0RQwTuCAkiArbkh5lGlDCKSqYLynuYMjrMDAHX5c8sPNogr7se0n/BDXN4PG5Pef/yK8LOOorjaL9JtXoUXhOGlz9OjRRqFJtFmUpw0VWvYW5BABERABERABERABERABERABERABERABERABERABERABEchDAF1UMpj3NHdwoL4iPO7w+1yf1ewKLQrMvsNzzz3XnrQW2iwSb0aTg/bMGKDD/GC4bB5tlhVPDhEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARFocwJeg+Xd1cVS8hyC6mZTODVMxGLXDst+zbBvrvBTKKIIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIdFcC6LF4NI7nqLqlQSPW7AotCrrGGmtYceUQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARFoNgJBp1W7UjW1ycHaPbZSFgEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERaBUCUmi1yptSOUVABERABERABERABERABERABERABERABERABERABERABESgTQlIodWmL16PLQIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAKtQkAKrVZ5UyqnCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACLQpASm02vTF67FFQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREoFUISKHVKm9K5RQBERABERABERABERABERABERABERABERABERABERABERCBNiUghVZFL75Hjx4VxVdkERABERABERABERABERABERABERABERABERABERABERABEchNoG0VE1Jo5a4jCigCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACItAIAlJoNYK68hQBERABERABERABERABERABERABERABERABERABERABERABEchNQAqt3KgUUAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREoBEEpNCqlHrbWqusFJzii4AIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIlEOgnVUSUmiVU1MUVgREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREoO4EpNCqOfJ21pfWHK4yEAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREIHuQkAKhYw3KYVWBpySt3JWqZzBSmajGyIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAm1GIKdyIWewbgNPCq0qvMrClaZwxCoUWkmIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAjUnUBh1UDhiHV/xJpkKIVWQazZ9Sb7bsEsFU0EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERKA9CGQrGrLvdktCUmjV6bW2Yd2qE1llIwIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAItTkBKhC5foBRaXSIqGaBY9SoWq2QhdEMEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERKA1CRRTGRSL1ZqEOkothVYHi0pcVnvMUUlqiisCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACpnQwR9sykUKrbV+9HlwEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEWoOAFFpF3lOqIjTVs0jqiiMCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACXxBIVUCken4Roxv+L4VWpS+13WpMpbwUXwREQAREQAREQAREQAREQAREQAREQAREQA
REQAREQAREoBCBdlZJSKFVqMp86UsZlSbc8n8L5qFoIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiAC7UTAKxe61ES0E5gvSaFV/HVbTUo6iieqmCIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiLwBYGkDsJ8vgjSFv9LoVXRa66k0pSKW8q/ooIqsgiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIQH0JlBL4l/LPU7pK4uZJv2nDSKFV5NVYdUk6UpPLGYy4FjI1HXmKgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAi0EIEuxf4WwBypT2d3k47U8N3Pc/bu90h1eCKqS2qNMX+7W4fCKAsREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREIFuRiAoGiJ1g13iMHc3e/BSj6MdWqXIdO0f1ZXokvhJn64TVQgREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREIE2JpBULkQ+0WWboJJCq8iLjupKdFkkRcURAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgdIEImVEdFk6Xje5I4VWRS8yVJfsSmN3zVFRloosAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiLQjQiY+sAcqQ8X7maHSY3YPTx1hlbB92g1xioQDnN3mWgI2WUwBRABERABERCBpiKw1shnmqo8KowIiEAg8PC5KwqFCIiACIiACIiACIiACIiACIhAaxFATfDpp59mlzmE4W/4ER5HiGKO7BS6013t0CryNnv27Eld4S+RQ6VJVp2kT5GcFEcEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERKDNCCRVDF4Z4ZUU7QNGCq2K3rWvQOb29czc5sjIL0+YjOi6JQIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIi0GwE8gj/LYw5eArc4TL5t9mesQ7lkUKrCOTPq9CsP1F8X6uiW7oUAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgTwESqkbMjQUeZJt3TBSaBV5d1ZdiGxVKnjmSS5EyRNSYURABERABERABERABERABERABERABERABERABERABERABLolgZzKgqB9CIHtb/DsllhKPZQUWqXIZPlb7QlVh6Dm8G7zNEdWoronAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiLQrgRMlZB0gMR74g6/tkLVY9y4cW31wHpYERABERABERABERABERABERABERABERABERABERABERABERCB1iKgHVqt9b5UWhEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARFoOwJSaLXdK9cDi4AIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiEBrEegxderU1iqxSlsjAuPHjw8pDxo0qEZZKFkREIEMAvoGM+DolgjUgYC+wTpAVhYikEFA32AGHN0SgToQ0DdYB8jKQgQyCOgbzICjWyJQBwL6BusAWVmIgCfQq1cvf5nfrR1a+VkppAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIQAMISKHVAOjKUgREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREID8BKbTys1JIERABERABERABERABERABERABERABERABERABERABERABERCBehO45ZZbpNCqN3TlJwIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiUBYBKbTKwqXAIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiAC9SYghVa9iSs/ERABERABERABERABERABERABERABERABERABERABERABERCBsghIoVUWLgUWAREQAREQAREQAR
EQAREQAREQAREQAREQAREQAREQAREQARGoNwEptOpNXPmJgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiURUAKrbJwKbAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiEC9CUihVW/iyk8EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERKAsAlJolYVLgUVABERABERABERABERABERABERABERABERABERABERABERABOpNQAqtehNXfiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAmURkEKrLFwKLAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiUG8CUmjVm7jyEwEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERKIuAFFpl4VJgERABERABERABERABERABERABERABERABERABERABERABERCBehOQQqvexJWfCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIhAWQSk0CoLlwKLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAjUm4AUWvUmrvxEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQATKIiCFVlm4FFgEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERKDeBGavd4bKTwREoFsT+OCDDyZMmPDee+/NN998Cy200FxzzdWtH1cPJwIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiUA8CUmjVg7LyyCAwderUvfbaywKccMIJyy23nF3K0UIEHnvssb/+9a+vv/66lXmllVY69thj7VIOERCB+hM45JBD3nrrrZDvPvvss+GGG9a/DMpRBIoRmDx58pQpU+aZZx4fvcthw4wZM6jzCy+8sI8ld0MIfPzxx9///vct65/97GdLLbWUXcohAvUhoH6wPpyVSy0IqB+sBdVy00x9C+UmovAtR+D4449/4YUXQrF32mmn7bbbLnoEVvGyfrdnT1m9isA08vLiiy++9dZbQwmGDh162GGHNbI0yrsyAvoGK+NXPHarNG71UGiNGTPm//7v/1JZ0vojp1jg8x+iB23mSKUkTxFofgIXXXTRv/71r6icvXr1inx02RIEkAU/+uijTzzxxMSJE9ly16dPHwbrCy644CKLLLLeeuv169evJZ6iuoW85557bEoTpQyfeeedly2J/AYPHjzbbLNFAYpdvv322zfccEOxuMTaZZdd+vbtWzh6E0a88sorP/roo1Aw1HISizfhO6pukS699FK6FbRTK6+88uGHH57zy/rPf/7D0goarsUWW+zggw8eNGhQdUul1NqEgPrB5ItWP5hkUmcf9YN1Bt7w7NQPNvwVUIBib6EZSq4y1I7AK6+8csYZZ4wfP37gwIG77777iBEjapeXUhYBEUgS0DeYZFIVn9YCWw+FFg39HXfc0SXc2Wefff311//GN76BTLDLwAogAiLQPAT++9//JrVZFK93797NU0iVJA8BFAZXX331nXfeiRDZwr///vtvvPFGuLz22mu32WabzTbbrN1e7pNPPomg3JiUcsw///xbbrkl6pbK+SCRz9N1lirJjjvu2J0UWi+//PL1119vD/vJJ5/sv//+dilH9yPw7rvv3nLLLTNnzuTRUK6PGjVqrbXWyvOYtFF8O4R89dVXb7/9dqQMeWIpjAgYAfWDhiJyqB+MgNT5Uv1gnYE3PDv1gw1/BRSg8FtohsKrDLUjgOgDISfpc87CddddJ4VW7VArZRFIJaBvMBVL5Z6tBbYeCq2cTKdPn44U9d5772U/LwLBnLEUTASah8C5554bpG8Uaeutt15iiSWap2w1Lckll1xi6bMra4011lh77bXZ05Pr2K32AABAAElEQVRzQb3FlaOxBFiQfu
qpp5ruKrUw2I+64oor7r777hNPPLF///6pYdrZk21VGDq46aabTjrppMhOWjtjqfzZ//3vf/tEHn744T333LM7aez807WDG+O0aJ7sSbFKx6omu8TBmPDTTz81Hy7NneEgilfGT5s2LSOwbolAkoD6wSSTcn3UD5ZLLGd49YM5QbVKMPWDLfGmio1GWuLRVMhKCPgRZs4xaiXZKa4IiEBEQN9gBKRal60FtgH2Xnv06IGlwfDDHXEH3+WXX85q3MhflyLQ/ATuv/9+rGuG3zvvvNP8Ba5KCTnOJCxQCqltuummBx100PDhw7G9hsWnqmShROpAgINqTj755EibhRnYVVddFQ3loosuSqNtxWASfs4553hxs91qE8cXndhn/ycfmc8fMxResJ4Mk8fH52LuqN/0XaqFSS1VnhybMwwkaVd92dih9cADD3gfuVuLwIcffvhFb/nZ/7YWxJ4CW9TsdAw1eZlllhk2bJjdynDwOWy77bZB08l2yU022SQjsG6JQERA/WAEJPsyu8dRP5hNr9y76gfLJdb84dUPNv87ooTFRiMt8WgqZCUEGGEGs1Jzzjln8mytSlJWXBEQgTwE9A3moVQgTGuB7bQetsDTFohy/vnnc+KIRWQf92uvvfaPf/wDq2XBE7nGH/7wh9NOO23AgAEWTA4REIHmJIA2yys2UGU1ZzlVqmwCN998MydmWZjFF1/8Bz/4gVdJYr+Lk2mwOBTCPP7445iAw0isRWkfB6c3/exnP7PnZR0Gkrtnn32WI69Q9QX/sWPHsl978803t2DlOpZeemmAJ2ONHj369NNPN380Z5zgZZfd0sEjByNy/uk4zQWFh/eRu5sR2Hvvvb/1rW9NmjSJM1bzPxq14itf+QodE4sqIu1v/kQUsj0JqB/M/97VD+ZnVZWQ6gergrHlElE/2AyvrNhbaIaSqwy1I8A07fe///1LL73Eok+dGl47zkpZBEoR0DdYikyF/q0FNmVpeYXPX250jDKtssoqRx555BZbbGFxObIFaaldyiECItC0BPxeNMZzQ4YMadqiqmClCLBKlFUFdvfLX/7yT3/6U6/N4haXhx9+OD2cBUNh43WZ5t9uDqo9BjY32GCD4447jj1t9vjPP/+8ueWohIC3s2Sbz5577rkJEyZUkqziNj8BFjaVpc0KT8QJdksuuaS0Wc3/fpuqhOoHK3kd6gcroZcnrvrBPJS6ZRj1g83wWou9hWYoucpQOwKMMxltSptVO8JKWQSyCegbzOZT+G4LgW3ADq1SWDk667777mM+GQK88sorpULij8YLM/fsJ0Afxk6CUgdpIGw1Oza8FROEWco+QOp5P95mVGoAS0qOOhDAGgwm0Xj1c8wxB+994MCB+TPlVXKcMnWGiAsuuCD1gbi+AlA9gmf+NENIqyReuE/FC/6+4llIIlp14okoGALxUmduoTEaN24coyWKjQ0lixiV0z+LzxSzzuHBEQvyyzPqgvObb75JwdhMWSpTy84/FPnaF+efMSpqWe/RMiIRe0fYOWQHDLfQn7HTn1u+GBYMf85y4PGZh7CaOBUdx7+/8MILRGGBVe12ulA8tqKyfYcsyKhUkxWBqtsle60mT54csgMFSxH9PlorBofccOvYY48NPpyCy8I0hvIWIPUtsLcG1Q4HbvGykq+A+kll46Nmn5P/Ni1N7/CVAf9kanimlgF/H9d/IPQmfF9IwKkhqU/tC5Dtpj/CMNpdd90VggEnOHzW+Pj66RPk2yFk8PEl9GGq6ObT4MHRwFEhcx6HxpdLHeZlEZ6XlRHLP4t/XqrZ//73PwwGkilWXHjMLp+I+vPYY4+FYLyg9dZbD01quETAt/POO0cpRLRTKwlRStWTKLWGXPIIoblY5PMfACmGfy6PNM+z+LiEL8UkPGxNexwriedPvrQDoVT+0XwY7x/KWepvqbpnWZeK6P0zssv/FfgE87h9CX0LkKcT93HJK/UVl+Lp4/p86WfpPfkAaRtTTw
QENTt3aRMYJwwaNIhWNM9jhjBgpIVk+x2LJOaee+78EWsXUv2g+sE8tSt/C1CqLVI/mIcz7ZL6QUCVarezGZaqe761z06Bu83TD/I4jFcx6sOMIxh5iwqf56vMQzLiYz1pnrihSHlK4nPxfW70UPW/9I/ZZe4Gp8uQ2QHINMgceMuMJfjlH0sQt9jkmgOhqVG8LMQvZeXonyXjPZb6AAs0/jxjEOP4ObLPOuM79aUt5fYv3d4p6TO0Y4DH2C9DapEa12dUigNhUuPiyVJFsmZIyQQodZLIJJTRI4IdwmTMQ30xgpuHoqYhYaYNQeZmD5sM2UAfj6XLYlTrEchU32AGbfjU/xvMlg9TmRHj8NaYRvGFIuClHYgewbcS3CpVW3yVs8bEx83upPKUhNxTc/EFpq3wl2W5m0ihRe+FfRizZ2WiwOh5Hnroodtuu+2ZZ57x/kj511577W9+85uRsJ7UMF0YQtJd/fa3v/WxcJPUxRdfjINXdeaZZ0YzduRurLgPUfr163feeeelNqxRmrqsBYEXX3wRUzC8fSQ7lj6i2JVWWum73/1u9OIsQHDQBl100UWkYAfcoQJBKrrrrrvefvvtZtRrl1122XrrraO4XV4yvMYyWzLYr3/96+C57rrrHnjggbgZPHHuvYWkSJjZ5G/Y4YRAh0OJ7C4OHhbJOJXQtLx4olH42te+tsMOOyQFQOiDzz333JDCWmutdcghh/DIf/nLXxixGTTaMmygYcQp+lIsX7REf/vb3zCeZj44iIUFJzJFAG3+FOwnP/mJXQYH8q/dd989uNnQ84tf/CIKUOA98oJuvfXWkM4222xDMQCF6ZXwUHvttdfXv/517vKp8qJDsOOPP57GhIgPP/ww2IMnj4xCZbfddgt7jGhYr7zyykceeYRhUwjAX6hSfhoT86ncwUukncH+lb0F0mQAjQq/uhkVLipd0R133GHRqTz+RZt/cDCZpL2lEw2XaBq8Qsu/hSOOOILHxCAe74UsCP+rX/2K0acliHYZMnfffTc6RfNEV02CfIzLLruseZrjzjvv/POf/xwuSfw3v/mN3TLHAQccYAn+8Ic/NMiPPvqodQEjRozgpDee+rrrruMTDnHpxSnejjvuWInZTDoaK4mZcCQLsgsQuMurTxpbp0Luv//+Vl0xXrznnntaUlV0kBHf+L333uuN+MF8v/32K6VTJ3dmjP/85z8568haUTzRzkISlVKyPfnTn/5ky8n5pjbbbDNyRF7M52Yc6FX5HtnZlv10tGw2DEJfSANoCi3SpDWLxnDMVY4++mhLk72GybrEU/C86NVCsL333nujjTayKA108GVdcMEFtMNMs0Mx+CL4JGno6C+wbRI8aQm//e1vWzmp5+hlwyX1n+babgUHxOgLgptKzpcYBeCyPj3Of/7zn6inCyXZd999g2OfffYJliTpTXhqK+cJJ5yw3HLL2WWGgzfOGoUQwH9rVMg//vGPGRH9LToamgLvg7vcryCK3uVlJZ14tdpGTCbw+XCQ7SWXXEK3ZRMMOgUaRlrmMAxmjQLNCB26VVT8Q0VFypD9pAweaB+sXyAwLQlDMr7lBo6x1Q+Gt6Z+UP1g9vdbh7vqB9u5H/RjdY7PPPHEE6+++mqmgYjtqHsM6Rnw+EqYv19GzsOsMMTleGCMA/l0gpth6lVXXRXcdExMYeiVco5G8pekS/FUsmD18fnRj35k87suc2SQUGGXjWbi2muvZWZhg3wyJc0VVliB8cDyyy+fUYZik2sGHsz7EIwgCLbEyRHDJAyqER2YZx4Ho6Abb7wxhAxjJ4tVlUkQpa2d/CoUldd98MEHBzeTMgbJzA2ZaD/11FPhiwu3VlttNUbpkbSNISKGW+yR+b74ZOwyOPiC7KPbeOONv/e97wV/LxNDyoQwFvF9mP7YNBNl1YorrkhjGLRWFAwaSKhsvkNSvDJka14QERUgXIaZL+NkW7zLvJWlWswyMnr81KRq7alvsCzC3eAb9N
8Cz04lz5YPI05E+oFUnB7HWFGf+RYQqviZciXykAywlmn+khDl5JNPxrJOiDty5EgmfZYODqZgiC9oAbxnfncTKbQotJ8Gm1zPHobxBCql1HPgaY5vuummUaNGIaJi9GNR6At5waFlpOtCqhi1xUQJgeGIiDwSaSFUsqToqCrsti0pOcolwMvluzLBikVH0/Pggw8yLkSnFX0YFgYhHWoeE1wGfzppBsfIvLz02aLUx8EA7sILL/RKDssXT4aJJrQ1fxzcQglHQ4Z8fPXVV/e3vJsOGz0BbaINC8JdenQsy0GMZoUBhI+Cm+EXsSJPLomFKBBBJA3lpptumgyQ06eS9xiyYBCDKPbpp5+2HFO/ShTePEtkjgwUDIN++ctfIhilHUA0bOf2WWqkz4AMUd13vvMd8yzsQK3CkYE2kvPpMG4jI4bjKDt9u+fD1M2NSpXW1bJDYWDuVAfjV1OyJhWrFoVFCTx+0NeapzkQmEIg2c7zqY4ZM+akk07acsstk2sULHqFDroMmgW+I58OLQyL0Rh8I8jmbLDUquXDp7r9HAnNXwjDEJ/pma3DYLqeVGjR3Xga66yzTmr6FXryOjj9y3dtIUFmd+inmdWsscYaySwAxatMNlakxoiK9gT5AuuDkhGDD7WFto6GKwrA8zJ3QrqdrbozxRjRwcK8hVV74eumT6ciMdHyKTM5QZvOqwye9OxJhRbvwjoFVglUosL0WVfopoGiXfKKRhKknOg56K38ILXCjKLojepxomI0+WWFX0G5T1esEy83l2R4vtYwjTHFcwiD4InBA1UF/TQt2FlnneV7DcIQnuEWdRiFIqsNkikHH3o9vuhoZEJLghiRVih1eVCppKrrTxn8E6kfVD8YVbAKWwD1gxHPUpfqB0uRaQb/Cr+Cch+BfvCKK64wnUEyelnlYYGpzcgYBLIaI2mVgRGj5YLyLP9EoKySdCmesjJ0YweDAcQUyZkFYwneDtNA3hcizuQrKDy5ThWMQJgcEQgwm9h+++2TK5kqfwXFGv/6y6/43BCAMA1hOBQ9NQfBoABGu8z8K7pV+SVSJirDpZdeSgF8arxoPlgmescccwxFQoDjp9ghJKIGpDrIUjLmccyqvDA9RGQUylyY8SprSVmM5fNtH7e+wWxBRP2/QepehnyYuzQmLM5GUBbVUuozHy/1nOWYJsKqqTykrJJQWhYNmEKL8kdye26RYPRQ+S975g9ah5Be0xhpGmjsTj311FRtlhWMlQI0tQYLf3Z9IUa0ANHrp91k3Gx3TbllPl7qx2swfznqSYCx7OWXX57UZlkZaIyQTaMsMR9zsP8G8bQJLs0/OPjyUdJEnnW7jATENmJjhE01jrRZbCnzuxB4Ih7Z9n8ky0zFRqMTyYwsGN2/35ET/FkkFWmzULSwWsdiMehkz1Phw+0qeY9WBiYMXptl/pGD5XWRNssC0Fyya5OBclKbZWFQdpo03DzLdfCO2K5kc6fU6GgCeE2pt+rpGQ0Quxytoi1gzVT4+R1XUZnZABeNia2SM2NkYOr1N1Fcvne+6N/97neRf7UuUQ9Ql1JTY2LDatAbbrgh9W62J89Lm2Nh1lxzTXMzMTM32xoYoNtlcPhZNMYQaqS94DP3/ZovQ9Bq0NV6T9y0RbQ2yTmnBWMFENOJSA1jd3EgjEhqsywAbRGLEO0ycqDuIv3gyZbc0BF7mPfcc08UhUsfINmzE8DTZrVKWTYrktlVxQdpPoOcUhhp0FKftPKsG9XjVF7ymqYQWbyp/Csot7QFOvFys0gNz/CA1o9mMPUunePpp59Oy+x1Pz4kFZiW3/tEbr73UiMTRhckHoWv26X6QY9a/aCngbvyFkD9YIQ09VL9YCqWRnk2vB9EsBNps2weAZNyv0qWgZq9d7ohFBgRWPovxufmmX9hWbkl6VI8ZWVoWkdUN8otJxuAokW9WCOIFp0jorHdcpZ+4ck1u44iwQg5evMS9Hps3jIL55Zj5Y4CjX9D5FdMvR
GIRzN3e3zm7Kx2ssvqOpCJRdosSx9hHfZ+0DxFgzQLwPwUZVipgSXBmPJ74bBFxEF1QoGXbAp8mKZ16xvM/2pa5RvkiUrJh7nFfgw+hEid4SHwCSPC8rOwGslDCpSEXdFW1OQjpMpqLHyXjibaoUWDZXIryo2hWF96hJteDIehOSwyYTqMZhehMEs7g7iNF0k9OOWUU1hzHaIj/7J2CgGBmZ/iLhJtL6Sjc2WK7lsHnyMyL18euetDgBrPomDLix0PaJ5Zbs+L5hbCWU7xCXevueYaVlj4dcEMTfikvVzmq1/9KiJmwjBgZcjiRc+WRbkOtD5m94ydKyYOZos0tZTUkuu/QhZWMHZvIL+2/VI8r42nUWJttdVWrDphWzR1FaU9m7LpfUkBxcxll10WGV6wwoeKzW5FDIKh0+WSr4DW3Pp7Phm/14rvCJgWHSN+GH1i7yozB74CdkYGu3xgx3322WfjzxcaHpwP0KzAMTQ0M4P2DZJsJe/RSoXDoJE46w6gHQ7Q8mGCm/PV2HDJIjheEBI6ns5GQuzO4UcwdAbw4TUxh2EPhGk4eEwGtaXYJvNK9UEmyJJ2u0VhaHxQ/1AMZILsLAy3aMGR9QeriRa4zg4jQ75UuaTFgGLlsZfFO2L7DkPVUCVYcsWkAsghWeoMH/XKK6/Mm2LQyY4f+oJwi4+U+YzvjIuVpFQsWnVM2NGeUP+RpXqlC+b1sJJX6uNNJsjDoiNB9W4LTKiBW2yxhYXkK8bmW/gwCcwWh+il+768rGWhlkVOB1M4Nh9Q7TG4jLCAiorSKMRl4yCNzPrrr29JoUdhnmCXKDsxMEhzxNukSWHYFJojLmlS2ClrIb0jPDXfGlup6LXZB4luxhYTUBOIW+pz81ocGIZ2EikDDX7IAm7UqEgjBUAbzzGu4Hu33XIhlldo1a6CeQhdummjAqgQku6M10Qzi3KdNvaWW26xT6bLpMoKUOcehy44dBzUOvZoWlFZZxBGX7QD5lldB1/6HnvskZomny1tvjVZNNSMMC1kVb4CSy2nI1SG/J14zmRzBuM1sVmTr5WP6/7772exi1W/8O3wxfE98sOBDJraSysaEqd3Y41zxloHuhisYrIkgri0+TRBpuBPTnJyFrjyYOoH1Q+GWqR+sPKvqXAK6gfVD/rKY50ys076FMbkdnh2gX4Ztoz5ba7H6DHamcFEwHo6RsjBQL0vT6q7QElIJ1s8FQnBUvOthSd9t5kRjtJndmCzM26xV9srF6PAXV4iNzCBCfMRrFaEXWsM17E6ZUtOmR0wRGcZpSVYbHLNWli/kJf5CNbOyZdpKbJBbEWEAQD1DXeQcliOlTvKnQRRjDrIr1Kfi6IyXUJcxqycyTsDdSZ6JmpjkS7TxozRXWqaOT0xv8HIkGrA4/O+eNGm4gojQ17W0KFDGXbyddBNIxk2NRXVhq3/mDTMyIthLZNQBtUIZ3gQxD4hMNn9/e9/D8slLTqtBKdU2GU9HfoG2/wbpEKG+hbJh/HEfJedk0LziykjmjKCsSOIptJW/CMxQMYb1gfUSB5SoCT0pzQpwZApzQiiVz/f90KwAp9bsyi0mNPSf9gwAhGnt/6H/Isxrj0efRu2F4Ngi2YXqTTNH+s6w4pRpD8I7tk1HML7FiqaKkfsiI7YyAwusf3F5ucMazIOlbGCyVF1AuxGt1pBB8bRDmbijPENPRNL2sO3zetjDMSuZCsD+/n8nj9/RBaqGhQ2KIdQLVj4Yg7qoWnR/NiOdsT8S6XMcVaIvCNJK8IjC482yw5K4aNAvsm3YIJar3C1KOZgoMYhRsQKPnwm7G+wY1QYnfimhKSsAQ02hS0dImLe0MBSAIAj1ybl8IAeMgRSn7qS92glCQ4mM1glYkRijxYF4JKNZUcddZTNB1B9obpA3WhjI8IwssFOscUlAGNrE7KjA7NbBRy0HmhELCKmGul1wiUVg/YKICzoCz5NpdDi+zLdqpW/sINKyJyHT9V/GjTm1KKQJo
slMWJgFuFoZnkviLnR94QAbApkTUekrihcHh+RMTFKlLDxkWF0kN4yigphEOPyXkodqked96c0Mf2js/DaCGopNdB31TwC7ZU9F9p0r9Dim2JKbMXLvyzUouR0MB849NBDrVsMreiPf/xjEygjm/YKLfibFpwPis2j6MNCXlzyGbLZMTQdzBWTrZmVatttt0UTZhH53EjW9i/6tSwWBQdhfHtomifaH5qpoIcjDJ+tV88TkVaIstncGxG810/w+kzZjICDKuczbYgb7FY3KADKVLSD4athuQM1h0aMymm9YRUL6QnXocdBIMWP8tMB+afglQVBnvesrptJeOo8nAqMJVvrAanh2N4MhQwFqNZXUO7jlNWJl5t4RnhGGnakGcEoBg21X1pEs0n7xjcYEsFBp0ZLHuonJOlAU1ETnt6Z4YTtA0ZkgISRVWg2ScsoWE1veYWW+sGAWv1g4FCtFkD9YPYnrH5Q/WCyhjAl4ZRTuglvMoRgxb5KRtem0EJ9RW/lpyd+qRNywGRhUn2KlcTG4aSZFE/ZBDY1x9p5+vG/z4USIsE0HxQe4axT8ynLESQJIQrDCWYltjgVkQhHgB922GEmgiNrU2gVnlz7gS6zA2Z/9t5ZW4NhQ/Y9hPJQNgYDqaKMsp4xClxW418f+VVUwnDJ7BXrMibzZHbMII0xm42QsahRanSXmmBOTzIiX9tAyTviE4i27CPHQ24WEuQuGy4R7NjIje4jIy9/fBcyQNoTFtrahjOUmlQzxrEhBSSKfNRWJTKSrcUtfYNt/g1SqVLlw8g0/BpfTlkyq32IFtFAY/ooLAWgArNGM5xXVwt5SLGS0JVTSJP8IHnmMwxfEF+xFyYX+KwaYHIQKSFahPBDhcjqb7Z0YLDImiQegwPPTW/BJS27yQoReSAhjUSuAPI9q0mKiUuTZ8t50HUFxSD+TLwZyuDwPz+U8doCP+zw4eWuKQF6TX6WBaodXyvwZ9xD92YB2GOHytcuqV3mpvdKyqaRGJpUxULWzcFwkAJE2ixyR5XLk4afr9WhYKZw5ZLRXiQTtMIzNERbE6l8yNEC4GB5i136rw9Vv/kHB/QQJSP2DT/bgxIFK3VZ4Xv0yTLWQUvH6DN6NB8GN+86mgzQ1rOux4KRTvLYHs8HeXclsmPaN7qTkB3WU20EZgWg3toj0IiHbS52t84OmzaQbzRjDCWBRqmfqaaSZeYVMEvhr00bCIPW0M8rGGKaNiukABbmrgaHGp5taTaZbx4f+hEsbgdtloVnvR5diV1iWtrckYOXG2p1+MvnYz0UIekvGIVHNRB/08fgxkC8dUZc+tUVrJ8wSTG3qvtjM1zUnSG+R7dnuXixMi/X9jcTgN0tps0K4dF20yYEN4qlUm8KgbhpsywjG4fhA0CbLFkAHGBBohp80OvYZAMfD9MfsmXRvVLQ4yWA7+hpFkx1kVGZLdkaOfzSUVpvcPmvhkzB5e0nV7EYDelxqlj+ypNiCZQfDVLPTXpC4tX6CsotZ7mdeLnplwrPN04LHN31K8y4hcI1aqMQcHgf34xESbF5PRp3MeRA1hMFq/+l+sHAXP0gHHwFrlYLoH6wy49a/WCXiGoXoDn7QZ4XqRyNUjQ3KfxVsprKlpqRCMupDSnTE9sYhKcfQ1qYpKNwSZgj5BdPJfOtpw8TMU7NtA1VaAKCqLRwGbzMgRG4qTFCggxCWIL2hcih04KzwpNrJndfSFa+vdNOO0UDbOahft2kWa0o/IBRxHIb/wbKr1gxbNqs8BQsp+NnT+RlR+ZZuQOZTFQNmJ0hurGUGWFGKxdZoMn2FAuQUTCkWEnzDIia/ZPeeeedlhSTxIwzESxYPR36Biuk3ULfYCn5sO0pBAWP46Uo+NCmeZE4gU2u4vuynPKQbNqFS+IFUH4ZR1Sq7NxT7zZghxZGmVKLYp40WNGRjGaeizC8ZjaaWGBzsLL4tttuCy+P4QWLOJB/cZcXzPAloEdCjZoKlT7+aBdNLo9oLO
zMACiddOjnPGhv9tFylKPWBPx7RwLCKvVkjox4kP/a5BNjfbZyxO948MvzLRFedK2XhFteSUdSyRHC2FaeZBR8bPgb7rKvwobmPjzDwWhkwF0EZPxMjM4GbdMl8I3YQhUG9Ej3ItpI/336ZbkrfI8+L0S6jEu8T6qbUU7Sn3pinkwkknq70GKEMOgnaEb8WMri5nH4I7hY7R4pTkiBF4QWwfSvDO6T+o88GVUljFfdhdYvSpYdPDaTiW5RT1AxRp7hkkVG1LfoFsugTAsLlmh4GgKjt8bsgJ3oVvWpBbmQdWoloTzoxUMx0OHRoaQCiR4qukQJhP20vfbaixVn/hbaaGZuwZoHFYyvzAYZXsVinj5utdypzYVXq9MsWF6chmp1g+8FRbLdMgdKJrPdWmo6EbVaIa7PFGUYtSJar0AwvxYpMsPIJTZXQ49PDUGz6GdcxCUAy2VC+aPTv0vR5nm9RtOesQ4O31sxSKV9SGaabEaSYQr4NKTHKVDOGkWhRfKHNNBqITvzeVXrK/Bp5nGX24nnSTNPGFq8ZE1jWQae1hpEw4OQLF+0CQcZgZfKy0sQLAyDcHbMl7J0ZMFq6rCnI5fUZl/9YFn81Q9GuNQPRkCSl+oHk0zq49O0/SBjZuZQSQiF+2XWQyMEt3XPiH3YCxLSB4KZ8WBG5te1JAtgPoVLQi/TpXjKcmmgg2H2H/7wB1vwwciELeypY9T8hcT2FBrKoDNAKIGxLLQLfql6qTUuhSfXXe63o3229XNmYS//E2WHLLfx9y1hneVXqXIPRndmtyZjdJcNIftu6pQcuY2ZD0ESmxyaerlNRsGYwPraFUrCB0jbYuN/P4H1YqvsYtfnrr7Byjm30DeYKh+mDtDXGAe/qNc86bZMzMtCecSY4akLyEMszaSjkpLQ5VmCXs/ixTIWoCxHiti3rPjVDcxEd9ddd/WrxUP6JvblspRFYxo1xGHWCSHhsmaORtB0ieALCi1TBvL6MZT0/+y9d+A2RXXHK4olGg3EEkVKECGWYIkCGhDjC6ELgiioNAEFKxaaoBSpEpqCIgoioIAFkCJFjKASC9jB3mgiqGDDaKz3I+d67rmz5dnd2X3q9/fH+84zO/Wzc6acMzNrBi2iE9Gy8BNadIIYQvutqVJrQiB576UqBjx5X27Q4vXZORvmSTZVsozcctMk3+kJQ72YvTGP5A/7K50Imt+c4vnBFxKJ6ht2vjDekwv+ZMGpVaiif6frwVGcB7QqQ857bJVRfeA4W4JkMXCEw9N45qYYuN7HGyTBqi4u4IqD+kTG9jSS8eXcQLnHM8UMtN5LJ9lxxcGgBq0kO/9pnb/9xIbHTLpo+OQpnvH7WPjQ21A1PvplEsqGCS7R4g4uXyoTBvM5x6t9MMIOZLYrlnNxXB/UoGVVS/6Ndv1ouYw3AbKeiadYPIWog8YE6P4jHYm4xe7I4gI/ng9LZm+8AmxpKCAsMIe0tttuu5gprYuBm2uE8eSl4LCz7SxZfXDH6m/zAYtIfSdl0IroEjtorNTQ7rGNOENXpGH6tufR2x6zQezQSdxBpSDJq8nPKDhe8iYRc8KgRyidgMU04zgSu5EYpspNpZj/R9RVIYfzj+XXOOicNQ7GZqlx0BvGEA6Ng0NQHZnmLI6DOVLJHNsNWujROK9jiKJOLZlw1jDMKUm9eqom03E+wtoUD66x7ZsTWpkFQKvAthjTvJEURgWuxOB7Zugc2LeKFbMq/V4W1zR4lEV3a1buRO9n0xU3nFRl3bt/nMuRuE/n7lZf/cazmwb9VSyql9NLOJwjap9K9TZx2tZBaeM74KlC3M0ZhXq42jVPWTLYnFWrkLFhE9Hb9hTKIL0WBiqvHb1WqU6GXct+boEJlRm0OuhDPKOiI6ckaET5M+Mxd4RidWNvBAsuV+YUs2voM10GLT58VbRmoYryo1TUqlS9aLVlpeEGLTD5zYyl9kA3aKG1j9p8JjToNFHSeXeGRrJmcG0IWsE6EIjTi/
r37on7FbpxXcTT0i0nHmvaHAzbTB8vvfTSqM8tLWTpAF8ast4TXRW3wx1//PG+w4WzbvzxLVB0vuiFWQAw14xTh/oE49Oc9xjTmRU3w0xsflU2m+mpjg14Vh5eFiN6cRtUX6WNjaFGKunMPUdsusxTu7U9T6ShAxTxIAIT3NKeh80TpedaGKS5bNAsuEwpTjnlFD7yFxXBLJLdoMVcxOqFwxZUFBKVev5asWFlRwbz8ZSQGIGwc9dHibeI1Ids8hRQPrnkhG7RzANMnwPxtWrO2sfFD1nQa5lBCzcjvk0J6FQ9WXzidDbWt0kJ+wrD27fNBJZglMe+sqhPZ/wjTn15xvOUWvPpVu+RmObxtcho3LVixFYxfikYD4ppyIXhwGfdEylPlDuNgxoHvRFOtgfQOOgvYmiHxkEIz8o4mCOV3JXNEsP01xzux2ErDnakeRtrvrEspyT16ikvzAQdXG9w7rnnegE40WKbht2ns4MtaMx7fQ7PW0DpwR9rPdRu3L7Dcfl4DSAZ5S+umWOQBYY023pYVfi+VCtV6df7RwUCIWtWyvXp6OlIApEt1gJXNfi6YGQKYwggGRwD5CSLKZTBpE2edtppSZmLP9HJuDm8rT6kmJr7ZJaEWwfNoEV/zuZvthGj8TZrdFTIeHYNHRMwaKHgi0eVTzjhBDZKWHHPO+88VFRJfZKdknHNmVQy3rAcY6F85JoyOy2Ljp5hjJfhBwUYNVE48q9t2EHtxYWH3/3ud13nFS98THLUz0EJuJGZXNq+92gEZXoU28agZe4l8dNPP923j/WSYJNEsOMeeuih73jHOxIrGqL0xbv/0Oxj9IpHWJokS5ic99gwi6kKRtvz3oOCTX/bizYbBhi6x8QIt//++8f5PdfGMgh1Y96hMVAkvlk1HoMWWjzW817IuFJtUl82nrB7ETmywKydGKfj9VwsX9k+w/YWAiBZXG/I027bQpuUJzNM3A3UJCmrV5OQTcLEL2NhZuCCwSRWPINL1mD0XSwWkv0xzPlsnoSqgjbMWO97WQiT6Cza1jcpT+eftITYYxSvXuyccsOIExlxGpZtuGAf+chH3N5JLnw4Ku7W9Hzbtop+pcCLMfeORHU1/vpqHHTmGgcdBY7J9gAaB+O7GNStcRC8szIO5kgl80DmfnwzzJoTU0dudePoj9/zhkGl9Grc0uaXU5J69VRpduP0pGp8Ostnp1zrXfwQUefyUPf99tuP5SSfxfIsSI0ZO1Yu/ji2RXbxwsnMxTVf+aU6cTHbufCDRpxp/dWgZHpPPE47aYTM3pkHoiWOOuTeM22VoGSwFa6+Ak+hDHZYWsYobfUhNSRjsjXB4qMYBavKxz/+cXvK7UQYtFwtk3Mf3gQMWtzFHw88Pe95z/N932zQQI+f7Hzni1kYwDiVZpWPOqwIC3ecVcROikfgM4MW/RQ2LTehYSYxBT2KMDMhcMSHPSN+JZHFTTLSz/EQYM+U2x1r3ntUOvt7jxp5pkcoxIs7r8dTi7a58C3QaM3iRiw+6cE9wrRVtAzcl8WHHNqm2TA8qvZ9992Xaf21117LfWh8DyPO/JALNPVclJdojUcmnvMeRybeLUCsV7cUamKBEeuLqdEJ5rdy10SZ7CPbnOhlYE9HFB/8udvNn+JwKYueDd1xlVgj1LEz59hN8YNwDbNrG4wBwq1ZxO1gWuC6jDhgMe5EgxZ14S5jPvdoBcNSzNgUj42PvOe9bY1ywsfPVfLS48/SZHt8TVhM41WlYOSvNFP3RPGXdE3cJQ18IBOGFkWHxnDvd6dQnbg9ljAjK+h59euALcMTg5QlG1tgvxmVpjbBEae0POPxxJbMDirPa8mSJeym8p/REVvFmKUgFmOq3EMMoDXDwXjqrnHQOWscdBQ4JtgDaByML2JQt8ZB8M7QOJgpldGghTYNg1bcWJZsdapveJklqVJP1Wc6hqeM8hxhdwUL0/vSI+w5JWFBxDmtLbbYgleA2oENRgw9niBT4ne/+9
0sSNET2kUXOYtr+tJ3vvOdPnXBOLf++utzPxNndGwb/cEHHxwXHV6M8TviAny29FfjZ5WZY5x20sZMllHgoKaO1+ln5tI5umSwM7rMiFMog4nmjXtrRtYxRmmrD6lJPCZLsLYlwWpFz89WdeJi0MKQ7EowDhfV5Fv/aAIGraRA6JvQlvoXRC688EK2Y8Ddg9HFoM13ZZZfieYB3BEPwcVjpARgxnDxxRdbSPJCl2Fu7hu0YZLP3fsHKhlZ3aCFZ4cjKV4kOXIIICTeytu+90Q3QfQmIpdT2l7iMnr5BypJkEs4t95665iyz8bM01pvDJDvRtz4Oit/qICZX/IRQiTC9k9hV+YI18knn5wco6zPNOc91qc8nU+xO2K295lx7Jems8D0wFGfjmGA/nCgotK6POV4zZp7miM+wqo0RDtPcrSfvnizn0k3Uhol8aSojD5+z3vx7aM3jwYtVs5u8uTLYbScJMEJ/uTyQ1/ks4mG1ezYCvPJT36ybV6cwaLLSmyQ0DaDFqlRF6ZQTpv9SkyqYi4TvOyR2bPvDo6NPxZvCPc0jDhD1Ks+TdoJmhrfFMwJ/h122KEqygSloKpIc+lfM8cbT301DjpnjYOOAscEewCNg/FFDOfWOAjb2RoHM6WSypKCbZnlRi+U137fICu4VhvLMktSpZ4arrU3TJkdP64rI8quu+46kBYFKwLbdvnjLXAwixfBItR3sfP1INMTUoacxTXVcWsZW9n22muv5NoPEncyY1tyeo7RkSw8Z0V/FaswK+642mLxaKot3j5CzQ7IiddCMjipVzCFMkibpGW6Evg1r3nN8ssv34pPK31ITcqZJcHEg7LL7Czch4dRxs7DUTuMMjX51j/6/7rv+nCDPvUPcpILtfJj4J5p1IH64Sp/ag42ccQTbcm4aytVC4lWiyHT3G4MRLHl++jRf/mFWmjx4iCXZKqfgxKIL7HqvSPbHLnzYngUFPRRs+nf1vKQ0+lgeu27Qphsbb755hMsJwDXXnttrhnkxjm3YDHLpANqVSp/KcRq+x5bZTQ9gWOX5baNpHh0Mkzc7S+ebk6CjeEne5HiKML14sPpFmNjIJfYaceaxqlkvAfM2yGB/URLjJjp9p0TpIMAxkuoGqaMySTS4xLCJCI3GrvVCr0hn6nzAFVnRDzAmB3MWjxH33TiPsM5eLPcDdI2fcjzJa0kFg3bT4RjmHf7HMGKtGN9k3SG/hlPLsZG2DDfznIxVSNOw8pmBsOOxbUz2LQsHfYd77HHHhFgkn5sFeOUgqQYI3/GKgzRN44sQE4ANE32RZOcRDLjahx0gLEL0jg4qR5A46A3yIaOzn2gxsGZGwfzpdKPYTH6cDzI9ULcslDzjYNiU8wsSZV6qpjROH349AB3MnuOHGZqZeTziK0cDMHcSbPTTjsde+yxvkoihXg1dOfFddQUsWE3sWa1KufQgWdIfxW7XLDM3MzT9xFS+GjDiEI99OuuSl8yWEVmDP5TKIPc/RPPjfnBm+Y0WulDapLNL8kTnvAESx+Vst+VYudla/KtfzQVBi1mD143ist1usmu9vg1eDaslV7Iw553t1uyQZ5vfsaa0+eSi/nQCOw2MF4J9ioP5rcVcSWRGxXYO+MB5Bgzgfje+XBr3CvkJUFBiS3Tf7Lp3t1xQOIroO7vDhqMbwJyz74c3hpbJRj1CJwOpEtNopc2/iRM25+IG6f+7W/77bd3TZ+ng+ww5/afbQ1ame/R850hR2x7V111VbEx4HPMMcf819/+JmvQAiyWS8dL93jOOef48QX378WBgccvpsMCQW9fTBYr12c/+1n3j0Id9f40VL/X0QMzny56+lN3UDty95/u8It98aGoHbbpcaLRtwGSCPdaeOLuiKYUX0Wzc2KttdbyMNPgiNt/sL19+tOfLi0VMPvdMYBywTs6XgGLWz6FVfUXocXPjVhR6UJ92wqDyNVXX23+mCpjn2aesb6lNR3OM/YYTHJKl4U1DTvKRalhIF
6sEWsxkREnFsDdA3U4nr47+ML5N77xDf/50pe+tGh19qc4YqsYpxTEMjRxxzaQ0zc2yas4ojWJZWH87HKMwi4Kn3VH/zG7NQ4acI2DseFNqgfQOKhxMLbDft2zPg7mS6UbtAD7gQ98wOdXcX7ehHlmSarUU02yHigMOoGTTjrJp2ToGdEP9JvXnnvu+Tetw3bF7WsYFDmw5TlG1W2cKjdfXFOjuMoublVkPTicOsgr0twRqzl+/VXzckIyXnRRuvrw1VzzZEtD5kw7f/rTn7qAe+K0cJqQ/4y20ijUHmCcDsngOGmX5jWFMrjCCit4UekWoq7J/XGwQad07tRKHxITLLozSxJtK74ed11NMbsmPlNh0KKg3JDrxY32OvPcYIMNfL8Mi14+3JroItk2e+WVV3oKTEeKisiIz0JiRYubCzihFX9amGIsz0WOoQnwXdYnP/nJnsv73//+xNZCj4/m3QMw64qHOTbddFN/xIEPbkj3nzgYGk855ZSq0zMxZHN3vFe0m4Y3TrOobNxSRDHwOfroo2N5cgZ4TwcDMMYzkuKPAb5UbR3Jx51TnkiNI/M91qQ8tY823HBD7OVWPAyuRbMNQ5EfTuIkXJxITaRSbNxgZ5xnjXoRW1vpNJRNQ93atiXOtritttrKM2Ijgh+HNU8mne9973t9nKZPjgat2PZoq8mJHOIed9xxTRSjDPYs2GjwXhIcdoe7+7An0d0NHZwBwu7igansSIOWB8a+ktyU648m5eC63bjX5Mwzz8QmlBSGb+fSKR144IHFRWkSsvnPaJcCC/YG5mFVf1EHffPNNyd9JplG9QRDhhUDXUZxkjDB64WZ5PhZcC5F5EPZSeNEUtz2WSQZ5QLhTSZIyUa/GH0iI44VIA6X+OT0KrFG9W62K3GDjYeho47di/tHx6SkIJahiTu2gZy+sUleOWHoIZHTmAJSyWXj0WdSbo2DkNc4mDS/SfUAGgc1DiZNsa+fczAO5kslyy4uPjKkPjNk0r7GGmu04pxfkqKiKU68WxUmPzCTh3iEneuhuGy8qBzLzAiDwV81Dnf/sc+M/5MEfYGMf9xy1G1xza2G8UhWvKqB9Jkwcwc1Ng8vAxDcPRHH+PVX3aqJNSueGimqjzCcl25G75Zd51i33HLLqaeemkS/5JJL4ob4uOTn2zfxkqck4tA/JYMQlgwWmxm3drnigq2BZ5xxRrLYJwqXzL3pTW868sgjUc4UU2iuDynGjT6ZJWHQRO0cE8TN+ivxafVz8t/QsuJSN+YQrKPsJzuUN9lkE7+cimPI2267LZ9ztKcMflx7yjc8MWCwYRbj3vnnn+8KUHSCfGGySIH5ARaR6J8YA8mFA1vcAOZhmO7ETa/uL8fYCLCFB2WcbaxgLzlqUz4rhYGE2Q96cA4q+u1eaAN33HHHWDAU9MxW/e4ytLGkwEvnWDGeyLx/oCvGynHTWmiQlgL7PlDYcTiJTrl4FKAqF5o0FfF+nEM86667Lho39O/MCZgrxNGXRKr23VelX+WPOtVPfX7wgx+kAMiXnaRhWsmtaFEDlRx/rEoz+ue8x5jOrLjphTbbbDNmclZg2h5jD0dJMLjSYunf4rYg+jrXZU+wgi95yUv23ntvb1GseN/whjfQbdI5U2zWe7Q9NsolKwG6zbZlZkHyiU98wk76YzM77LDDEGq+Esn0EWtEorjnG3Lxk8vo35k9u9SzuYECE5d/6RCwJyXmsZqycQiMzRNco8EdgNhr6flRqvqyii0Uz3rWs6qiMwDF0YRY8GFTTBQT4rJRI9HaW4Js/OEG/3g4Bv+4XbQq3/H777zzzvvss49tXQQy8ySaK62CwZH6clgTm6K9ShaErBj9oHPnovJy4/onTr9K02QnChMjb7coAWmxMSTKAppQ3J7J0zFcnxLLMNKNNYKa+gEypIA2yR5Vlv20E4QxnpkopkYbplcxf+SUuSx3qtCGictgx6Pi5kQLPKkRh9wZiJmau8Qx7jAM0XMiMn
FnXLGynX0QUr4B6Tmy5wDsVWAZ/txWOn4p6FDHHvvGDrk3j8Iqi/ZJd8d3LPjGO30+Jsb4JYPmSQ0RUuOgxsFiuxp/D6BxkLegcbDYFPN95mYczJdKhiFXERhYpoulk/Z67JklGameqs+936d8xjvuneKDysw/q7JAaeaauqowpf5M9lzjx6IS/R5rPVO4oc1jyhonZlHn0G1xzdqENZe/a+poS3ImyagQP//5z8dDYBS4dDdnaUUG8hy//qpzRdDIuWKKJQwzZ8QK4NCmIcG2c8r9RmRtyKoKsDQnhleaXzS/oUZgGeU5sqis+bCuBxvIIRkErGSw2Lpouuutt573jWjSkDJsS7ZzGoUMEkerZpHLOZ8jjjgCnXk8QEmCfelDMkuCzpOxgwuNvI50/qX7vz3ASMe0GLQoKIMZNgZT5WNy5AA4XzzzCqyzzjp0Rn4wDYe7PQwO+lCWo0W7H49QDDF0ub0BmvFEgiWCMi4atIq7ZmJeco+BAGpTzJNuGGAHzbve9a7SfBHpOBpZGBTKRx11lNuHaEL8lUbvxRMJ9+v4OHnNQROS5b4+tMAN08c8QH0x0Fp4mivKBf6qoida2qpgI/3ZDYT0mYbdpA/1IgYGukUMWq4BJB2KF/fjjEzZAmS+x4a5TFUwDFrRAPnfd/8VS4jBcuONNy76j9+H7pHP5HLCyU/j0YYZL/mrKgwGYxZyVU+r/BlfsT1zrMeU7Bhr6e1LA7OUwnwSH6EBZ6Q4+eSTzRNDSzQsxZBN3BjA+CsNSTuv2ZMIIvZ2lUZ0TwwtcZOd+5sD60U0aMFk5EmRJIXx/GSSwUcuschadryyqh6J9QB/+aWii/YOpwkWBn3Q+RFtzie96EUviu/OEkH+vGx0R4nRyx9N0PGc5zyHJZbZDikG68DmS0G6kYsvvtgPHFdNkIq1m9SIQ0l4R5jT/EQd82/+8OcE53Of+9xiUfN9OKUdR0xQe8MuJo69zQ1a45eCYnlG+vTbN47MLicAExvOKPNXTAQrV9FznD4aByNtjYNGY/w9gMZBI69xMMpjL+65GQfzpZKNTWeddZarCMDbbWNZZklK1VNxrtLLe2+SCPdbJJ+xRzPAX1Xc3XbbrZtBi8UCh2B8ZyeGEP7Q3bHNiOVVPHZAyGT/WbfFNQuZt7zlLfausZmRtederF08H1Z8Oh6fMeuvOleK6Tq9tL+yK+7+65zaoBHZxV66kR2FcLw5xspAqyu9H3vQEpK4ZNAISwZLW9o222yDncLv9kSPdMIJJxRDsr6mn0T7kTzqUR+SWRIsLNGglXk8i2reM6nqBH8yoseZBJqduNeetfq+++7L4qrmKAO7sAmDGrGqFuyC8UfYHuLGf/MHaExfBi3HNUEHw8wrX/lKjr1XlYFXxpef4q2VHpI3iI6+1MBJGAxg8SOQHquzA6NaTTkbJkt9a9owe/bjzUKuDWyYeFUw1Jqcw0AoPAA6ZbROzCxducwjJqClnD1WjSPnPdYk2+1RrFS3FEbG4oa0Qw89NH7gpxgF2wzrgeKQUww5Hh8kgjL7LRw1mTJYbrTRRhzh6rCZkWQRzIMPPjheEFrMC20ybTJ2yBbmGc94Ro3Sk0GEjXjF1BIfyl91rwg5Ym9j/2ASpflPOhzWTnvssQfDVlUsyhmfUqPieFQVd8z+vOjXv/717IusyReDCsf78ns/BDNummuIJZ7i4qhWcfkdA1CLONOoqdSYH6EXOOCAA+IdgLEAXLoS712Jj3DTh7Cd0K85TZ7StGpsqxMZcayETIiTog7605fcTXKJ4kn4cUpBk+KVhumlbyxNuS9PZhpM56oaKlvKXve61/WVV+d0NA6CTuNg0n7G2QNoHNQ4mDS/Hn/O0ziYKZVMa/3z6hBm1Zbc3NMce2ZJRqqnmpckJyQ9D3/NU0imSc0jEnKXXXZZsmRJjMLsnbPasX2y5+m1r30tu9ZisG6La1YTmF6qCswxo7jHvS/VSix2W/eY9Vdti+fh2SDIbj
z/mThYjyf2yCTAeH6iPa7SG3CkjC+6sX12PCUZmYtk0BBJBkubCmqWww8/nLVe6VPzZMm///77V22S7ksfklmSxMLSeeR1DlNk0KJMDDZRt8sZES8oDh6hJeQlsV8jMVFwlIT+lGtM6rukiK/UGEg6jGqWKdn1st88VkHubgTQP/JysQ0kixy00hh4UMEzlaxKmWkiqnNePZoUD0P7QYuH9tBu1TN/UvMA3Rwo9zmMxQwsRi9q5OPTopvwGOF23313dgrHp9ixOLPIF+zjxDceKIyBO7hp/AAhfcxayZyPItHXHHLIIRx/TB61yijnPbbKaEoC08y4fJxXSZNIWhf9DG+Tq/ampKheDEQMecFShb0n9sYWgJZAyelsuQwTK3IUH0+hoYPbOLlskNNpmLWijNDAaOFYUF7+8pcn0CxlArBxgVM4Se4Yp2m9aEsTwSktD1UzaYp2Gjx5U0xt21qzKBLpIDgY4bgd9/jjj2f2XKW0tfJwzDHeRDqdJhZHx3DJUVduGMemkvQAmD9Zl/Zll+WMzu233+75JhMv908cjPvxPfrlex4M1HHsmFraCAXDGWr9OMNh9c4lA9wewC5gr1HRwTYIprnUNL4gGiHvjrg1lvVJjThUgRUmdl+Gnlid2BtE/4m7xyYFnWvaS9/YOfcmEZFTpI97MBhHopaK6Q1TMvrk+m6zSRa9hNE4qHGw2JDG1gNoHNQ4qHGwKIClPplSGSeZfLQ7KgpKs6vxzCnJSPVUTb4z+ogWztoBnQnqheJaj8kwk0PUDqX75Lotrlm6MvdIVDQof9E94h91xHzS1a8qmSDeceqvcqrJ5I0djckKBbAs8NEnJMBzMuocd4UVVmCDrN3E7okw20TNe9BBB9VsIvfAc+mQDI58rVMog/R+6BVRFKN0SvRg9JZoxRG64o1lXtMe9SE5JaG7wBZupSKduLPEi9rKsRTXPbWKMD2BuQKVD5YwCqLH5K+oe52eos5ESfxobf2xiYnX5a677uKIJQfGeekoQVD2NSwSJ835LCTbfzgLSFyLxXSK473mRkvOnJKbxFpdYoaiEKNFLAPbK2ic8ES7hLiSV9QwxpAj3dx1TjokyNyu21GYkVmUBuCoL3cic0KLyT2dDsr3qHsqjdLWs/N7bJtRZngsN1W30pWmvNNOO/H5seIjmh8XgvG1ITpurAJR/+6Bp00GuV+OlswfL4vmx/SUxpwMn174HAfDEJ057Y2WRhYNm7oJGl9yopUyWy1d9sRSfeELX+BCRfMhin8hltZ+0003MZQw7W7en8SUu7mZXtMjEZesTzrppCHAditYfSwuaoM5HSl2CF5WaUuuT2Gan06JDNK2rbvg2BaDiI0g2KWuv/56o8fCDNNpKUl7QUgTQ3nRAFkaxT0nMuKwIRcrJvWlb6SyiX3LyzZVjpFS0NfA0a3WbfvGbrlkxmIWR7fP4ELHy+DiqU2JDHp5NA46iiEcGgeHoJqZ5pTIoMbBzPc4dPRpGwdHlmdoIJ5+fkmmRAa9RlWOfJ0JKYOL1TGzVkQenQMri+aWxSaL66TwfJ6HVQxzD77a0uEbCklqY/g5Nv1VZl24J5NVLVM7luRTuzZkmYMWkU1LKAPr9w1IBhu2B8kgoPrSITdk7sG4fhChoyUjcaygi5sDPOTQjl5K0q38XKT8/zvGO3RV+02fQWgmxqF+a63UUHbzKbnmHLBX2cQIaWfuwp/HZaxyaxaeDMD+KMeB8pHZGH85iVhcuif+8tNpmwKfauevbaxW4du+x1aJT2Fgmh/6Zf6msGxVRcKKiUKcv6oAffljRmpyz2GSXV+CRlOvuosgybHHMPDwCQAAQABJREFUn2zBNmsWaXLTxaxYsygtm9r4ANUUfoOqx7cz8aRo25iy+OtQEntBHSISZSIjDruRZq5vnHIp6Ktv7NaKGsZi3TIT3YjGwYYvtEMwjYMdoC1OFI2DU/6up20cnJ7yTE9JprwJWfHAxbbdbkXtsLhm+2PNCY
ZuxRgi1vj1V5m1YGdS/UVZmen3En0iy5xeSj5oIpLBUrzTL4N9KZxLq9/Kc7Ilma4rB1uBU2ARGEmAfSLcwnTyySfjSAKzD+j00093T3aFN7mpzMPLIQIiIALdCHAk5bTTTvO4U3sDnpdQDhEQAREQARHokYDGwR5hKikREAEREIG5ISD91dy8SlVkRglIBmfoxc3wCa0ZoqyiTooAJqvv3/3HYYhNNtmEzzZwmJ0eivuRzz77bDaHesH8+iZOS7S68qiXk1heDDmmjQDNptUXTWdiy/m0QV6c8tD58KXTq666iru2rNZ0IHwzaXEIqKYisAgENHAswltWHbsR0DjYjZtiicBsEdA4OFvvq0NppTPpAK1JFOmvmlBSGAhIBgdqBpLBgcAOkawMWkNQVZpTQYBP4/DVHCsKSuQTTzwRNyfNOUDKfa+xiHySdN111zWfmbv7KFZE7t4JTP/x+d6rrAQHInDsscd+7WtfQ5cX0+djSPoAZAQitwjMAQENHHPwElWFIQhoHByCqtIUgSkkoHFwCl9Kv0WSzqRfnpaa9FdDUJ3XNCWDQ7xZyeAQVIdLU1cODsdWKU+YAJ/G2W+//R72sIfFcvAt0MSa9eQnP3nXXXeNYeQWAREQgd4J/OY3v0msWeyrWrJkSe8ZKUEREAEREAERmEICGgen8KWoSCIgAiIgAlNCQPqrKXkRKsbCEpAMztar1wmt2XpfKm07AiuuuOJRRx11+eWXX3nllbfddlsS+RGPeMR2222HTjnx108REAERGJrAmmuuufvuu/NB46EzUvoiIAIiIAIiMIUENA5O4UtRkURABERABCZIQPqrCcJX1iIAAcngDDWDpX7/+9/PUHFV1OEI3HrrrZY4Z1eHy2WCKfMZLb5b8/Of/5xbBzFl8Yf5fYLlUdYikBCYexlM6jvmn7/4xS/4oJ5lyi1/47dkX3vttXy379e//vXDH/7wf/3Xf1111VXHTEDZjSQwzTL43e9+lzsQrAo0oUc+8pEjq6MAIjBzBKZZBmcOZrHAGgeLTOSTEJhmGdQ4mLws/ZxLAtMsg3MJfJorJf3VRN6OZHAi2KczU8ngeN7Lve997w4ZXXbZZTJodeA2n1HUcc/ne1WtZoeAZHB23pVKOp8EJIPz+V5Vq9khIBmcnXelks4nAcngfL5X1Wp2CEgGZ+ddqaTzSUAyOJ/vVbWaYgKdDVq67GiK36qKJgIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIicI97yKClViACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIjDVBGTQmurXM1zhXn7333DpK2UREIF6ApLBej56KgJDE5AMDk1Y6YtAPQHJYD0fPRWBoQlIBocmrPRFoJ6AZLCej56KwNAEJINDE1b6IlBPABmsD1DzVAatGjhz+yinxcwtFFVMBMZIQDI4RtjKSgRKCEgGS6DISwTGSEAyOEbYykoESghIBkugyEsExkhAMjhG2MpKBEoISAZLoMhLBMZIIFMGlx5jUZXVVBCwFrP99tuvs846f/rTn6aiTCqECCwSAcngIr1t1XUaCUgGp/GtqEyLREAyuEhvW3WdRgKSwWl8KyrTIhGQDC7S21Zdp5GAZHAa34rKtEgEXAY7V1ontDqjm8mI3mKwZs1kBVRoEZhxApLBGX+BKv7ME5AMzvwrVAVmnIBkcMZfoIo/8wQkgzP/ClWBGScgGZzxF6jizzwByeDMv0JVYMYJ9CKDMmjNeCtoU/ykxeh4Vht4CisCPRCQDPYAUUmIQAYByWAGPEUVgR4ISAZ7gKgkRCCDgGQwA56iikAPBCSDPUBUEiKQQUAymAFPUUWgBwKJDHZOUQatzuhmLGLSYmTNmrH3p+LOPgHJ4Oy/Q9VgtglIBmf7/an0s09AMjj771A1mG0CksHZfn8q/ewTkAzO/jtUDWabgGRwtt+fSj/7BHqUQRm0Zr85NKhBjy2mQW
4KIgIikBKQDKZE9FsExktAMjhe3spNBFICksGUiH6LwHgJSAbHy1u5iUBKQDKYEtFvERgvAcngeHkrNxFICfQrgzJopXzn73dpi8HT/OevvqqRCEwbAZO17bff3r5dZ+cj7xbBl09bUVUeEZhLApLBuXytqtQMEZAMztDLUlHnkoBkcC5fqyo1QwQkgzP0slTUuSQgGZzL16pKzRCBKhnsXAUZtDqjm42IvbeY2ai2SikCU0NAMjg1r0IFWVACksEFffGq9tQQkAxOzatQQRaUgGRwQV+8qj01BCSDU/MqVJAFJSAZXNAXr2pPDYEhZHDpqamdCtI/gZoWY4dF9CWt/qErRREIBCSDAYacIjABApLBCUBXliIQCEgGAww5RWACBCSDE4CuLEUgEJAMBhhyisAECEgGJwBdWYpAIFAvgyFgO6dOaLXjNUOh61uMXX02Q9VRUUVg5ghIBmfulanAc0ZAMjhnL1TVmTkCksGZe2Uq8JwRkAzO2QtVdWaOgGRw5l6ZCjxnBCSDc/ZCVZ2ZIzCcDMqgNXONoVGBm7QYHc9qhFKBRKATAclgJ2yKJAK9EZAM9oZSCYlAJwKSwU7YFEkEeiMgGewNpRISgU4EJIOdsCmSCPRGQDLYG0olJAKdCDSRwU4J/zWSDFqd0U1vxCYtRtas6X1/KtnsE5AMzv47VA1mm4BkcLbfn0o/+wQkg7P/DlWD2SYgGZzt96fSzz4ByeDsv0PVYLYJSAZn+/2p9LNPYGgZXOqGG26YfUqqQUpgv/32O/zwwx/84Affcccd/qzU05/KIQIi0COBUnEr9ewxUyUlAiLgBErFrdTTo8ghAiLQI4FScSv17DFTJSUCIuAESsWt1NOjyCECItAjgVJxK/XsMVMlJQIi4ARKxa3U06PIIQIi0COBUnFLPFdaaaUOOV522WUyaHXgpigiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAIiIAKtCXQ2aOnKwdasFUEEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERGCcBJZebrnlxpmf8ppaArfeequVrZt1dGrrpYKJwKwQuPHGG62oksFZeWUq55wRkAzO2QtVdWaOgGRw5l6ZCjxnBCSDc/ZCVZ2ZIyAZnLlXpgLPGQHJ4Jy9UFVnjgnohNYcv1xVTQREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQATmgYAMWvPwFlUHERABERABERABERABERABERABERABERABERABERABERABEZhjAjJozfHLVdVEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREYB4IyKA1D29RdRABERABERABERABERABERABERABERABERABERABERABERCBOSYgg9Ycv1xVTQREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQATmgYAMWvPwFlUHERABERABERABERABERABERABERABERABERABERABERABEZhjAjJozfHLVdVEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREYB4IyKA1D29RdRABERABERABERABERABERABERABERABERABERABERABERCBOSYgg9Ycv1xVTQREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQATmgYAMWvPwFlUHERABERABERABERABERABERABERABERABERABERABERABEZhjAjJozfHLVdVEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREYB4IyKA1D29RdRABERABERABERABERABERABERABERABERABERABERABERCBOSYgg9Ycv1xVTQREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQATmgYAMWvPwFlUHERABERABERABERABERABERABERABERABERABERABERABEZ
hjAjJozfHLVdVEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREYB4IyKA1D29RdRABERABERABERABERABERABERABERABERABERABERABERCBOSYgg9Ycv1xVTQREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQATmgYAMWvPwFlUHERABERABERABERABERABERABERABERABERABERABERABEZhjAjJozfHLVdVEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREYB4IyKA1D29RdRABERABERABERABERABERABERABERABERABERABERABERCBOSYgg9Ycv1xVTQREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQATmgcDS81CJma3D73//+0suueRDH/rQu971rgc84AEzW4/eCn7XXXddcMEFX/7yl7/yla9885vfXHHFFZ/0pCc98YlP3HzzzZdbbrmcbG666aZLL72UlPn70Y9+9NjHPpZkSXzrrbe+3/3uV5/yrbfeeuaZZ1Ie/m688cbll19+1VVXffzjH7/77rs/5CEPqY97yy23nHrqqd/4xjeI+7Of/ezRj370aqut9tSnPnWXXXa5973vXR+Xpx/96EevvfZaaPD3oAc9iDLzt/7661PykXFjgF/96ldXXnnlDTfc8OMf//gvf/nLW97ylvg0cR9wwAE/+clPEs/iz1e96lVAwJ/ARCkGqPd585vf/LCHPSyGueaaa97znvdEn1L3yiuvvM8++5Q+kmc+geFkMJaNru/iiy/+4Ac/iHR06Pq++tWvfuELX7jttttoz9tuu+0666wTE0/cOfL729/+9t3vfjedBvKL+CD7yO/jHve43Xbb7aEPfWiSUf3Pn//852efffZ3vvOd448/vj6kPaWCF1544be+9S2yvuOOO1ZZZRWyXnPNNXfeeef6ruN973vf1VdfPTKLDTfccMstt0yCSQYTIBP5OZwMfv7znz/33HNpUQxJf/7zn2lR/NESNttss5E1/eMf/0j//LnPfe7b3/72D37wg0c84hGPecxjnvCEJ7zsZS/7h3/4h5HRY4BW8hsj4mZI/dSnPsUgjuwzADGSJgFqfrYaB5N0/vCHPyBWyCP5MuQdcsghrXqAtlVmusLoT3b0GPe///1BzR+VZe6RFEw/x0Cg7etrVSRE8oorrrAoRx555DLLLFMV/fbbb2c8Yi7HRPRPf/oTM8k11lhjyZIlOKqimH+HQWHffff9xS9+UZ9s8nS77bYrHYtZ41x11VV0O3Qd1M66HQKvvvrqSQr1P1vJYIcqx9x/97vfMV7zahj377zzTorK4Pu0pz1tk002WWqppWJIuSdCoMdRknGNmU+rWthMrBil87SNpNrOVHMkdEqqnLlALvKXzzgJTKcMFgk0H2GTuCPH/RwZTPL6yEc+YjNMBkqbnDPZ23jjjTfddNMkZPHn9ddff/LJJ3/pS19iYrDssssyK6CDIu5KK61UDNyXxqaYsnwmTuAzn/nMGWecseOOOz796U/vsTBt1Rc5Gtek2Jk1+s1vfoPCkNUryTI7ZdqZpG8/mesiy6WPEs8TTjihqIFpqzVN0uw8sU/SWfSfKBb1N34CzJ5f+cpXujmEAWb8ZUhyZNlmfxg8JvL3wx/+sGp9i+WD9XDnUqEsYIwvFXUUAfS8VSnTCaJ9fuADH1gal5X5scceyxq7NPr//d//oZuoUtazpEdjVRrRPP/3f/93m222Kc136aWXfutb31oTNz66/PLL11tvvdj/opSPAYruhtpJbLEWF41baTnrPVFtJFkfd9xx9VHsKRO1JOLc/PybCN4wqRoNJ4NeI+bcr371q73rY57kj0Y6CLz33nuvsMIKsZ2ccsopVRFz5Jc0zzvvvNL1ALkj+8hglezH8lAGJOX5z3/+fe97XyJikI5PS91UEyPBPe9Zfn6aruOiiy4qjWieW2
yxReRT5YZkMRHJ4LzKIHOMF7/4xVWq2A022IBVdLE9uM9///d/Y8ctbUuMzmjHWIF74CpHW/mN6SBHb3/725OdHCxOYpgad4dx0FPDqvTCF74wGRa/973veYAaR4cqk3KVCuM+97kPiy60SDU5zsGjicugM+zw+jxuQwcaqDjDRMNbFZG5KA2gKIMI9WGHHVYVy/w7DAod9pChU0uKgUKw1MRFLe51r3uxCMJWlEQp/dlBBjtU2bNmuZFst3LsW2211dwLIBymRwb9pURHvzNVund/vw0dL3jBC2J5cOdM27rNVHMkdOJVzlwgJ/Dn8qdksF4YizJY2gyaj7Aevfm4nyODnh2jG7tSqir7n//5n9irPHDiYOPFDjvsUBqXKatraWKsvjQ2Mc15dU+5DDr2m2+++fDDD0c5YC3h/PPP90c5jg7qC7LrrHGNRe2rRnvssYdLx6677hqziG52LXuwege62RjR3MnysCqFUnnMmdgXS7KwPmjU75FYNfRzUAIsVv/rv/6raLaRQet//ud/XMdNX/D3f//3a6211oMf/GDvF7DiFFfLTUQXLRgrZ0/nn/7pn7CI/N3f/Z37sM+aXefFpNgAiynIg+HAFLT22ms//OEPj56ve93rinHR7kWdFIX/l3/5l3//939P4rIFuxgXH/aA/9u//ZvnQvQnP/nJnExyHxxMYkrjuieNqnTffb1Bi7V6zKXG7V1zh+kRdj42RHhRzbHXXnvVZOePZNBKuPX1czgZpITsQMH6y3kOf4/mYOXQsPyckowdgqdTZdDKkV+KlJxixIqGYg4ZjJ0JLbam8ByCwWjEQRYvKo6RBi1kP0axroON4RzQjOlwkrUqa3bnxZBV7lKDlmRwsguYgWSQw8F0+94SOPHDgPKUpzwlzsKx0VYp01HvRvMqIyZjWUyQlI855piqBmn+beU3psYxZTsN7FUwRxODVrdx0HJnOYdExO0gXoAmBq0OVaa0TFE8FyZCiDNns2K38+xnPzvCmT/3ZGXQeXZ4fR63uYNt1P66cVTJYNIzY25x5YVF56Qy54mr8u0wKHRQ1aHIiAWgN2P88toxfLNrGLt4vBSBoQ0pi7ESd2cZ7FBlyxplUBR5ZJAlW6wI0xiUpEk55+znlMhgKdXeR8kO1p39998/li1n2tZ5ppojoZOtcuYCOZKfY7dk0MeOUkcig1UtoeEI69Fbjfs5Mmg5MmjGfSo2OWedGD2ZnJeOOFiFUUlFOFwCFNePzNuLM/O+NDZObI4d0yyDYGfKd9ZZZ7EfMS7QaA/5Bq1u6guK1Fnjaq2o3xp99rOfjWRqDFooY6Mc1biLBq0OWlMXmcyJvacjBwatpbDf1Lw5PeqLAIPWSSeddNlll7E8K6aJFoMRq+g/Th+uO7Dsqg4lDFoYlojXXXcdWWDW4n6SZzzjGabBYds4B5XsEVYQjPZVZ61Ki8elZNxbyEEKnrK+5QoRbu7CzVv4+Mc/jgrgl7/8JT9Rz7FGSlJA//7617/ePLnmCwW3m9ww5nOk10rF9lh2f7OJJkb3uNSC8ygHH3ywb8Ll8kDuK7MLLpgMsfWmWCNu8zvxxBNJkOg4yMsscLST17zmNdTC8vrYxz6W5OtloOJY4xiTzAcVJPdKsSznDz2IQfDA0YGezi41Qtd5xBFHxEeJG4sdbPFkcYJZInla+pMEmV3x6OUvfznDXhIG7eT73/9+PFlrVe3q5Sn6PjbJJnHn4ydzVqvIPMkgrZR3jfmztOuj5TTp+jgORcv3t0z/gF3zX+/+oz0nxh4L5jLIz7byy81stEArMMshZPBRj3qUJYsM0i1wnRE/mSpxR0TxaD9d/Xvf+97SS2xYqNADWFKl/3K9KgeweMTChn6D3sMXNh/4wAde8YpXcP0gT+mL6H8SA7kliKWBC9lwH3TQQVXbzHmKSQNlokXxfyWDcymD3qgYazg3zLTeZ/kf/vCH2cJmoz9NHTn1xmAOhkhGZ84x8/Of//mfuR7ZBx
2OXzA28S+POICI2YnxJYluPzvIr6eDiHHN169//WvzwbrzzGc+8+6h7K9jWakIeNzO4yApIP4vetGLuBbVUkMMKQYoyJoysEMlar09R3d0qzK3gHLzDIkw22G4RN7tTf30pz/FzdTI0uctvOQlL/G85swxWRk0mN1eX9sXwYSHLjfGwqD1yEc+Mvrg5o5B38HNcHP66afbJA01OledsKXDwiPaVbcxdxgUGJqZ2iUlKf7k/k+bjzEa25TYwhCd85TWbyAvjIn/8R//YY9YdR599NFcOo0+Dh+m1uyusEfJvzky2KHK5I6gYXJjBwBuNIMM9PQ2zMPRWXCTD8Mx5gcebbTRRn/dEDq/f9Mgg1V0e18t8nKtKVblaP7cWEvnzxVG9PwoOqMu20fYDtO2zjPVHAmdkip3WyDXv6a5eSoZLL7KGhksBsan4QjrcduO+zkySKbMrhk3GfRxs2nyHe94B3oVu0eBUfLQQw9lEmjr0HXXXZdpgM/brcAs8Vgkmhu1Emd0/vEf/5GfaH64j8FXoOzYZoO4BePfvjQ2nuAcO6ZWBlGNcrIHnYDpMJNXgEHrOc95TuLZ8GeO+iJH49p7jZAgtByuBaX6rHy5sruUA7NoW1Jxaxf6otIw5vnSl76UYSsG6KA1tej5E/tYjAV3Y17RCS3a/Dj+EgU9YsalUt7+UJKOoxC1eUxwJwIbwA0FeyG//vWvJ6ZmFLishy0AS9/kaf1PxnuLyNKafj8JzO1nvlcUFXZ8+t3vftcfkUh8ZG5S840w7JGJATiM4qou1A3xkbmpkZ81KSbOjM2tXyyhk+gspzlob5VC/5g8tZ+8Z5Z8Foa5ER/eIMfSkEXPT37yk06s+DTHh30N2NVInDkZW4SKST3rWc+yrNHoFZ8ugs9cyiDGJ3ut9i9HD9HMug/rgZFv9pxzzvHw2FDrL9yz1HLklxQwO1mO6OCY/RdL6G2VAMlTU3h5gdnZjR7cJ5f1J7TYG+gR6RWTlPnJysSXNKX7E8ndJ1vFQ5DFBBMfr5dkMCEzhp8DjYOsbaxRMRZwc2CxIp4vwYqtzo8qYtEpXkuI/Po4+NznPreYOD4d5NfT+f73v+/X9qIr5IB7k3s+LXrOOEgKcYbG/K04M/FCFh3dqswkxMWfWxyLyaJGtwDMH4pP58ZnguOgMez2+tryZ9pve6RMgWVvtvSEltuJMWuheU8yYv+TxWV+hbY9ecrPzEGhmGD08WsAkqPSftMLdWRKHKOY2zVx2MK5N6kYAJ/OMti5yg6TvSDsC0lKhUXZXxbqwuTpPP2cuAxWwfTRqvfVYlWO7o8V1gQtORmcM23LnKl62WocVRJaE8UfDVHlzAWyl23uHZLB4iuuapDFkPg0H2Et+nDjfpUMopyxLoVRkl0UxVocddRRFoB/kwPQbGdxRVNxMYgeiY3aFhcjWTHlkT4jNTYjU5iDAFMrg29605u8YeDAGvqGN7zBvmvAz84ntHLUF7zuzhpX4vZeowMPPNAQ+Zyt5oSWT0fZOd223XbWmmZO7NuWc77D68rBWgtPrw/NoMWWrj333JMd+qTNWUjvjxbcoLX11lsbCrZ7l4ocX+GzAPTarfRZrms77bTTSlNGB2cpJ9cxv+1tbzN/TmZULba5AtHCsLKKWu+LL77Y/NE68aJL891vv/0sDJruJICdzeIp2+Fjsh7Mt8HSTXP5svu7g309ljjTnWQO5GGqHH78ixlYVZhu/mw+slKxmbE0Bfa8WwC+q1waYO49Jzh5GkgGeWVm0GLXObuw7SpwTnLYi+bfkQYtbLF+zIg2ycbSJs0gR35ZCfgECJt3aXYcO7MqcLwsCeAzQozo7L1lyxIBUMRb+HqDlh895GhIkqz/dHs2djL3dAeLHMsIo7h7NndIBudPBplyWJOo6nhpHr70ZdNf0lq8vVUtBvysLWNlEpef3eTX01l//fWt8IyGvBr3b+LIGQc5d+KdAEud0oG4qgydq+wzCs7flCaOUc1o8G
/Ntz9L486Q5wRlEEqdX19bwi5Z22+/vb/WohGFQdOflk6NuKTFtgoRrHi/EKXKHBRq6kXZTEw4N5/Mk9noasVmECxNAX2Z771gTVQMkyOD3arMXni/8LOUJIXkwk+r13zf/DlZGSw2BvcZbqbqWZQ6WMr50clkWpgzbcuZqZaWM/GskdAkZPHnQFXOXCAXyzmvPpLB5M3WNMgkpP1sOMJa4OHG/RoZtANVDCgcHCmtAvoutF424mDcimFcU8SYVbqRhdmCReTfL37xizFuE/dIjU2TRGY9zNTKoJl/0D2iNWWfImoHUPvmv3yDVgf1BeLZWeNK4futEWpSM/eiPuJKRhMEzmBVNUgOOFoYTOZVYar8u2lN8yf2VeVZTH8MWvf0/k6OQQmw+OEynx/84AccCq76uPqgBZjaxBmw7Y4dShgX9rHA3DpoV9hz5eDXvva1+KjGzS2Ctqzlvj43XCXhURybj0+y7afnwpZY3/WQxPULu1Cys5HcnzJ1MDfKa99B40/N4WfOoorKHvm1QmwDdJ1ajM4RdTuARbdVvPiVK1MOOeQQC8+2HVcIxhRq3HZZGQF8FlUTuPkjhlu7bJAofHWsNOJAWZfmJc9IYDgZJBe+zY75B/UrRz1KP4QTS1J087l77P34o7Pj1ILP2Ioho0+O/HKhKJJFahxJ8R00MXHcLr+cgvLLWj0MJgQKQDqvfe1rXUHmT2scXuzk2vcYxbsdu+otPsLtQpR84igJVvXTo/cr/lXZyd8JDCeDPh65itkzdYe35+J45G3Sjy16LHN4sj/84Q8ZCpOn3eTXEuEsJjcD42YcRPZbXcSaOQ7aNj2y5swiC63SgTipqf/sXGVH7QesPU1zPPaxj/UO0AMnYfQzk0Dn19cqXyactgxmiPGrrUtTuPDCC82fywb57l0xDHcJ+KHnU089tRjAe/Vug0IxQfexxT8/OUoV58n0Zt4+vX/wWOZgWu4SXex2CJMjg92qzKErjo+QNZa2eDgsltxfFr2TzUziU7kHJTDcKDmy2HyqxBoVh/K5YSWG96beYdrmcTusNGMZqtxVEloVPvoPVGWfkHRbIMcSyj1+AtMpg0UOzUdYizvcuF8lg/Qnd955p+Ve1XWg+PIBNFnx+cSAG9K4vaBIgNmCX/NbOjEoRnGfJhobDyzH+AnQeXIymP2yfN2AscMvbskvSWf1RY7GlWL3WCM2ILL/kp6KZLlEtMlXLXzG2EH10S2uy2/niX3+u56zFGTQGtMLRbOJaaHHTmdM5R4+G3pkuyOYrIpfo7H80WX70QG7brhJubB+WTBUBn6JXxLR70PjEx2czPCnmB6Jwh/WI/dMHHxlyn18qyk+r3zlK21bh39+w4O5w9VStn52fxxebN81H5+a24tdpMGl/3YdPHe7YdAqxq33ce18v7qP8847z2x+zLH4JkGxDPA3ZShWBN9uXAwmnyEIDCeDlBYFEB/d6db1MSPh1k2r8jvf+c7mDSNHfmmiJr8kEj8FH8m7/OKZiDA15TxWlSUsJpK42WiPFt66HTTXyVP/6d1O7HP8aY78SgYd4/gdw8kgI4K1ZyYhVfXy9pw0ZsK7jcrDJImwSdB8MANz6iI+7Sy/loh/HIgh1S/DjOnXuHPGQUzR7MEncYSRs92trFk5VR6JmpK45oI9uTXV16NuBHJeX/McmWqavYTBgq3ZzHlq4iK89tTnwMXA7BoxT4zKthsjhskZFGI6iZtlvH30FIsaX0WNT6kX10pbt1M1qye8dynFbidHBkm5W5UdNUoN+2xtrJS5mb76EAztYgD5DEdguFGyvszIlB+yd4umRcmctuXMVOvLzNMaCR0Zd7gqZy6QR5ZcAQYlMIUyWKxvqxGW6MON+zUyyA3ettzjQ7B++rNYFx9ukhWfj1ZNJgb0M8WUa3xGamxq4urRGAiwR58t7z6D6ivHzuoLCuCqyw4aV6L3WCOMWHZ/O5dtbrvttk3gdJsxWsrd4raS39KJfZN6LV
SYpReqtqrsFBKId4O4sqZYTtdlF004xcDm4ynXHJLgxDc9OPZ8opCyH57jDG9Vsu7/6U9/2tys532rKT6kyZ8HK3X4adPiXKRJsWto8MFwy5GDUFXq+NIimWe3rrkmQXvkN0FXHc/yfJnYtVIgjsxaAUYS8CbH9LpfGRyZdX0ADiDa59kf/ehH+70u9VHsaY78MkccOU10+aWtcj9YkyKNDIMGjfNeI4N5t+OnamIUl6MOBmmPKxmMSMfjHk4G47frq+ri7bk4HiF69mHk+HHdmI7703v47aAWoLP8Eh3jLofaLZ199tkn5tjEnTMOelyOhseRvUm+OVUGtWXhSJMc6QzBYp6rrLJK8lQ/8wnkvL7mue+77742j0W3y9faUWzVxPVumVs3q4J5K0W9zrGhZMbrKXQYFKpyxJ/rSdED4uBmBZ+RWnhUb16kqhSI++1vf9ueFrudHBkkzW5V9lg1qEmcqtnBGlQSvL6qCsq/dwLDjZL1RWUkskOENFR0ZDFw5rQtZ6Yai1HqrpHQ0vDRc7gqZy6QYyHlHj+BKZTBIoRWIyzRhxv3a2SQPe5xF3WxFuZTteJrMlr5KOza86pcEv+RGpskvH6KgPcMyfwzkqHzL9W4xjCZbmw/b3zjG0mErZYnnXRSw9RcmjpMkrvF9Vg1s02X39KJfcOqLU4wGbQW511PaU29E2SXSk0R7evZBOhg0KpJmb6VHtaU5tGgVVMSf/SJT3zC3HybhHTcf6SDSQz7vi2Y33loP3/5y1/Sc5m7pthVNPgikR9L5xQXKgNue8OHPzLlogyObXFcxrf8FIvqnSx7VFGrcRskMyHOxbOQ41LdNddc03vYYtwqnyuvvNJuc2aoeN7znlcaLObLxxjYH8RnFVCkoiTlJDJ/3LRWv5G5NFl5NiEwnAw2yb0mjH8A3I4k8gFtJvc0DFom7ZPGzCnG0luYatL0R53llxRYpVg6bI33Eyqe8qAOLzaf3ytmFOUIzSaHRNkpzyZBcCFE9ABIcTGW+cS4ksEqSgP5T1AG6Zyvvvpq6oV19oUvfGFSwec///mmdEPFzGeHk70a7ON2WSBkEjdHfrm/wo5ur7zyylhYGaNp+cg+YxlGd2SfPzQCya5VK0DmOJgUm8+d8tk/subeVLa8kC8XuVQtQpK4rbosPg9zwAEHgJRLmejo/By2Uz3uuOPMvdpqqyXXXnkYOXII5Ly+hvnyZjltTOAVV1yRa45GxvJtSUwOqwIvu+yyzJQ4YksAJmyJQiF27G0HhaocKYx98o1Oo+boZ1V0/NE18NEFHHx3oXj+MnkRrWSQNLtVuQlqEo8GLX7qb2wEJjVKunqXazC77bern7bVA/S4rVaamRI6wSrXLJDrQenpGAhMvwy2HWGBlgw3raZtNcwzZZCUmXP69yz8HLbl2GS0cnWNbUqrKWp81ERjE8PLLQIQaNIz5GhcG0J+6UtfajeF8H0f9B5NYnHBJhNjQlI8pqNcdM8HXJlIo0Hl+nc0J1wzwBy7Kqk422yuNW0iv/UT+6ryLK4/Kwr9TYQAKzRvdsjSRMoQM0V67Q99yjj//MuTWCxq8t19990NF7tBa4LFR65fY7dO9E/cftCBW4aTRzU/eWWu2vvkJz9ZEzJ5hNlsww03tLqgXMZ8FQPErdnJR7ZjsHPOOcdSoL+O/r7dD3/mZH75src0HExxOI0bY0W3bxIvvWuRT4IdccQR9v3JGKvevdFGG1kB+IpSVUhuiLIwDCcPfvCDY4HNjRrxW9/6VlX0+fD/mwjeMObqDCeDpRVBO+zvF9VzaRjzdJUu9zJx5UvpF+n22GMP+rGaREofdZZfpkp77bWXlZ8egH27peknnn5fDXOj5FGrny7gD3rQg1BfFuNyc7SVDZV3qQGY7yRzDqwYER/JIBAWRAa9ATAW+FkfBln3dwcjFI3WGtV6662HcdQfIQt+1Rhns1h4+yNz5MivfSKYfBnxL7vsskRHb+VBD84iKsmUny4mHc
ZBhl1faWDE4loPyyv+i0WNLSnFfPHJqTLR/dPEGMySYZoLGK0DpHidv/lcWuZp85yUDOa/vpEkkSZ6ZmtLnIGw8HHWF+XLnvrHnNBn1aTvdxaxiSEJljMoJEn5zyOPPNJqwcc/3LOhgxsRuKvQvrmFQZpxJ4mYKYOk1q3KH/jAB6xSbPlKihR/+i40er/oP0/uCcpgDcYxz1StJJ/73OesVbAwYcirKV7VIx+PqqZtVRHx7zxTzZHQCVa5foFcA2r+HkkG/Z02b5AdRlhyyZy2eTkTR44MWlI+DWaumyTu99Kzzyl55D/RAlnfxb/0JO5f72iisalPYW6eTqcMVuH1q2X6XSA0VF8MoXFtWyO+tWwNHn2yKyq9YC95yUtK0fkxCXQmPj93wcHBDQR8k680Lp7dtKb5E/uq8iym/6WXXqoTWrHRyj0BAmi0Ldf6jW+uY2r+6YjhUqbA3BVj3/PcYIMN1l133RpwmPptiw2qCu6W4cyTbR9As4yKiusKY1wvM541QKpooICz1OjRsADZjl18uBaDeZ49YqsOszesd6WqOt9r4JdQWSz7lw0LbNLn1Bede83ddDEKd7OgjsSHgyxsnYiPotvz9V0e8SluPmpCjfiiEjftJo/0M5OAt7qaJkcWVa0uM/ea6N6ejz32WL+XCRUY+2hoihaR65JZ7bCnptVJqebyywknDouQF/LOFUmMmjb7wRrNrLHqoEZNpTo/outjQmbRMdKX1tfliGKXZnT22WezlwJdqi+HPJjHlQw6k7E5xiaD6LtpSBx+4mYGPlRz+eWXM1ggU+w2SL4OYnVnhGLHKydrCYlijiNKfEWGST9SwLkukwU8ubOlKAs58utxsX+bytvLg77b3Owk5aASBWMbXXxTHrfDOIiJ2k6GYT3iyNT3vvc9Sxn9uxnO+ckI/uIXv5iCvf3tb4/54vasu3VZHFtBbcrWQhbSHD/lKNjjH/943hd52S1nqNo/9KEPccw6yVc/eyGQ+fpGluHNb36z9cxM/6q+A58k4luLPvOZz9DwSudd6K1sOkpcHxk9He/YOwwKnkh08JVWhl3zqbpEOobnyMW5556LD7NQykB/Yt+oo6mzN4tzljEw7kwZJIVuVXbU7Kzn3GrphjA6Hw6JWoGLqJOK6Ge/BMY2SsZi+1klNnxUfVkthk/cTaZtSZT4s/lMNcZqK6ExLu5xVrnVAjkpp36On8CUy2CHERaGQ4z7mTJIqZjymUmMhfkhhxySvGtGK1sUs/jl5Gjy1H7al4TM3XC0aqixKc1OnotMYCI9QwTO5/1sDcvaDeVq83uzfLrIEq90ksxN75ttttnee+/NMjnmaG6P3kpr6rPNzhP7YkkW3See0ZF7nAR0QsvMyH7pCuob8yn9l2m9ySr2jNIARU9X+uy3337Fp+7j3coJJ5zgnvUOX8yzfcA+1lcT3rdzxr6GBUNpFFOdW0haY2kYPD/84Q9bGDQCMQwdbsyFXe0U1fawo3nkmIvvd1hmmWW4xCnGxR0/4cPKDeaov1lvsJ2WW2WiJuWggw5K4lb9dLPZq171qqow+McJGTrKE088EcvBFVdcccwxx6y99tpeKc7EoLipSWemH01qN9BwMlj6Ohqe0DK7r796HGxRp9tEo8QfRqyoEOTS5NK8Sj1byS9SE8tgbjTdFK808VLPhlucSuOaJ7va3ZTLiRkWJ6WB/UZBlkDg4hwJ99VccMEFBx98sN8+QRVQlBejSwZhMvcy6Cc5vFVzWRmWqmJ7iD60t6qP6zJwl575y5Rfrrf1EuKgzWM/RtGMIHCixU9J8midddaJRcWdMw5i6435YsdCLljksN2PavLUt+MRDFVCzDqzyp4Uw18sg7vZJ8iSyYPNq2NSMtjX66t6L9iPbVMI07a4Xbr+hBYboXyjCVbSYuKYi+LuBLZMJWFyBoUkKfuJssDaJIbk0gCJZ9wn7o0ZKzgvOglpP3Nk0FLoXGXfn8v1bqVli/N5eobSMHPgOS
kZrEc35pkqhWEPk+nF2Ltdehq4vsANp21VibSaqcZE2kpojDvmKkeB8s6haoEcyzn3bsmgveLmDbLbCDvQuJ8jg1T8jjvu8LsTXvGKVxRb+1lnneXywiXVxQBY6aLSpnSWXozVUGNTjDiXPtMpg1WoXb83kRNaQ2hcW9UItYxJBOcaI6KRJ7SYM7socUcUxmMAcvMWt2qz88wf4eBu0pgy7s5a0/yJfVKSBf+Jxvge4zThKK9IQAYtE7/hlihDdK+UGSuLf7eDbyGM7ERK5+vMVN71rncV42YatPzCInpednazcSDJgr2lqC+tg371q1+dPKV9svube1RYzGMAS56yGwgdhMVlnlQMkITnJ6fBTIPDmrB4IVUMz3WCrGEwUbBFPfrjRpN46KGHWr78yxVzSYC5+TmpydNwMlj6ahoatOI5ITR6fKonSY2GsfPOO7dqkKTQVn5LDVqck8BMjiYxKVLVz3yD1oEHHmg1xdLMsq0qIyZhFIyr4dgFn4Rh4rXVVlu5HBWnvJJBiM29DBYNWjQJzuxi+EwajP/kxBKadK5L8sYTHZh8aHIoBTy8OTLll682ei4sSBibkvT9O5QE45uL8WnOOMgeDs+XKhfliB0V/v0q9OZ+rwUFyKyyVYGzbq7I8JK4g6NyfFYhVnb+3JOSwV5eX9XrwCTMEXN7j3yOLgarN2gRks2hFpHJG8u2GJfzH/7UwhSN0zmDQszL3Ojo/Zruqos3k1ilBi1mhujOOAGZBOZnjgxaap2rfPTRR7usMe2MZaPicSJKsERvEgPPunsiMoi1noNxxT/2ExjPMc9UydRvldhxxx07vNOG07bSlNvOVD2RDhLqcXGMucqtFsixnHPvlgzaK27YIDuPsEOM+5kySF1YxNlIxB6L0pUmF/D4d9aZLScKFmaJyZUJcaZaJTvNNTZVKcyZ/3TKYBXkVuafqkSK/g3VF0NoXJvXiDWgyQuzU3Y8x1qMNGhxAAs9D7YrlJ/cIhDj4r7oootQ+FjiLP0SOcrRmvrUvdvEPinngv+UQSsamMbtlkHLxG+4JcoQ3St7XlyvV7prptincIKbO5G4do/F//777x8vR+ICsaRzzDRo+W4CthP+4Ac/KBYGH86yWNfMsdyiFrI0inuiSfdjvPvss4/7Vzn8zMeWW25ZFaahv+viKXbp9K5hOtMcbCKTJ4AMJ4OltBsatNiXZw2Vf6vuPmYXm8tjk0NaHeQXwy3yyx8f2ECfFY+FrbHGGhSgtI6JZ8MZYRLLf/rF0Bj2ip9I8WAjHWg/l19+eaO61lprjQyfBJAMJkB6/Dk2GWSLGY0ZqwnfI6F/5ttX1h7YpVGlnnazMZ0/ksjOUG4y4ZTwnnvu6VfmLlmyJFkJZMovG+WsYJw+YXlfipqzWRYmOaSVMw6yKc/S5F+OJpfmy8EsDxMPaWVWmbze+973+nEcJjCsspgSMFPnpovlllvOMsUkWTW4l5Z25jwnNQ7mv74a1H5XCa81CTbSoMXr9rvOsAMhj4wILOA5KG8qLSZFvsuK4TVJv/5n20HBd7MiodyqVJ+4PSWLu4fQy4nLmfuddtrJPqBFe37IQx7C9YNJIjkymCRV+rOmynQ1fmcDxeNrIuykQaPBd0rcjO2dHsN6afpz4DkRGfTjcd67msNFZmyjpL1BrjDyd81ewLavNWfa1mGm6sXrIKEed/xVbrVA9nIugkMyyFtu3iA7j7BDjPs5MkitGSKt61t22WUpXlVrjzs/OL7PfWvsSOaCbnRT1nF594VhoCqR6N+jxiYmO7vu6ZTBKp7NzT9VKZT6N1RfDKFxbVgjbju0BSOrp+IexJEGrdJaR8+3ve1tPidJtpTFYKXuGq3poBP70sLMsacMWuM2YsX8ZNAy0fLPZvIRvxph8ysH+UR8TbD4yL9syfdmon/i9uUrn5VKHiU/uV3QtWz03VVatiRW8hMLFmezXGnFnRIxgH/7lN4z2WUQg/mVg3z0Pvq7xpldb9E/ul
HBe+7XXXddfNTE7QfS2VxQH56d7P6ZH762Uh945FP/lglk2MI5MvwsBpjI5AlQw8lg6VtoaNBioetzCLZ4lyaFJwcNLdjIy0h7kV9yZMLkZoCGltqGM8LSOvK9OjvmSDW5ObA0THNPvweDPUHNY1lIyWBbYs3Dj1kGvWBs8/TzTIwLxcN/nM0y+cJyzGXfHtEcLLb9Cr5kD3um/HK7rOWLUSfJ1H/6przk6t2ccdCXLoytbLP1vBKHb0yJNxVnVvmaa65xSefrXEmOnLB89rOfbUxWXXVVJpNJgLn5OalxMPP11fDniImpljjaXjRGjjRokTIW6Hh9kDUD+5fBiDHCJ3XsN68pSemjVoMCV4xavskBptKUqzwZiz0dLLWJLTxHBqtyTPxrqswwV3VEEvFE3v3bWugTk2Tn5udEZHCkQWvMoyQnj62p85m3tm82Z9qWOVN1yeogoZOqsuOtXyB7sEVwSAZ5yw0bZM4IO8S4nyODKKys22G4ifulSts8m1oscPFfNlxy4Y35c4SrNHr07FdjE1OeXfd0ymAVz4bmn6roVf4N1Re9a1wpT8Ma7bLLLtbO+c5lsRb5Bi2UvX7ekUP8xSzqfWq0poNO7OtLNWdPMWgtXewE5SMC4yTg+44ZTWvyRe9mT92kVBPYHvWbMl+c2nDDDe1wOlfHYFJy3dPIksQAtsmd6ZfdbcIKDX2i71f1MhMFIFWVraLh23h9GIhZmxtFIbt6rSIUw+15xZClPtgd2QHEo9JrZGIUTgDcdddd+PA1lPgdrBimuRsVA5t5eQtEIeunPOUpzeMqZD0Bb3W9y2B9vvVPvTETrKY9c7WmpUNjrkmwL/klC46DsPfWTouzEY+tu3zgpybrnEdYuJmNcecbiXA45oADDshJjbjIr6VAH8LmR7+zokmyksEmlLqFmZQMsucA2wltmJbGBJdParuViIqgaHYdIhtgn/70pye1QwXJoaJ1110X28+ZZ57JsV3/nE+m/Hr0JrLPLg36LreBNYlbNQ56XBxuJEhqzU+6HdODxG7H4xKgSbFjXKIA3yR9m2224e6LJFNuvQA1+SK2jIAcp/OreJKQ+tmNQObrq8oUsaLrZn8SAXjFK6+8clXIGv9NNtmEjRQc8eesvwejjfE1RC6+vvnmm8kFfz6b2qpLt6SaDwqcF8SqTSysa+gOvCRtHWgHOPGMVZittdyMzR4v35hCUv4iOshgw5LUVJlhjjpy4PuMM87gCJolyAE4TmjZJ139E33+pa6GmSpYPQGGj9IpqOuSxjlKsnhxdfDrXve6+pInT3OmbZkz1RwJnVSVI736BXIMKfcQBGZRBjNHWB9u4Nlh2lZ8CzkyyIYJm3Iz+WSZ6RcPFnMxHy415UsQ7HT8+te/7mHY48Ld1KiYOFhsnk2Gqn41Nl4YOdoSGCmDbRMcT/hxjs6xRtyWz2kEfLi7wo9pxgD5bpS9bGPCuklSIzWfxexqtKaDTuyLJZlvHxm05vv9zkDt2AdtpeQa05rimgGGAFU2nmLchimjHrK4NSlzNR/6a1M/ocJDp+AXnRXzbeLDgTMzaFEvPl3jCnGGBFbObFIjEYBUFamKhm8s9QClhUGlYgHYCVgaoMbTN1Gy0Rglpt9AmERBfcPMzDzbrgaTpPwnWZtBK54U8adydCbQUFK8UVU1y84FKI240koruSyQdVWmrh+sacy9y++mm26KkoX5DUsp7nBz+S2tSGdPjr/Q7VB4UuD8matXOidIRM7TILNILm7kqK32UzKYA78m7gRlEClDN40OjuJdeeWVsZB8j+cnP/kJPpwvQSkfH7mbzQrM11EB06g4soDK3h5lyi9jGV/NJSnvdjxHdyCDLPtNlY/4u0ErZxxsHteKEbudnCpjYGZTv6UZlfteWRxUkOvmDj/8cNxsK5FBK8LJd+e8vprcOWLIJ50IgKDxIdKiHQijjkdHl2E6NdbPu+66q/vjYB8VmxCZd6G6YtbKoIOPbatim6eF7D
YSNR8UfK/uDjvswLHIWLy2bi6/3WKLLbDREpFuJ7b5HBlsWIz6KqMQxMaGoHHzG6P8iiuuyI57u2+AQRkzP7mgBuWgZMPsFKwJAb/btirwOEdJzvCZYKIv9r3nVQWL/jnTtvyZao6ETqTKEZ27qxbIHkCOgQjMogxmjrC9j/udZZD75P3SP2xRXEfU5C1ju9p6660ZqljQcZsu2878A7R++GzkxGAIjU2TwitMkcBIGSxGmQafhqNzE41r8+owYvo6CG2GbzaKKXClk/1kT5hPv7EBs/0rBqt3u+azg/rR45ZqTYeb2NfXaP6eyqA1f+90xmrEWtFKzI06nOvkYwClFejQCXrKppIrTZYVi+/BrNKYs2maGQNfDSEFtgB87GMf47bi0tSae1I2rvyyg1Z8zNNnG6g8sGmx35akaopdRcNvf7KtBFXlsYNTPPWb06pCFv256cg82aJbZc0iAJtbb7/9dhzUFF18MZ0OPp51zS6qDskqiktK7zKYw5augJUGMwASoT0z6pemNrIxDyG/lISjjSZlyG9pwTI92bfOkVCOnpAO2/T4nA+dQ2aaRIeGWbNwd5AjyWD+KyhNYbIy6Od06QHiUSc/QsS5q5rmt/rqq9u5DQ9PHTPll7HsU5/6FOmYlJVCY/Vu1iyexrEsZxz0uIzOEUVSgNJuJ6fKcd+fnzpNMuUnqM0zoi4Gk08HAjmvryY734jARiXbSVoT2I6/EwBJTAxaFutRd/8lKZj04bnBBhskj5r8bDgoYI278MILSRArsuvdmqRfFca7nWQMzZHBqrwS/yZV5koA7ppLIjpqBuWaLjGJpZ+9EBjbKMlS1O/apanXHNVN6pUzbcufqeZI6ESqnNDzn7zo0gWyB5BjUgSmUAYzR9h+x/3OMsiuDixYtjrjfPAee+zR/BXTQXGAmL8YhfmGqa3wHDkxGEJjEwsj99wT8J6hRnXZROPaCtQVV1zhs8cv3v1XE53DA/xZAIb1VgatHNWHx63Rmg4xsa9BMZeP7jmXtVKlZogA+zRttYBmKn5LIFaBif6NN95oPlVmpxje3L5fgB1zpFAMgI8b29noWmWmYmf6JZdcQmD2R2PNQslempR7srmMe2D4Y4rgnkWHHcPCf5lllolPvdjxCHkMgNuLndDwHaOYAWwbaRLRfvqmct/IY/5smOXbYPzVbIo3AwPhfdNBMQvmZHb+jEdsvB15NyP3N1q+9eeFPeuk2MUCyKcVgeFksFUxioG9PV9//fXFp+ZT1Zg9fFv5ZZuqyW/9ycIq+fV8cxy//OUvN954Y+v0uAmdiw39VtKaZN/0pjeZHMVrqZLwLkT0ulGEJYMJqDH/HEgGmUkvWbLE2rPv1ixWzRszfXU0c7rts2bvAqn5Uw9vWeTIb6u4rBMA6PXyuB3GQdLxL2l36HY867ZxIzrn6TVyhz+K4f2pHJkEOr++mny7vanmOnS+SWlmMOwrfDw1KUnOoJAkxYV7Zj/mgHL9HIwtsdbnMB7VVN+7nWQOnCmDPVY5IfCrX/2KxM2TTwYmT/VzaAIDjZLFYnMfpm0rZFXY8JwEiXSbtnnubWeqHtEdzSXUo7hjbFXOXCB7geWYCIEplMGaIaYGURxhexz3u8kgl34/5znPMcU3h0gOOeSQmpI3fMQH2m11zAap+m80ALCVxqZhARRsoQi46jJT49oKWlYqCEcAAEAASURBVDfZJwsXf7YSmtrkhS98YU3WrjlJpr59aU2LWddP7Ivh5SODltrAhAnc5z738e+rcwqhtDRctMJSgUdc+pFsQikNb54ceyJx3NxThyGqNOQ555xj/two4qqiGJJPktqlKKj5MGtx+0R8WupmUsINM/xdfvnlpQHwvOWWW+wmMdxefQuM/tocVTQ4tMGuBAvzjGc8wxz2LzteTTXA1nX/6nUMgJte0k6G4U66ZmxgfIOUv5qS+wbVJG7MBf277Tdnk13VRVUxPIfSLF/faRWfmpsx0q/Xr8m6GFE+Iw
kMJ4Mjs64PwNeqLACfCfGTlEkUbkUzn9JW0UF+UY6b/LLCT/KKP2mQ9jOR3ximm5uasrYx88NjHvMYuh277GhkalyCZHLk3Voxissvh02j6UIyWGQ1Tp+BZJBkGWusPftuzWK9vDEjRNF06jfvs+3Uz0IVo/veCz9qbGFy5NfjsnG1yhTnso9p1tcnZJ0zDpKOf/ERjUCxsvgwdtsNjbiTbseL3bbLosx+4KNqZw/ZVaEuLac82xLo/PpqMuKLGkxfa/6iYCJoFpLvZ9Sk6Y/YqsVBLlvVM2oUr5DNGRQ8FxxscuIjeeZTv9WDMIwm1ucwHvn+2Ziaub3bScbQTBnsq8rFAr/hDW+we1Cp4LOf/exiAPkMSmCgUbJY5qOOOso8X/ayl/n+hmKw6NN52maJdJipxtxxt5LQJC4/x1blzAVyseTyGSeBKZTB/BG2r3G/mwxyCQH7F9ktwXvkk8l81Db/hZKm773wa9aqkm2rsalKR/6LTKAXjWtbgEzDaqbW9mjLLbe0ZNmE5IHdhs3F9Z/97GeZqXJhvm+PTorB8WW/tzBZ8fWlNU1yHDmxT8Lr518JMLfQ30QIIELeBDmhOZEyxEwZ/+wPvdWY/3zpzt4feodi7t4fsbooPq3x4aJ/g8ytd8VgrED8zCnK62KAt73tbRadORwmsWKAUh+3yvAV95tuuqk0DB/3tpRZGCcBMAW5aQ1FXvKUn65l4xqo4tMjjjjCUma7BNciFQOwu9YCcNVy8tTjrrnmmvTgyVN+suvcy4bCrhjAfNZaay3LglO9VWGiP3m5yv6CCy6Ij9zNPM/SpJHQ17v/PDn+JoJ//TLTmP+Gk8FiRTCp2qvkX06gFwO4D1N8rv2xwAij+7vjmmuu8aSwwrq/ObrJLypFT5MblpI07Wc0kHP3V2mY6OmXqjPni/5FN5vWuQ/dCoAIV3UgxYj4+JhCz8PprmIY7kn3awG23XbbGEAyaDTmTwa32WYba05VvTracF/Ps0kttor4YUs+VBAfuRtxdksMX4FyfxyZ8suVm1byrbbaKiZrbkY3v2YQRXMSwMeyDuOgG31RrPNlgiRlfro48ylNEMUAOVX2mwY5ZxPTdDeJ+y7IPffc0/3nzDFBGcx5fZ3fQrRfYi9png62Ip/O0RRLB4ucQSGWhI97mTCypSz6V7l9ao31qzQMV1Ivu+yyliYfrErC5MhgX1WORaJh7LXXXm41r5qmxigz7Z6gDNZzG8NMlQ2U1izZ20ErrS+PPc2ZtpFCt5lqUrC2Ehqjj7PKmQvkWOz5dksGEcPmMljTGEaOsH2N+x1kkAmkX5XBxYBopWoq0vAR33VGO2SdGB/jHKkwaauxaViMOQg2tTJYyta3qJ5//vmlAbp5+npnpPoiU+NaLF4vNXLlIdvri1ng4yvN1772taUB3vGOd5g0of+kM4lhfKVZtb5uqDWNaTaZ2MfwckPgr3OYaNKQe5wEfNGFnCy4QQs1q2umuLPePozhInrggQdaV8K/1113nfubg73SrHC4bphdrskjfn7pS1/yuOyAiwGI6F/65eI+Xn18ipvvc5rxhn9LzV1JeP/JJh2vDge/uLnCH5mDwYZbm61gpWp6DrHaU7RX3C4Yo3Ncg/2n9vSEE06Ij8wNPb+HkB42qttYdPl8i5U5xoAkOoM3ahFLfLfddkuMi2yn9W8e4Ci1eJEgO3MtBbSc7HdIsqj66Z8E50vjbIWIwdC38nVuVyVw+iQ+xc0Wfm6xwD8WicKXNgw2YrziFa/g5cbASYKT+jnBydNwMliE2dygRVz/kgGXofFCY2r0Btw7bI1ts802i49wd5Zf2hvXJVmyCDIddZIy+a688soWoNRSnoTnZ/MZIS3ZUsaSZ8diiqlV+bBu8S+NPe1pT6MjiiFZs7mpjD6kKJuSQXDNnwzS4/kuBHr1ZHilG2RktCbHqMQl5LHN4HZ7GFbST3ziE8lTmiiHCC06S2hG1SRAZ/klHQrj3T5Lkbgs57
SufxWA8Y4zx0m+OeMgSW2++eZWKXqAZDyiY+EQpz3lnpYkX352rrJr+kicT4IzXsfEqbLv7EF+k2UVITUORlyd3Z1fX/1ctKY8I9VtSVwy+vKXv8ymKD9MiYBzoD8JZj8zBwVLhG2tiL+1eW7SLs0o8TzggAMsPCLMbQGMqjEAtyawJ90CMJiSfnxq7s4y2EuVrQx0j8x7sbfFo2/0osXSmo9ksIpMX/5jmKk+61nPspa58847Nyx2zrSt80w1lq2DhMbo46xy5gKZpTp93ate9So+sRmrQLKsE1EXMIVzf8wDnCsl8NVXX+2es+KY4Fy0HtF0ymBNmZuMsJ3Hfc+3gwwyTXVjEo7SLcie/kgHl/fQzl19TyfGZzLQp9dH7KCxkQzWI53U04bmn7Yz1ebqixyNaym0hjUqjeueLhFVBq3zzjvPJ6uJlolEPvrRj/qd2Bx29GTNQS+drzW1pFpN7BdHBhPgVT8xaC0FFHuR+nfMBFDWcETGMsWg5QIz5mJ4dnzM1twjvxHlUXp0MJl4/etfbwlyohNTE3f+cmkpYy1LRPPnQhVMQUmmHFeyz1NzLwQz2iJGVsUXXXSRxeKCPibuHDVFHUDr96MYxx9/fPIFTnRYbFpnNkxE9GXPfOYzk3yLP+kr+VqJ+XMlIFVgDc9PemQuYccCxNzCauQXBrKkt69zJamhQ0elbrKJXhtlPZcgMdfBHwKssQnPSQsU696TxhQw7Wy33Xaow/DkBBh7eLnGkKzRbPqx2Re/+MV03DGWufmUzvOe9zxzcxSXreLsHrJ+lgO5flchO4CqmFBaBgBSIB2WasUsSn1AjfHPhkO0M8Rlegd53hHF9u26vD6UqjEFdiVTTvo4PM8666wXvOAF9tQbBkqf2267zRoG1kE/aExNsUbEpCbu9g/FzZkMJmBphH6vJie0ijIbw8eGgT+XHSMaXGWJVQxZQCjw5AAlN3H5l+TxyZRfehIuW7NPm6KP44AIrRGJY/s8d53TzFCZkQu39nGGErnGXf+H4pv93YRhixNnPqoCn3jiiay97SkFcFNBVXj8UXxHXRtbe7BpYbviEaced9ppJ3oe1N8Um2sl/Brogw46KG4UsPQjasngPMkgd48ceuih9pZpxhzOo2nxijlcSKvw1f6RRx7pOx4sMP+ySqfRetfEeMq8BR00MkKjQrVk4xHjL3tKkqvDiB4bFT8byq/njqLQL2AhcXbSIeYIHWM6Pb8FO/30031joEfEkTMO8ulgJgAoCEiHEZwxlMkDYsUQzCk0y4WJCkOw703xrHOqjMmKN2JJMQXaZJNNbPylPHwnyQ/MoWFnvuE54tA4GGnkuDu/Pp9yVM1Fq0rFJMfvsmaIYc5WGhKJY15HH26nbz0MMxzEsGYykzMoWC4+fjHksROi2Oa9MO5g/rneeuu5sowNFkyJmX3Rn2AiQstsN0iz7QnFNHM/j+iOHBnMrzJTX0ZqZozJGplxnE+9uqHdS4tDMhhpDOcebrVImZlYshHQCo8u2E/N1lQnZ9qWOVP1UnWQUI875iqTb84Cma7DhnsmMPSE3NhhFfGhk7m6Xwh86qmnciMrAehn6Fr92KjXfZodPumayFy0nsy0yWB9aZuMsJ3Hfc+6gwyyxfDcc8+1FNihNXItyZlmPyzi+fLJ5MMOO4zJvK1Y3Z/90Fxb4ke13D9xdNDYSAYThlPykyU/JlIKg24EfWlVqdrOVL1h16svLLtuGteqojasUVV082dTpqkiWTSxdCoNHFeaqFtZebFbmtMIrGoxd5mCkd32SFlRSDO1pt0m9osjg6Xvq+j51y/HM1nX30QIoBnxV7LgJ7TM4nryySfXrJPZFInirGib3WWXXRxj6SEtpimJ6sfD40AVzpS3mKx9NyuGHOlObk3hgqZ6TT13v/Lei1mbDyacqKdOckelWH8VBkYgOt8klv1kKY7SPDl9FYuBWtN3/hZTINniGSmPjoLPl/q0cPdv4kBTU/+BNOwKxV
qb8czKiTbWM7JljPl7w4gGUZS8HnhKHBPfkTeQDCZ4W53QIi72VLdTFhskuvXi7st8+eVadrd9FjPFB0MRFUmqVvWz4RYn7E+ledV4Fi88xFRf03Vw0O2Nb3wjQ15pUSWDcymD2F85luoXAxabEyMvV9glR4K8hbDPYPXVVy/Gch/am036PUp0dJDfGJ3VVFXJMTVVXYRoKeSMgyju/RNiXlN3sHmlePbai925ypjQ/GCK5xUdDM2Yoj0jd2gcdBT5jm6vb+RctKpgblHmRddcOYhxJbYE3My12PqDfagqZffPGRSYQmPHsqy5X8XTHOlgkxw3LiRljj/ZrcWyvCadHBnMqTJFckW5F5i7m+hPakorGayB0++j4Waqvp+PzRMNy5wzbcufqVLIzhJqFRxzlS3Tzgtk2xlmUomM+zvyDW3oQF1XEDeq8sVNDzwTjonPRespTZUM1he14Qjbbdy3rLvJIJtxfXxp4ih+ooLci59LZwsjypD6+/yt2N00NpLB+vY2qacNzzO1nak2VF9YrbtpXKuINaxRVXTzH3lCi2CcOvVgpZLIEpiFcFVGOVrTbhP7xZHBKuaJP5MBGbQmYsz6a6YyaCXNkZ+f/vSn11133XjqCPUN+5TZk1IMbD7onRnjWdjbiaiqYFzmw8EFzFfeVXGDCkc92EBaGqWXZQbXqqBGTMxaKJQx28SJeGkB8Lzlllsw4fj9gZScLWnsCmeOTudbFcv90QWwR8MvPyQ61W+YNXptNvwmZi1MWdgVOO3kWRQdfHTRCGNyKz4d6YNGlclZsh8NYuxf5nxYaXSGTzb/kimGDY7peBgsgsWGgRkPrQSBV1lllSZqIE9tPI5pWMAMJ4POsK1ByyJyjJLWS2OwBsa/bLfEVs2xCU/ZHb3IL700mfpHazxfzrhwU6ivmT3TGkfDGWGOZiTmDhPWM35JlJWce9LoXYt3ysWIuCWDJoYJlnH+HEgG2W/OvrPkE/csfTl+xA7W+gpiEmMMZfnt+xWsUaGS5vtVxRv/iqm1kt8kOhu6abq+tiFrzmiicCxac5OI/MwZB9ntyAjOYOGyT/X5iY0tuUKtmC8+natMfTnDGvs6CkD1WXFRndK8NA6WYsnxbPv6Gs5Fi0VqqG5j3UvzY38o0yG2QzEG8b3VYmpVPp0HhdNOO83aPy2QY1VV6Vf5c6IRw3Bik6YWbIltklqODHauMnXBoEUng5acZcLBBx/MmY+RIi8ZrGoDQ/gPMUoiYiyyrLUXv8laVYucaVsvM9UcCR1/lR1jtwUy57Nthcg+eqbonhrn5OigmN5w/YB7ooK0CTwXijCHcf+ZcEzDerAe1PTIYH05G46wlkjbcd9idZPBvgxaDK/sKuMADbcBcZQfyaoH4k+7aWwkgw5wqhy+RGLrdk3B2s5UG6ovYo5tNa4xbnQ3rFGMUnS7pQp9UfFp9OHqKc5n+xzAZgIofNj+zuwuhiy6O2tNu03sF0cGi6hLfVCq68pBa7H69x6TvXIwvgAaK10DKjb6EY5L1xzbslisM7mKp8llAuigmdmwsRqNAPaPRDcXy9Cvm5NYqN5YvXPmgxXyyBoluROdy0/o2ZmvsPsseTryJ9NiVJkNYcbUAMulBxh+sGxBrAnhGD3HjSaCfGmTaA+5cylaIkuT5dAu87nkhVY1DAJHM2FpghPxnJ4rJoaTwUywmCRZo7KTjvvHxtYg0dejOuRCIdbGWJQ7yGBmrTtHx/yMHHExI/JbtEbUJysZrOcz9NOBZJBekRHQTEE0ZsaFpNusrxeihywwptDf0jNzBUp9+ORpjvxiamX4ppPsNnx3HgepAgenuBuKrTZs02sr/p2rjAKOiRC0MUWD2k/JJEjjT42DkUYv7lavr2rK0UtJSIT0k8V2h5RzBoUO2VkUVM9cA8jSnQ1e3Hf60Ic+tG1SOTLYocr0NokRrmGBJYMNQfUSbKBRspeyKZGGBNoukHnpSHRxEYfxm0Ez2chFYC72YLrSsD
DTE2x61oP1TOZSBluN+/V8hn4Kf7JoNY3PL5JkMJ/hBFMYeqZqVZuUxjUTLLYrZqrMV1ExsdhkY1PzBAHbQWvabWK/IDLYED5XDsqg1ZDV/AebHoPW/LNWDUWgjMCsLGDKyi4/EZgHApLBeXiLqsMsE5AMzvLbU9nngYBkcB7eouowywQkg7P89lT2eSAgGZyHt6g6LAABDFr/7+H6BaisqigCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIjCTBGTQmsnXpkKLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIwOIQkEFrcd61aioCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACM0lABq2ZfG0qtAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAgsDgEZtBbnXaumIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIjCTBGTQmsnXpkKLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIwOIQkEFrcd61aioCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACM0lABq2ZfG0qtAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAgsDgEZtBbnXaumIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIjCTBGTQmsnXpkKLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIwOIQkEFrcd61aioCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACM0lABq2ZfG0qtAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAgsDgEZtBbnXaumIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIjCTBGTQmsnXpkKLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIwOIQkEFrcd61aioCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACM0lABq2ZfG0qtAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAgsDgEZtBbnXaumIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIjCTBGTQmsnXpkKLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIwOIQkEFrcd61aioCIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACM0lABq2ZfG0qtAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAgsDgEZtBbnXaumIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIiACIjCTBGTQmsnXpkKLgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIwOIQWPrWW29dnNqqpk0I3HjjjU2CKYwIiEC/BH70ox/1m6BSEwERaEVAMtgKlwKLQO8EJIO9I1WCItCKgGSwFS4FFoHeCUgGe0eqBEWgFQHJYCtcCiwC+QRWWmmlbonohFY3boolAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIgAiIwJgIyaI0JtLIRAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARHoRmDp5ZZbrltMxZozAn75ZOfjfnMGRNURgUkRkAxOirzyFQEjIBlUSxCByRKQDE6Wv3IXAcmg2oAITJaAZHCy/JW7CEgG1QZEYMoJ6ITWlL8gFU8EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEFp2ADFqL3gJUfxEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARGYcgIyaE35C1LxREAEREAEREAEREAEREAEREAERE
AEREAEREAEREAEREAERGDRCcigtegtQPUXAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgSknIIPWlL8gFU8EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEFp2ADFqL3gJUfxEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARGYcgIyaE35C1LxREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERGDRCcigtegtQPUXAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgSknIIPWlL8gFU8EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEFp2ADFqL3gJUfxEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARGYcgIyaE35C1LxREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERGDRCcigtegtQPUXAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgSknIIPWlL8gFU8EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEFp2ADFqL3gJUfxEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARGYcgIyaE35C1LxREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERGDRCcigtegtQPUXAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgSknIIPWlL8gFU8EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEFp2ADFqL3gJUfxEQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQARGYcgIyaE35C1LxREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAERGDRCcigtegtQPUXAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQAREQgSknIIPWlL8gFU8EREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEREAEFp3A0osOYOz1/+IXv3jxxRd/6+6/O++881GPetSqq666xhpr7LTTTve+973HXpzpyvCuu+664IILvvzlL3/lK1/55je/ueKKKz7pSU964hOfuPnmmy+33HI5Zb3pppsuvfRSUubvRz/60WMf+1iSJfGtt976fve7X6uUP/OZz5xxxhk77rjj05/+9JERb7/99ne/+91XXnnljTfe+Kc//empT30q73rJkiU4SuPuu+++v/jFL0ofVXlut91266yzTtVT8//Vr35FGW644YYf//jHf/nLX97ylrfUh49Pf//739NiP/jBD5566qkPeMAD4qMm7q9+9atf+MIXbrvtNrLedtttRxaV1/TRj34U+fjOd75z//vf/zF3/+2yyy6ISZPsFCaTwHAyGAs2zkZ16623nnnmmfQn/CGGyy+/PG3p8Y9//O677/6Qhzwklqro/u1vf4v80mkQF/Eh7mqrrfa4xz1ut912e+hDH1oMX+Pz85///Oyzz6ZVH3/88TXB/BFSc+GFFyIIZH3HHXesssoqZL3mmmvuvPPO9SPF+973vquvvtrTqXJsuOGGW265ZfL0mmuuec973pN4Fn+uvPLK++yzT9FfPr0QGI8MWlHPPffcK664wtxHHnnkMsssU1WFnFLlxKU8jAjXXnstswL+HvSgBzF287f++uszgleV1v2ZcTH2EREpRpwt7tOe9rSNNtrIw1Q5cqYNmVWORbrllls+9alPMW9hDKXjYjSMT+UeA4G205hWRWoug23nk16MDoNCj3PRD33oQ1dddRUD2be//W16GAYy/pi4rr766l7CJo4//OEPDG2MiQjCT37yk0MOOaRmFO5Q5ViG3/3ud4zXvBrGfVZqFJXBl35jk002WWqppWJIuSdCoMcOtqb8bZdONJWTTjrpuuuuo6
mz6mHaxgpm7bXX3mGHHe51r3vVZMQj+nkWWd/4xjeQlJ/97GePfvSjEROWinT49VO+YrLf//73Tz/99Ec+8pHMVItP633GWeWcyXl9LfR0DAR6lEHWHaw+WpXZVkPFKJ///OfptxEiROnPf/7z3aPNaqx3Nttss2LgkT7I8tvf/nYLxuKLTGui5KgvcmaqxSKNYVJRzFQ+EyfQSj9ZX9pWDZKJGTOu+gSTp0wFWXImnqU/W8lgTKHt0ukjH/mIzTDpPazrQFm08cYbb7rppjHZUvf1119/8sknf+lLX0LLtOyyy6Lppa8g7korrVQa3j0zZ6qejhz3QLGov/EQYAHG5PKe9yw/FYfYnH/++eMpSWkuLNvsD4PHRP5++MMfVq1vH/awh7Ee7lwqFHb0L6XSzmoBpVWTlG+++ebDDz+cuZGlw8saGQvN9X3uc59ivqyHDzvssNLoHex29KGlSZnn5Zdfvt5668XlEEr5mvDxEV3zq1/9alf6o5GPT+vdBN57771XWGGFWP1TTjmlJtb3vve9qmEDjKhXmD3XRJ+DR38TwRsmVZfhZNBrNM5G9cc//hEZfOADHxgbobuZTh177LHoyLxsieO8886rmosQ961vfWtNXE+KMlxyySXPf/7z73vf+5I1+nR/VOVAdl72spdVjRR0QRdddFFVXPy32GILr2ONA/EsJnLcccfVRPFHTNT+H/beO3yiorr/T0ETExsSFU0ECxZUSAApioIgAqKIIEhRaRYUBEW6SC8SFZEiKgQVkWJQkaqIYlCUElEUDRgTFQtBjAVbYtr39zLnyfs3z8y9s7Mzd3c/n92zf+wzd+7U950zc+acM2fSvPMRswg0qC8F5x1SB9y/XkWBlpmhJe9vfvObHXfcUQMvDKywwgrQYNTO6PGtb31rHx1hkoJ+K0ofPrawDS1dVhuYOpCkREo71ABKMK+BmdOggK1gY5S3MFBOgxX8pNpQsSgMwouiCOyzYUK+/7rXvQ4FgBqZCaDE2mWXXR70oAeFMwAcYyZLRZdVGtsNNh1hXQpvt912c8+IgsPSoUF9lDAwyAQbFpiGx906YbCIKushD3mIhkoYgPH7/Oc/n9ZiMb/97W8R7fXZC8LyISjvyxvG//KXv0QxsNFGG5nO9fWvf334dmR4ml1uZM5H9mUOEiwUDTK9h/RSEt55552jr4yQbY899ugzONh8882RU0dZ8o+M0rXXXluNQfTcl75RfNHCqaZNmg5TkdY7fzFLnAYFeIV8Unk7A+MOyLPOOktkUhh4xCMe0Vl1FFlOg8pYsXWCw+SkQV/Ln/vc56KvUvlRAOMnDFY688KyIv+J0kePLZxqVNQiP8IjuUKrU7kzfCRaE6hXIx5ZDEzq+uuvj7mxIgkgQh2+7rISZztxf+ELX5DiBBzuf//7A85KK60kcEAsr7npo2REQqFl3MMf/nCksfe73/1UMjaeN954Y192ZF4XXHABnFAkFxup0DrooINUBQG2x1KGWTzHlVKBWoUQAblbZ+Ph7TotkkYqtLACRtC/5pprhu0nXK7Q4kBM+O1UTkahRWv5NErJAMDAAdvG8NttvfXWnT2dm8h5pUE+0PQHFfIFVLkaUQQY+ZjKrrzyymHkG9/4xs7xE51iRDWLYI6TSeGAhMY781ok5oEojcJpn3pHKrSwPQ+zMO898YlPxDA8Wik4ydpXNYQTdrAv3KnQimatvryu0OoDvzF+cutgZ8MwHws/cZ9Cq6VVLXmhhVCaAC2stdZa0GDYZjYSnV1jS/Pyl788TInNEItaSL/rrLMO605n9ha2oaXLagwn0jiMFbbfwq7QEkSTDlSwMRVNKqTBaGYu5CfVnopFoZ0XhRCgWY1heEK8GnC+OXSKwNIGqaqdaYC39D00yVKBeYVWRZetdnj7sDp4USztwo4wjSAxTJs6TzGz5UXzSA4ywWaqqNs6HXvssRqZiNQf/ehHM9pDS0qGEGdH0nqxBA8t+UgGy/eMZzwj4l
Q5ppzmtRhKuPbaa1kKI5VYuUJryl1uZM77cJiz+IWiwQqF1uGHHx5+cQ41ssUTDeLfBXYRHi80g8ASsY/LDYtSONoG9im0WsQXjZyqmhoGpsNUhDXOa3gp0yCYV8snM9+rbkBWKLSQxmSaoVeFNKj0FVsnhKjh2QObOpDVhJFMHZ1cH8YodETTDgGOVocyHETHJ598spqXBqo51bSoRY5xhVaZqmeIVGJYIRVMsTDxU6nnn3++pP8odVB96dU0A7OduHU2CwRwEKQtLkJhvYJZLzTnFFUjEdPWlFlDG2COVjD6xeiweVAWBaj61a9+tdKEExbhvEKL3YXSs6vBz5gVi4OFV77ylXrFSFB1FqCDsGUjfyg+rZCnPvWpUQn2SMcRHKgilHYIwlgYMBYQCGlGDPTw7hhu3VUCgUKFVuRR7VnPetYBBxzw/ve/n2Xm3nvvTSu1mBe96EVWF1/5tNNOY8Nj8XCKO+ywg5rBqtlXwhzEzyUNzmpQwUNo2OAp4sc//rFGCKfENKsgevjUpz6lVxZAwy0qYG+A/xYlQDMHKVnJcCoc8NcrBc4880xUPqo9DIxUaKG1tfSsFG9729vgllTsRRddFK4U0LhehQH8zFgJRx99NC3p+91www1hLgu/9KUvtbxsL/syEo87izTvfMTMJQ12fprUQUTfVl/EUrE6t+TlAIeNRrRQGL9zWss6Ag1inGuv+E/pl2Qf+chHlIA1lyXV8sJ6MTPIhvdNb3pTCk4L20BpLV22xmDLH56cw7CDw/1nnHHGdddd10f1aS+Wb8xsadBwq2NjxsW8kAar+Um1p2JRaORFyY7PcKNBBjBcvRrDooaLAjuyTAL4Ur2KAjDqHG4WISNfgFE88sgj8WGIFxr2R1H68LGiy2SH4ZRpHVKJz3zmM7YZ4exLqOTGYWlY1/yFlwIN9qHaPsH2lUx83daJ3Y04xo033vhb3/qWqmCsyjEmRhXp8T5xqixz+++/Px7/lBc3ueIkUTCn+19iIAeUZ6KRMFCo0Jphl2ntuMy5wJn7wELRIBPsSLkHCb797W+b1hbBDk6YwzGgrRNaZKyfJUMgDTQo+ww2dGGuTBi5TWj6zFjtU2i1iC9aONXOxk+Nqeisfc4ilywNtsgn89+obkBySqmEfkmDDZMtUtxjkm8Jb8tp0Iqq2DpxyYsU4Rgu4/8GAxErDU71iCOO0MrO0edwVrE0Rx11lNZctmlcD2Hx3/jGN7R2kyBzaqKOU7Va/F8IuEJrSgqjD3zgAxrxn/70p9Na8dqpA0CHHXZYmmAKMTOcuPHvYfhgC8ksoAFqASYI9sOWILP1jXLZIyJdy4jfnlSbglxbtqKp6RwTmeW1f2Y6Po024XmFlnZcHGINRdLWqn333dfKZJ/z61//urPl+Ugdveo888Ro0fkqJHd4YNckmy8W5VPYZazj99lnH8WUKLSQuSs94oy8bzQ1BvCVC48ZildA950g0Ffk/AXmkgZnMqgQKIi0mQTSocJsICOa1FAItZMNyGc/+9libsJCNtlkEyUI4wnD8WgwE4AZ4tYNbXjyCi3OBChvp5NVuCKtFJF9ojWD2nUGBUYtatvIR/ULX9IjE89lgrmkwfRLSW4r1Q4Dr1Oh1bI6t+RFrielDpdWRl1gnEun1SmhgHKNlFj+UhJGKWtvWVCkJ1MVLWxDS5etAajPZWhvWu0S16Zq/BwEZkiDhl4dGzMu8uU02MhPNi4K+X718aII043E0A+hgU4LOeaYYywBTDXSkDQBMbjetTT8c0I63R105iKyustizjkDFwlMKRbBqCZMbnzpq30O4mdOg30Ytk+wfSUTX7110pEI/HCkjjcwexLbdvrpp4cNgDRkc4lqKnxlYfZuMmNKWVkUriIQAqyYuFzT7q9EoTX9Ljcy5ylE8xqzmDSY/5pvf/vbbcBHR9URyFg88zNEkRaieYNkhNMEUQxMo3hIzfmdCq1G8YVqqeBUozbzODWmIq16LmOWLA22yC
fzX2rYARnVheWH0SlGGDq3EKXRYzkNWpa6rRN0Z02CU0XfptoVwPuiJeA/cogV2j6mAhn2sByWsLycaVGBYaCaUw0L8TAIuEJrCnqi31UhaSYimL4qdVEEnHFfmonGz3Di3n777Y3mudaikzLZA1gCtErlwh3gksyaE0KdJb/4xS+2klN3zLZgoGOjVXBIzDuUIElTRqGFr1Urk/8vfelLab1seGSylz+LmuYlhvKNwcJHX6cg4Pjjj7cGsFOK5t/OAhVpugfsBXBHZh5jtfxQ4EiFFlsvXTyAmCM1RVRFUQBzKmswBozRK3tEkGEJ+C+886yznCUeOX80COAzGVQc8rMB89jHPraTRmiYRh00Hoq84UK0gUHn3TlmOBRi5XMOPUrARGGvUKLjupOLwUnAWSuLzCu0uJ/DknFSKipWj5LjoydTpAIwWFYCog1FlgfwdWPZOyeu8nKWb8q5pMH0c2gUhX75UgEuGVtW55a8HEiyocjOJyRP9YUjGpYAasUBuuIJaAnmFSa94SsLs5WSh9vIlXEj29DSZWvbZpttpo4zGtPGz33MDGkQbKvZmHG/SyENajAzKjqn5ZH8ZOOikOlXhhflelobxiyCnSWgSJbtxa233pqm+dznPqeFGO1X5ySQ5rKYui6H00IfZ65zAPPtAXu2NNj3WYlvn2AzhVdvnbTT7BR5U6P8THAgKWzAFVdcYWSC1oqlJ3ylMMeILQ2SBEVawBRa0NEWW2yBrxezj5Q/mBKF1vS73MKcR92f78fFpMHMN4VAdJoh2podeOCBRiP4mOkrQcJljl/3pVG8todQlsQ1kJgSKKCUFeILLe4VnKoaEAamxlSElc5xeMnSYLV8Mv+xBh+QUXU6cN/HFobpRVkjadBy1W2ddO3l2WefHdauMDJnJM82vaDcUjwB7VLZTnYeToBjt4z833LLLWFeC9dxqmk5HuMKLdbHafwe97jH2Zg+99xz++qDvC0NJ6P70kw0flYTN53SiU6Or3WSJVYnStM5KXTmwsmJQcqx8dCNQ5hYvvtS+S8HbzkwEWllShRaJ554otWLs8GwrjAsG3AcA4bxJeHdd9/dyk/t9cgOVjpG1pkgUwXWT0jqTXVnycZSaHEdkTUM/o9mZCqKXukcGPrF6JU9IssQ8uwAO9PMQeT80SAfZSaDSo49CfQNDO6rt+HKf+giBr9eFo+Doz71OVpV5Y3UAJAPm6uvfe1rYb2FCi2cL1uxfWIRyjz11FMtDVu7sAoLi39affXV07cjY9DtWeGdNvUjs89Bgrmkwei7cHDWvjInPxAlW5j/aCSTq2V1bslL1Thusoa9+c1vjtqvR5miR9JneYHgVIcSR4H99tvPyo8E0y1sQ2OXaeFll11mrULAgaO5qM0L8jgrGjR4q9mYsb5OOQ2285ONi0KmX328KISg6wfQS/WVgLmJjfZOQaHuz+Pc8FjaLKqr6zInaaw9aAjSg5vWi9DSf45XydnSYN+AaZ9g+0omvnrrhDtrGzb8Y2bRWYUuAkHRGybQOUWceYTxYZgDylZ+KjGHz4S3xJV9mL5coTWTLrcw52E35z68gDSY/6bydcQhkiilfEtwlV30So/oko2OXvWqVymyM4CvArtsgiPyHPuQnW7nOtUivmjhVNOWT5OpSGufy5ilSYNAXS2fzH+mYQdkVBfXnZgNE8TVJ5JVlrFokFx1Wydq0drd6aHE2rPttttassjUePPNN7d43Hep5VFAJ9723nvv6BWPdZxqWo7HoND6A31LD0wIAcwnOcaIvoSfXOeldemuJhktpmnmMoYTDNhFWtfQAHX2EQWJjg6EE1BnYkV+//vftzBiO8BXfBiQPzQ8ODPDhq9QrnCqXXqU8FU+zBJoCdTmNP0222xjkd/5zneYidIEfTHIHGGqeItHNebHNBmicJwcEo84gDOwaYJMDJddPfe5z5VzjEzK9BWif+273vOe98imKU2ZxqA1tMg+tJHuwVlaGuwg0hI8pg
WBydEgrZrJoOJYxu8m3Ac8gEvm+pDRlEuCcNblGmHjYilEevSokHCgItUK30I+iBjkJCp8lQ+zUiAZsWajjupLrGaHbVZiRBsWlldovRoZYA40SkQWORb9jizZE4xEYKI0GNbOMmeOvBiomKRJ7hymUbilVS15aYCWb9nVqlUKaPmOuIJB8lawDY1dpl/4ELbecX+YZDTqrwcmjUALG1PetrFosJ2fbFkUMp3K8KLMLVx+YMtoH1dPyVpGozWUV9wehA0+ARZE/CvoqFamPeGrui4Lagxyo9tTVDiKdi3BcO+K98AUEGifYDONrN46af9C4RrSUUUyFYr2L8zzRiZcKBJl0aPKTMmERQrzKZ0PU5bCwEy63MKcF/bLk00OgYnSYKbZyElkF8imMkrJSDY64ha66JUeM3SkNBZAtIJTesLom2V1EaXRo8hf5euVBTLiixZONaplykxFVLs/ThmBavlkvp0DDsi0Io5tmAMbbBrg69IEYcxYNEjGuq3T7bffbiKXlVdeWac/w2ZYWCxfJHURx1gi7GXhS0uu41TTcjwGBFZwFCaNAPui0ICrrzqu0bJXGaVXX95lHR+euJTSIu2RBKyR6CpNqRiVLOdCeqUAp03Ze2P+SQwlc15Kr6oDmqH6ruql5FVXXdXKR4qNlVymhVEzOCyPzIVIXEUJkzANpwDtEUPjPnF8mH6o8JVXXoniltI4ZSLPaYWF62AKF112ZqFkEZHOO3am9MgKBEQpLO3D0mBFY8Is1YMqulogLFNhrg+1MLph0SMxbEv4KVlnIDyYj9lsZ5pxI1kpuPVqZC41u3Ol0ORTodBSXhi7cQWII5vtCfIITI0GDz30UFtDEaWtv/76MPSZhrW0qiUvTVL2zOKoFTDiCiadt49tUL11Eylr3FVXXWWf45BDDsl8F381IQSqV5yx2jMWDWparuYnVULFopDpV4YXZdsfLqmdhcDHfvOb37RXqThAfCzO2EcWlZZf12XlykBNXbSHkzEEEGcwhaa1e8yEEGicYPOt0pAbd+uE+hO7EE6PUT5bmMc85jFpRdraRPsXlhJ+afowRtxmSiZhsorwTLrcwpxX9NGzDIvARGkw01RYI7t6ACrQGUSlx7ORwn2BQjpCtWynPbAJzqjHVEuL+EJgVnC5aoAFpsxURLX743wgMOCAjABhd4NlEpGIJeUeI0qjx3FpsHrrhJfC6CSD2hAG+qQuJRyj2Fdpv8KSVcKwzHlYxeKEXaG1VL61HN3gAniptGkq7dAEioY8UyH39dnbSHSVyVJSMtosthOmiRlKoSU1ktn4dLZwxRVXRObFkQjeMs1luJkwOwWaV1nEzZ2cFtdcyZEa1uuIDHAhSAw/Zu211loLFo0zWDI3CAtvDMsTi1nN48ONNQB/Vmzk2O9RL4b2nH3prAW3+HYfMv4kySW7eyU+5ZRTLMyVy/RC8R4YBIESSqGiChpsbF7LoBpZNR69LA1TLvPAyPRhAmR59shRS9nehgkmF1azdTVjWJfYI4gOTTl8IZbymNLzyPVd0A43e4Xpw3CYl4vHcMcK/d55553MVOTlt8EGG+QP9ISleXgsBKZDg8yuHJ+lYausssoJJ5wwsoUtrWrJy0qHqYc1L8MY9M1IJVUrL6s/Z5rlp7ckbx/bUJKXTqnqiJn5whe+YKfVEYmiVKZhEDs0yPKNnQFrKD82YJGR4MiP6AnKEZjoimPNGJcG2/nJcGIfd1Hog24kL9qXUfHvfve7TQfA+ZL0MGL0IW644QbcX0MLOPvF7AxCwJFLRu1U1+USqGl/qNBSdzwwBQQaJ9hMC1u2TkzI2MtfeOGFlM9WJZW2c7LqggsusNp1W3amMeErNm4mByQSf0fhq8bwku0y/RKXW8GcN8Li2fMITI4G8/Vye40leMMb3lBhb4drL7MaJ+8uu+zSV9dPf/rTfffdl7esBZz5KOG1WsQXJWCKXYw41bAL02cqwto9PDcIDDUgU0C4bsr2dKyVbD
/TBIqpoMGJbp3gOfE7as2Tby17LOEYpdBClqI+KlDHqSq7B0IEXKEVojGzMNosU94+8IEPHJZnnVmXiisWPXMpbiaTFvVIBpTJUl6yFFqZ0spfyf9D5wlTlcO3NoUW02WhmSeySMuy5ZZbdjolw0+LlY/8GmEBOiR5aCX+k5/8JP9Mr4i511tvPbVkkID5h6EoKuXaJO4xtpNkVrg5v+aOYpwG3Oc+94lq5KzJq1/9anR1eBXYddddP/zhD4fNO+ecc8zVAIsHnujH1T1EdfljikA5pVjechpM6xorpmVQ5SvCoufSSy+1NPKBns9ib+HJcDN9ySWX8IguvEQrUFJsYRr2+ebmiNmDM5ppLn3Kiy666KSTTjKJYZiMS4ORJHaqtJUXbTTmQj/5yU/CjIQRIyKXGdxMOKplMR8F/uDroPBESWlXyhHDUlKiiG1p1SB5aWoGkD6uoKRq5WXRQeMrxzIleWkV2VO2oTyvfZRoItV0x9VfV1999W677Rb6mMJpPrkQ/UODGSWflez/dQjoE1SwMSU1VtBgOz+pYVmxKPR1aiQv2peReCgO6f/BBx9MGIkh3F3k3w8FsxnjkwBDClY6PEqpQDvhgYr3Xe96ly7x0lsL1HW5HGqrJbxKM2qAP04CAX3WzKJAvZrbowk206TGrRMXPbK9QkXE4OSA11ve8hZZSODECd/1vKJ2Nno77bRTphnRKxamV7ziFSYcwBRpr732ihK0PC7NLtOjaua8BQ3PW4jA5Ggw04CbbrqJuxhJAOHDF2VSdr66+eabUWKZRAIiylj1YSiMzQeFQMWFtrMt4osSMDWbRZyqejoTpkK1e2CeEBhkQKaAcCMp3JrFQ1lpgjCmggbFt09i6yRnhmy+IidecIzmVicj7EVWY71DdsTSJrciFinAB2TOQzAXKuwKrdl/bnxqv+Y1r7F2HHTQQSViptk3ergWGKNPeXmjG2nCIxfkmYZMruRMpbzStT1cMc083unDDZExZghWTqj4yZTMJv/UU0+1BH1LAhaslgDWB+mzab+IQV4gg3fMBDgChZaIXVamunFfqWr85MqNFdIK9E/qIO2/8cYbP/3pT6eDHCE7rCr3n7N5Q5aE+e1TnvIUvjVmuebdBRH8xRdfzPGycRvm6UciMCtKGdmwxkGVKR9/a0aA3Oq50UYbZVJywgnzHxKQHhdJ3DxpUhI2Rai1MkbimTLrXkEO3GZsefEvkRIRr8Qe0ezOWpAkYu2O+45UI668stKKSoBrZFbhqjxsrKJX/tiIwBRokOuybVSg1Hze855X0uCWVg2Sl0ZmGIM+rqCkauWlipCpKMlLFmUfKi9larpj1UOCzyJOJD98oiKzsDDGT8hZUHetueaaFuP/AyKgT1DHxoxsSQUNtvOTmtgrFoXOHpXwomFGTpl89KMfJQYulDZgTm4CCFg+tvEcOgwTE0abZUcVMX7C/p27xC0BSgJMNIwuYK332GMPKEWCkrCQui4Las6fYQr2tKc9LSzTwlAiJyYtLM42TeYxk0CgZXLOt0eEX7d1Qs4FW8Wqir0Rh7Q43c4uBjdojHbOGtpGDK7pvPPO08LR2R6k9mYMjiqLbdTHP/5xxjkpWbIRq7EQdOaqi1wiXU4bX86cp3k9ZtIITI4GMy3X8SxkZZH1Q2cubHZhzFhEoEcUt/BL0DXiCCxi0/u3VALJ7CJw/IIeffTRih8ZqBZflIAZzhght6lWzYSpUO0emCcEBhmQKSDIG81AdsMNNwyt1dOUdTSotWzwrRPiRyyDaSf70OOOOy5qMByjSTuRanJyNHprjyjjFZ9yjHWcqgr0QIjAeH6WwpweHgQBVtk999zTjO6RUGTW2kGqW8qFZORWYbMLk1VkqSg5rEVhXIFZUWjjzUOgXlmA7Q0HV6VhGnlhj+VCwmWCZsZJKgKwNDLoRuZOLXgyRIdELjZFxJx99tlWF0IBjtWnJzCidpY/0hcpz2x+p4Ns8GC/iEeJJfkpkzs8ZV
oyiHHYhVPJvOLeSOwcTzvtNFZB02Zh1IBSwbVZKW7DxhSSQGGyxra1D6q+BjC02PDwFnLopNAwIyIGpmh+XL4NBZk2Cykb6upparNYKbBMNDsgjNYxfQgbqbDYI74RNIizGg51cRaNy4118h2dMReuKosCyksMQnOIEe3dNddcc/LJJ8OGWjJmEg5TiutVXg8MhUAhcRUmU6vg+O2cK0JkGUbo7chAYXWdyToj0xr7kvXFhyX0pemLD/MS7kzWGRllHDavlu877rgDeofMUZkjW4fo8NwrkudW9rEOlaZt9phOBCa34lh1dTTYzk9qYmdIj7sodAJVwouGGbF5tzWUccvkY9osdAC4mO5kZUUI7P/RZqHHQliAbgBCYOnBGkPXlpx55pl2YCusjnBdl2kSTq2tqFR4YfEcctVdLIWse9Q2f2xHoGVy7qxdQ65668TJdXY6ZsGN1SCWQzBOl19+ue2MUNJgkDdSFn/WWWcZpRxxxBGcxGXA01qk+YQ7LSM7+1IYuUS6HLV2LOY8yuuP00RgcBrsazwzP5pd3uLwHDrqSxbGY/ILHbFV4awkRyfhpnCczv0LGQnbr371K52AZFc4klTD6oCiUXxRDeasmIqw+x6ePwSqB2QKBXpl7MMsvs8W395W06DWsmG3Thgxb7fddqaFQmYiMYj6uO2221qYy3d1SkxvCWD8xCykmJRjrONUVaAHQgRcoRWiMYPw8ccfb96rWD4R34e2GDNojVc5BALY5cmLOt7JzNGfCmaLwuF3nWEiHk5Lb/sCMGT48bO3nbdn2SuEXyqBE06YJnH7orkn4k4OtuJI4a06zsli16PEjYGwXhZCbhiCAeXGHSQR/PCzccUVV8BfWi2sbZgfpjUip9BdWdFbVINI5PHdEcX74xwjMMigSvHBlEZMFRKHOqUUah6mbilx01oGj2H8m5E7KwUOPPtWCsSFePt8znOegx4OGsQjE8fkuYeAO+pgrWDOrGFYDdsWMWwnxo+ITtA9Y28Ic4bwEdemCByBi9LoryWGyaMxYUYPL3EE2FHgucgOPTD9Rk4PlnjjF6d54Yz3kpe8BGNDlBk4rGBJ5TwltKnLVLgNwvjGxQFnCj0N8a9mY/raWU2D7fxky6KQdqeQF00zRjGIKXHUpssJwrfhh2D8s2TDGT7+8Y/nuD8n9VmhcCElf1AYmvzP//xPmJ1wdZclfbjssssif8J0nJjzzz9fdZWw7krsgZEIcDIPHWf6w6HFyLyNCcIhV7d1QtUKl4VArbMlOIy1wx+db/ORiNcxpcqnqXi7BLs8CHNeAYVnEQIzpEG1IQqwTbMZnnOK1c6W2axx6Ep31UdV8Hj44YfbJTcc/GUDlSbIx8xEfDFDpiKPhr9tQWAJ0mBLd7DkMK+5HHzsvPlbhVfTYLiWDbV1grgoyhhU7JxQjaudCmy99daakZDrRo4HkVjSX7NKsSwpx1jNqaoNHvj/EeCshv9mhYCYVDbP2HPNqhlWLzOO/di2TfOnTSMuGjL1yjAHvw2ZZOErHehBwhvGR2G5GTn99NOjV+loS6BXAABAAElEQVSjFOyIk9K3imFek4EPomd0Oe973/twQwFHZdMfrlR03ShyK2XsC0j6jM9W/L30JYPhM9rGlIk2dCZDiGBpaANTbWcaRdI2TRZs2BQfBXDFpmR4RYve2iMHwuRMljZEaZDRQwVWCB8O0uBIFgdEOM6FQMfi0cn1dSoqbZk+/h8JfnfK7Z8cDXZ2ZGqDKq0dczYNQhQ2aYI0BotdTsHz42o3lDo6a8iYXHfddRnVaZY0xg7HkIVTF+nbkTHMHkYC0Ahny0am70vAiUkux7KiUDP3JeuLlz6MqQMvUn3JlnX8XNKgDsUytUZfh6M/Nh74x84getsyM7TkNSef1jD4k6hVerRrpUjGsTNFEmB8Wl5scsP4MGw3YFmy2267Ta9a2IaWLtMAXeGD+gqrQDUpDOAj3tpMIIyfp/CsaLCdjcl8hWoapM
zB+cmwneMuCoW8aFSFraHkRTqJmQV2TjaMuSAE44kwMeHw2DQKpOitPSL7thL4J9yZpi8y02XoTjsCSsakA/MsztmgUZMKTZ7fWNb7qlju8TOhQR2P05e1gJatxgk281Eat04I1HQxDxwmmi2OxXNg69xzzw0vzTrssMMybeAVHimgFIwgsV1Auhf6lWVjhe+KfHZ7K3tKLi3OpF8iXVYLK5hz5Z3LwKLRYOdH5Dy65lts8jrTpJFY7EFHHJvgCC+nex/2sIfZTILUBcpK02Ppa5dz49Um3dMpe99iVC2+aOFU6cWSZSpShJdpzNKkwT4wC+WTfdmJbxyQnSWLcUJ505nAIltocBJbJ93PuuKKK7I16Gs5onuxK5iKcgYUBR4+DJAv2cSl6Yuv01dIX3yGU+3LsrDxiIt/b7ZKlEWuna2drOxhf2cOxUwmbmjP/JMyI3CgJ0OKUmhxQXQmWfiKvahNNNw3E8ZHYW1fOSEXvUofyxcMeKk+HxGwR5xYkv4Gl4BpRVEM2j7rCyL16FX4KInzS1/60jA+DMOuqepQkBemUbhQ92COAa2FWFMqexTguJilibSS2NuKFrgRIcrFYTLsICwjVroZ+WaUcdk9zh8Ndn6C6QyqtGo8u4rvQUTSJzJOM0YxXAGiHQ5HzqO3nY8tCi3mClEHR6M6yy+P1AWn2AqV57KUussEYuSKkXGzL4v080eDnIQwlpr1KDUIyCu0WlbnlryIAm3C559La/pGjhRaj3rUo8I02kKYt5nwlcKhQgsrPMW3sA0tXaYBD3nIQ6zX2HOoPVEAsxhLE+nwomTL+nFWNNjIxmQwb6FBK3ZYfjJq6liLQiEvGlURPbIWqxwslnD2GCbA7ZgNcuga2/zwVRiWuL/EFi3MSDjTZZY5TImtAdE/CzF16W4tZBlRsXPzOBMaHKnQapxgM1+ncevEIXgbKgjvMIGKKkLUIg1upzw9Sq9HNFg4IdR+LS8QVK5ChdaS6vJQzLlAmIPAotFg5yfDHNkoC18RnQlKIjmeJXeCUBOua8JcmAibp1Aq4k7H8JWFtd3rVGi1iC9aONUlzlSkMC7HmKVJg31Ilssn+0poGZCdZWJTaPT74Ac/GDLsTENkIw0OvnVCaGzNhuUbaS+VuW8Po2cc3lhROCXu634mPsOpZnIt4Cu4LHc5aCNt2v/cJMT5RHMBhC8gHZqZdjuWQH06f4Mnq0xz5N1LIulMYns1uZJHVk2CrbbaCsG3pGOWhfWGzQYuNZh/mXGIxCBIR1b7isVcHeMF3iKRxC1YXzLidSxMC1uamKpVIyxRmqAiRvWSN1M1rjys8KhenB8aLey4447p7T44mcEAytqMtkxulyra6Vk6EZgtpXQ2icjGQRUVi/x6iy22sFvo1l57bUTh0hJFKUc+cjZCR6awS9AV8SMzViRAss+xd6MOTHQxfagoJMyC3YA9MqNi/Bi+GhlGzIdBvSWbghegke2ZpwQTokFWGYYNOiGwYpp9zGMeMxZoLa0aJC+tzTAGfVxBSdXKSxUhU1GSlyzKPlReytSMV7KGYpiSQYbS/DcuAsKfjCWfIGJj+qprpEErdkB+Mm1n+aJQzoumtYQxePrlxDOmr0RyiwBS+/CtPgQBSfPDBBbu4yfTlGlMpsssc/DbTJtSQpAd0370WNzX9drXvhaPQFagDuWk5XtMBQKHHHIIpj/pT34gWybnfHs05DKE37d1YkOHg0rKx7ydI1l4kojqYhuI83mLRPhld3JEaTofOTXCONTFP+jzEPx1pqyIXDpdHpA5r8DBs4QIzJAGw2ZYmDt1JA6Wo/g02ciY+9///ljK2pzPchzdtsC5T7PresELXoAIYmRpUYIW8UXJhCZWk3rFbS59piJCyR/LERhJg+VFjZuyekD2VYSndHvFIg4Z9iVrpMGStUzs4sitE0ZLZjoD84moZ6QDUhZ3HOeofOsjWnAOaXFZg24cr2MXM5xqH5gLG7/CwvZ8hh1nGyzHmh
j4n3HGGTNszMyrxgbT2sA9SZnGmCSaBFrRM4ntVWHJkuqWlzyyakuA3BylMUbxbICpBW9jxJgYHXtbS0PkyNLY4FmaXXfdFbvsTHoZlgquzsSINS0BZnGdCcaNXHXVVdnwY05IRkruQ1Li1LBe2DXOoFiNOsIVNYCdJG4bTzzxROI5zKv9bZTMH+sQKKQUDaq+71tXe1+ulkEVlYlrTfYqJn/ECphzG3I8GKUsfEQtjUgO6y32FXi3KKHiwpLDZJxzp9nmgplDjdrahWnGDa+22mpIScwlPabo0m0XlgN6drQlPK1VmNeTZRCYEA1y1ue6666jXuZnLm5M7SHEbZOGfZSJ8xDdcuEiMS2tasnLtkoLCoxB34TTNyNRNcsu7c8wFcqLwYS2Q+Vd7mQbWrpM1Szfdruk2kZk9GPaYZfFtEM8yygrY5TAH6sRGHDFCdvQSIMqaih+UgUqUL4olPOiKrwvgPPbbbbZBlslEnC1Scj7lfOxVnjIT/ZVF8Xnu4wwAh0bDCdu0FjlV1llFc6TmTiGRZnzZJTGpIHDgKhYf2xBQPfs9hXSOMH2FUt8+ZCzyTkccnI69OxnP3uNNdborAW/Z0cccQRbJO7pwdJx00037UzWGYl7ErtBmaq5o2sobnOJdHlw5rwTQ48sRGCGNJi2kNMJxqBygioyDk4T52PgJ+F+7eh/eJPW17/+dd2OA2eVcsgUi391KxwvPkioCbN/NEl9o/iimlNd+kxF/nP42wwCI2kwk7fxVfWA7KyXo5DmGhpTj3333bczDZGNNEgJA26dUE2xWFtTcTSNS7C+ZofxO+yww/bbbw+7iGAEP4FPf/rTOY9lCeT4oW7hznOqYRs87AqtaY8BWFJsLVERUzFMLffEstBOuxFLqT72itYcPMthuSYXrlEbO+VHUZroUSXfc8890Ss9wi3J5K1PaqbEdYHH/u8vymsnrojcfPPNo1fRI1JIs/6D2dI8G6XRI3OfhdmEKzINYPdkkTpKn6YZK4avhjDIZIhUjeSlM3tnveFRj8jGISxEG8VCs+gwr4fzCIhSBqfBfL35ty2DKiyZ401wGxyKJRLjWY7A4+k4TFAXxlWpUVnntfZ1ZYa5sFvnSJmtFJgI4exikJUCNEybRV0ZY+SwJWEYh5/2WJE3LMfDEQITokEpQRGlsRuPKo0esRWwGOYBU2i1tKolL0MdnRZ36tKezPLdxxWo6gnl7WMbVG/dRMrybeKSzPLNZsm0WSAz1PIdDYOFfRxqxYkAbKTBqLRGfjIqzR4LF4WxeNHOiqJIufuO1lDxsYgLOYbYp7Xt5CejKvoeS7rMcWT8XEUliHVnUR5kRY7K98cMAo0TbKZkDbnM3Ev2ziGnLYnGc1oR6k+qsFv6SD+WQote4yDaDmpAKXVysbRJS6HLE2LO0856zFAITI4GoxYiDnrHO95hkYg+EIBECcZ9FHnCnmlZgSvWiUnuSsyXee3//kiDZx1TaDWKLwTmuJzq0mcq8kj626WJQPWA7OyO7J+QwOj27jRlIw1SIGvZIFsnVN1osExCguM0LqFMW9sXwwSFw2F+YQL23SZ6InKksDfMqHAJp6rECx5wl4NTHQDcHIvF/fe+9z1qxQgat1ehU4upNmXJVMY0Z5wKYprwPo+wgZA0dm0WU652kjUfuwhKCAtUWOcMODg1iJhbJWcC3CFkokP2w1x2lUnJK+7QNgEWh0Kk8+/LIotRdEtmRtqZUtaFIwvszN4ZqaqxtuhMQGRnvRKvk8DuZe3Mrldh+s6UHjkuApOjwXFbEqWvHlRhOfhsueqqq4hBLoY2C81r+DYNYxWIpS2/vI8LOBXLi2/otJDGGFaK5z3veTbp4YUZx4YlKwUmwNwNxo8jaH0NMK0zb5l1wysrWIwsr+4Z7ixB2QecOjorWrTICdFg3Wwp2UFLq1ry8vW1fMvBVzoktHxHXIHyZhajkXkr2IbGLo813eF/mOpSTDymBYGxPkHhHNhIgyO7k+
cnWxaFqOpyXpQbFm0NZU3JdL9vDWVg6y6HDAl38pO0ecAuRwhgqk/hFrnbbrtFb/1x0gg0TrCZ5onwK7ZOGuHapHRWpLdKTzJOXxmlYNvamcsi+yglk2Xkq1l1OWzYuMx5mNfDM0FgcjQYdQeftGbShGQmf04CMzs0xEZHOg8RlcajiAhpjwzyQmJMs/TFiEMOs4vA01x6FaYnWTWnGpWT1tgZo2Z3vg0j80xFmNLD84RA9YBMQYA947STxedlKe2DWWtZCbvYt3Xi8gh8p5nNLic1jzvuuLRT48Zw7aWxqRjrr7POOlH2yXGqUUUL8ugKrel9aE4CcSbxtttuo0p2why7yXgUnV6zZl3Tfe97X93tzCmEzubguA8JL69ALFKAd6a3SAzZKJwwPrJ0M2GUnitALQaPIuI5ojTDPqJaw/jdZnBmz7zLL87znXfeedaA/JJgaTBBMvE6dty6SzBqP5yK2foRXyiRiUrofORiIYt/z3veo0NvUUpuILCYsF7aLEPXPo0muSTWHMo+MWrbIj9OjgYbUa0eVKqXK4XNqREbGNRauvtXCdIA7A5e2vixoUrfKsaMbXnU9KVXjQHIh5nBNmZPetKTaHbhSoEVMAf8+WlaS1siA3MOq2lTRzIOxFhemf6leemyru0JSThN6THjIjAhGuTOG5bOzE/mYzSYExiWUlfEtbSqJS+NQY9rGPZxBRxevOaaayzNs571LAvYv/LCOYQ+FcM0opEobwvb0NhlTXfYCfbJZbSGoo0uF0+EHfdwBgF9gnHZmEyZjTSYKZlXI/nJlkUhrHosXpTVxNZQ1pTo9FVYZt8aysDecMMNLSUSgTCLwngqM/9RxESL0VBdVl0KHHbYYeYUlA5uvfXWivfAdBBonGAzjWzZOuluDG1S0ooQk+kwR7iFId4o5eqrr05zWcwPfvAD8zvN44Dc5qy6rG5WMOfK64FZITA5Gox6pNt3uLZQ9g1RGnukSdCI0VHI0EaJtdywXshAEAO+DHtsr3RzMI7+LCY8lNkivqjmVGfLVETA+uPcIFA9IFME8NdnKuSNNtoo1eWE6RtpkKLEt1dvnTiZjQ2xORfl2nKu3AtbWBemTNk/dfoynRynWtfgZZ8Ldsp/U0CAW9m32247Gy5YuGAFNoVKx6oC2rMf54Gm/JP4DGQ4V5TWzk1jBh2cTfo2E8OlU5aRG2jSZAiOOTluCRBepwnSGImAL7nkkvTtyBj29hy0shrxwsxZvXwWLjWxxKjx8in1Vv6gMbXAP4biFVADcPOqyL4A2i9rAP9IBvuSEc9KILbvtNNOS1PefPPNKorNW5hAngY5rxbGK0zhshw58MADFT9ngf8jwd/dzDTl3+RoMO3IdAYV9TIObcix4UGlnbakMwaxvgYqZgedaUIFOXubzjRhpI7eI8sI49MwLCB2D9YAxvzIKSIs4YYbbrCMXAvE6a7wlYVZhuRSYKeddgoT4HZDarNLL700fKUwfJ6Vz0SNIFXx8xRYEBrUJwttCJDVKl6BlpmhJS/iP5mYoN1RexSQpHv11VdXpAUYnLqvkQt+o7c8YotnIxlbXUQhUYIWtqGlyzQDL6PWMNjFqFU8sqDLzSCy9TTBfMTMkAZb2Jhq8EfSYF/JJfxky6IQ1jsuLyrWGkussByFf/SjH6244oo22rmwSvEWkOEFyi1uJoje8qglFUaaq/LCBEN1OSyTgXHQQQdJhdy3RIZZlnV4hjSYx61xgs0UXr11uvjii20Yw2diId5ZBdI9S8MQwqBQaWQ/BM/Wx+wdfvjhlhc1qjJmAtri4TEpk4xXM+myNamOOc93Z87eLiAN6gtiimRjHs0TK4Xi+wI77rijpV9vvfXYy6TJsCGW1HuXXXZJE2RixHRhXJUmaxFfNHKqaWMUM1GmQrXMfWDJ0mAn8o3yScocakByigDLYCNJHMx0tnasyDwNUlTL1gkGUu5qcAyIZHistnUm5m51dqaGAP7YOgUmk+BUOxuzCJ
G/WzLG0nl44moE9t57bxvZCP05pFVdzuQyznDiRsyq2Qqf9XZLhCjwqKOOMuj4BzrFWwDLNXY4MO4Y5kSvePzyl7+svJiDhQnIqFtGcVgEsOHbvnDdgkFd3I7ItkRmQYjqOnmjsF5MgdjkWPvxRxG+yoRBT/6X4O3CrT6yckkl2FahYcqUY6/KdQ+kl8NrpIR8lLBwPhx3P1hf8LoZviKsTR0JzKwjTMChEGk0sWiAUQvfEsYoA8cdWNyHjCya0c6BgcnwPvvsw1HoMHFU4Kwe55IGUzCnM6j4xCYQ579QXW1NZeeD8wobq8xL8BxRFxjMEpd3asqj9DxK+jZSocVItqpZKVCtpUVlYuCZdH3dBhtsgE19mBiRnFRl0BEH4cO3hPfbbz+reqWVVsJnVPgWTE488USJ86C18C1hp8EIkIrHya2DmcaM3Pe2tKolL23eZpttbECi2cVDYNgLji0yhu1tp8pKAkTW3EgtjRogT78tbENjl2+55RZRGe6Ywl0Q66CcsLPEh1JRQ8ZpMBwh1eFqNibPi2baM5IGo7xj8ZONi4JVXcGLHnnkkUaejGe8BbCChL1A3oE9rCWAGCk/fGvhF77whZaAVThaj1jcJS55+9vfHuUdpMtWJlwidv3o20JXCnvttVdUox6dBgXFhAItE2yeQqu3TkzFXEpvY5XTWtFSBQ5XXnmlhuvuu+8eIgOTps0vbkLwsRa+JYzppC6W7rQUjNLzWK7QmkmXaWE1c05etursnffdd18uTQn7DpLwqIgL2EYpHtEkPk5IfP311ytyuQRmuB/MQzQ5GlS9m2yyiRHUnnvuqchMgH297J+YnyMhEtM48h8rEGqCy8oUlb4ShXYKbRrFFy2catpUxUyUqXAaFM5LKlAon8yvg4MMyGOOOcbIDWeACB7bUcrTIOVXb52YK9Zff31rLYHOYwDl7cdxCGuNbH8plqsu+sxcWjjVxaHBQvBdocWQmMbvne98p1EL/wg0Gesjf9hqTaNlQR2zZZ7wzi+IOA+OggoVztFHHy22hre44UpHtrDlTHrnEaLQMQjOhdhmw+BiNCrlOSVTSFpyZ0zhgmF577rrLjw7P/rRj5aIyvqIiI0bazrLDyMlBMc5GB8qfJUPX3jhhToFj00fcjE25IceeihmAgJ5jz32yBdib8fSPcDmSp5ORVyfcMIJJ7AHw/+1Dn9gw4jpfVo1H1dt43gyB3X5TO9973v333//8G6z1JKXkyiC94ILLlDJGhigrYEhpx/UVfIJVNp0AvNKgxF6UxhUVCHlMcJfTiON/OHLS+2EcsU/MbrQWuH+Ai0O0xSnN9DX2liFMGFflCsTEC3nFVqI5kUFSEZGtpkEkdYKYQoW61YIRId6DN9Z5557LjOeNMq8ZWpNWxvSL5tDbB6R7TKZMGcialHDmJOjvE6DESDVj5NbB/uaVLLvrW4Vlbbk5ZQG64UNPPS7iAJx+cJ2C/ZJMj5OHHZKw4mEPC0viTnwREsQrOPvVwsKPANeyzqRaWEbWrpMY7C3EK3hYIpzIayDROpsJW+h6KjZToMRINWP4TQI1OVsjFiOPl60r0klNEjean6yZVGwNmv9KudF2atvvPHGGskYWLDoIBA888wzYe/Z5NsrOFW8J3Uigz0HphWWDMYbqmd9hKXEIkrFslno5I3bu4wVPw6KNf+oRugxUs6p8U6DgmKigeoJdiSFVm+dsA7UkgT5s8pwgJjSGK6ym2QIYQaeGiJw3F+yeMY5vonOOOMM9jLHH388k48GHgrgQlTLFVoUOP0uNzLnumwM0EL9n7aQCCUFlBzvM8/cfffdil8WgdnuB/MQTY4GqTf05tInC06b9+Y3v1nEArN08MEHY9KKw3lUWaGr+ZNOOinNm4/RZrBToUVejT0aMJb4gryNnGpfyyfKVDgN9sE+2/hC+WR+HWwfkCjM5K4J332DYDKSBqmlbuuEeEfzBiaDI6UunX7C0KZwAFSNVIHYYkKJGQSqOd
XFocEMeuErV2gF+pxJBuXBRqN8ZAAamGSLOsqeOfOE1EZbghQfjG46j9S84hWvUOLOQ1qYaKHRUZoowH71nHPOCakiHy5cMKwQpqqoOoTjO+ywAyaf+Vp4S7Mlj8MvxMj0UQLslSQLSNuAwVqna8eoEB7H0j2QnpVs5513jmrUI6a4fYZyKAZkkKv0YQD9BMZ3aQsxflQyzp8pAVtKxWtgYO2oSHRmSrxEAvNKgxG8UxhUdm+WvnVJINKVImXTRaOd2bmBgI5EXet7lEAwr9BCZN9ZVyYydXjIuh6ak0d50cax8WMB6GwqvnDzlxSiGEgdgDgNdoJZFzmhdbCvMYX73rpWWaUtedFpZQYzXnPT0aieogBAjB6Nfz2i59a6oCwKNLINLV2mDRw6kUmKGmwBOBC0emqnAk6DgqI9UMfGjORF+xpWSIMt/GTLolDNi0KAeFyIBnD4iNQDu6U+WIjnGLEuKAozWhhzsVCiHZXT0mWKwq1uVCMGYfDVUS3ho9NgiMZEw3UTbAmFVm+dUEFJiheNHHvE2q9POs+Ubpcfd2YkEgOLe+65pxDSsRRalDnlLjcy52iUhRI0LkxWW201i8eQS7ICdOdKHJqsKddSDsx8P5gHZ3I0iJDEvhpuxPJtCN9iQoFEu49xokDkS1xYUHFeRHLqPoVWtfjC2t/CqYYIhOGJMhVOgyHUSydcKJ8cuQ42DkhdQIXdUnRWshqrkTRoJVdsnThyoDWiJNDp+FeWEyoBDyIIJGVMn+l4Hae6ODSYgS58BYzucrBDtTN4lCu0wmGXCX/+85/n/kCdMGBqQI2BwQtGnX25kDszv6Aowg6uz3CSvJwKZzsRmlvizQ/HYtwj2ldyZ3zhgmF5EUDQMOZ07IPYkHBWqfNwUmdF73//+21mpEZ8DXWmyUciC8BuSMsApdF9pNXhHiBfAm/H1T1YgRiAUJEOslA1FyqgVsTpWb7Ga665hkspw4zkBQGMc+lOZ16kLSa1RFvGncxKgyQ0HRio8eyYGh5CStSKKm06gaWwgZkcDQrDKQyqxj2zNZWFgJGsy9uMHvnH+g9a1p5Z/coEpqnQohkQGryUHJZay3F6w+w60tsG+z2Ys1VXXVX9JQBJMolhzNvZR6fBTliqI6dAg2pb4b6X9BWtUi0tebnjCjWq/OgyGjHNtmPcHKZRFZ0BEiDSwi6ehVjjGUsRCsyIwlVUC9vQ0mUawFIItYrfoPEPeMADkO+kCmxrrdOgvtpQgXHZmEJeNG1eIQ228JNUWr0oNPKiV1xxBUqpSM4IV8zp4RLOFoEIYkq5dIMQoGUekVxkGH4DubrLZEehBcUhJWebgP8cjnKOrM5pMB3bk4upmGALKbR66/Tzn/+co/CMba01NlwZRRwWyUvSccLJOI/UWvBd427ZAHxchRZZptnlRuacCzWNO91www1DwyyOtbFccjwudD+A92Bj4DktisJjcqNxEiUvhf1gvl+ToEGWOR1YjC7bzjfG3qIz3mqrrRgGIQ0iXIYoxnXhruokRelTaFnKCvGFqmjkVFWOAhNlKpwGhfOSCmi/gOl2pmEl62D1gGSalTOYAS/6LaRBej3u1mkohRYsLsaXmCzjkupDH/oQC3rmE0SvKjjVxaHBCKu+RyTbvw9DEM77Hl5YBNDJW98jUeb0AWG8clAA5gPBMY4BM8e2rG3sM7kpSndQZxqMDJplHkkWwln0H6GQK5Or5RVtE3PWUk5LXthimLxCMFsqivKiPWI7gbEzfpNKvo6ysyIyAFD+IYJHFqmTakqQBnC8xloSfdC+gUHiUDyaljarGFzWWNVzTION2FYPqpZ68RLDaMQNLHtjJOnyn9lS5nTy4mUFxS1eoZnxUt+n+TbAY5GXdQHpIWQYWgN0ZnQa7ISlOnJy62B1k8g4bqvCulryUg4m6rfeeit7NvYM49IgJPDVr34V4TiydW2NwrZlwi1sQ2OXkYHCsbAuFHIsToOZ71j3aqwVp4/lqKs6zT
UIP9myKKRNKoxhp8nddQgrEdk/6UlPktvPwuwkwwqem7Qwd1tjjTXGJf+KLkN6kRKusKlOg4VADZJs3Al2LAqt3joxVrE8YMFim8lBf133WNJlcpEXXS8Z0YSN3P+WlFmeZiZdLm+epeSjQ9HpJo71nc1jZMhFYo5xZ855j1v71NIvnf1gvssTpcF81X1vIXPkPGb9w5YN6UckHOjL2B5fIb4IK23hVMNyysN1TIXTYDnCSzBl+To4/QE5CFzjbp0aK4UcKKF9khmLU10QGiz8NJ/85CddoVWI1fwnWzoKrfnH2nvoCHQhsFw2MF1t9zhHYB4QcBqch6/ofVjOCDgNLuev522fBwScBufhK3ofljMCToPL+et52+cBAafBefiK3ocFQACF1h8sQDe9i46AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4AssYAVdoLeOP5013BBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHYBEQcIXWInxl76Mj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCyxgBV2gt44/nTXcEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUdgERBwhdYifGXvoyPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOALLGAFXaC3jj+dNdwQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR2AREHCF1iJ8Ze+jI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4AssYAVdoLeOP5013BBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHYBEQcIXWInxl76Mj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCyxgBV2gt44/nTXcEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUdgERBwhdYifGXvoyPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOALLGAFXaC3jj+dNdwQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR2AREHCF1iJ8Ze+jI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4AssYAVdoLeOP5013BBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHYBEQcIXWInxl76Mj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCyxgBV2gt44/nTXcEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUdgERBwhdYifGXvoyPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOALLGAFXaC3jj+dNdwQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR2AREHCF1iJ8Ze+jI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4AssYAVdoLeOP5013BBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHYBEQcIXWInxl76Mj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCyxgBV2gt44/nTXcEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUdgERBY4a677lqEfnofyxG48847yxN7SkfAERgKgR/+8IdDFeXlOAKOQAUCToMVoHkWR2BABJwGBwTTi3IEKhBwGqwAzbM4AgMi4DQ4IJhelCNQgYDTYAVonsURaEFg1V
VXrcvuJ7TqcPNcjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCU0LAFVpTAtqrcQQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARqENghUc+8pF1OT3XnCEg55PVx/3mDBDvjiMwKwScBmeFvNfrCBgCToM+EhyB2SLgNDhb/L12R8Bp0MeAIzBbBJwGZ4u/1+4IOA36GHAEljgCfkJriX8gb54j4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCi46AK7QWfQR4/x0BR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFY4gi4QmuJfyBvniPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKLjoArtBZ9BHj/HQFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEVjiCLhCa4l/IG+eI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4AouOgCu0Fn0EeP8dAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARWOIIuEJriX8gb54j4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCi46AK7QWfQR4/x0BR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFY4gi4QmuJfyBvniPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKLjoArtBZ9BHj/HQFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEVjiCLhCa4l/IG+eI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4AouOgCu0Fn0EeP8dAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARWOIIuEJriX8gb54j4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCi46AK7QWfQR4/x0BR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFY4gi4QmuJfyBvniPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKLjoArtBZ9BHj/HQFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEVjiCLhCa4l/IG+eI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4AouOgCu0Fn0EeP8dAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARWOIIuEJriX8gb54j4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCi46AK7QWfQR4/x0BR8ARcAQcAUfAEXAEHAFHwBFwBBwBR8ARcAQcAUfAEXAEHAFHwBFY4gi4QmuJfyBvniPgCDgCjoAj4Ag4Ao6AI+AIOAKOgCPgCDgCjoAj4Ag4Ao6AI+AIOAKLjsAKiw7A1Pt/8803X3LJJXfcccc//MM//M///M8TnvCExz/+8c997nOf//znT70tXqEjsHQR+NWvfnXppZd+5StfufXWW2+//fZVVlnlr/7qr/7yL//yhS984SMf+ciWdn/ve9/7xCc+Qcn8fvjDH66++uoUS+Hbb7/9H//xH7eUPAd5Jwd7CM5//Md/XHHFFX/7t397zjnn/Omf/mn4qiT81a9+9Utf+tLdd9/9L//yLzvttNMzn/nMTK677rrrvPPOYwjxu/POO//iL/6CKfcpT3nKa17zmj/7sz/LZOTVv/3bv5199tmME/J+97vfJS8z9pOf/OS99trroQ99aD
5v9PZnP/vZhRde+I//+I/vfOc7o1edj3TwsssuY6Wg6p/85CePe9zjqHq99dbbc88973Of+3RmscgPfehD119/fSaBvdpiiy223XbbKBnL0/ve974oMn18zGMec8ghh6TxHjMIAtOhwf/8z/9knDDAIKJ77rnnuOOOyw/psVpFgUceeeS4aBx77LEPe9jD0lxXXnnl3//937MQ8HvgAx/IdM1vs802Y9JOEyvm5z//+aGHHqrHTOD00083mgIQyCeTMn314Ac/+KSTTiJ+qC47DaYgzzxmrBVn3NZ+9KMfveaaaywXY4kR1VfCj370I9ajz372syxk//3f//20pz1t3XXX3XTTTQn0ZbH4ikUB2oGC8sVGb1/2spd1rsUXX3zx3/3d37GQffOb36R3LGT8SLzGGmtEJeQfx5qyKroc1v7v//7vrNd8Gtb9n/70pzSVxXeDDTbYaqutfv/3fz9M6eEJITDWijNWG2666Sa+LANS23AGJBzRC17wgpHlVNMgJQ/Yox/84Aef+9zn2L+wfMPNvuIVr8i3/JZbbmHeYAGFm4WttTWU8bzlllvmM/J2LLiGWgetVezUWP3hUuCc/+RP/uRJ//ujs/DwI5vtCdoRaNk95Wtv2YYz+Nk5QryQ8L/+67+uttpq0C+LIAMjvzlKm/TFL37xgx/84G677fb0pz89fUvMgOtgWv5b3vIWlnLi4XvhftMEnTFjrYOU0AiX02DnV5ha5ICrRtTmFhpsmRnGXUMHpMGPf/zjtu1l6pAE/nnPe964EvhxabDxIzoNRkO3+xHBov+mgwCsJ6tm314IndZtt902nZZ01sK2zX7/z3+OwKwR+M53vtMn7IDzQzhS3UAkRyuuuGLnbAhDzAJfXXJ7xv8jwe+2F1VXwuRgV3u+/OUv77ffftIkoebRq5EBEh988MGPetSjws/3N3/zN30Z/+u//gvt0QMe8IAwvcJI1t7xjnfAl/Rl/9jHPrbqqqsqfRgg76mnnprJqzJpw1VXXfWSl7zkj/7ojygBOYJe9QXo5mtf+9o/+IPu89Ps3C6//PK+vMRvs802YVP7wiCZFnLKKaf0pQ/jEe2leecjZhFoEPHQLrvs8qAHPSj8pv/0T/+U+YLjzgyIn8LCC8PIu6M2/OY3v9lxxx07s6+wwgrQYJQ+fERy15kxjaQWy3jWWWelb/Mxj3jEIyzvUF12Gpw5DWoUjbviKGN5AHlWuEIhe+rLy1p23/veNx2N7ClOOOGEvlwWX7EoVJgNvfe9742agSKwU8VFL/7wD//wda97HbqiKEvnY8WUVdFlVQ2H2alZp9nbbbcdsgmlnNfAzGlw3BWn8EOgbtljjz36tuGbb745cq5MUS00OEiP4Cff9a53RZYc6IYzbebVW9/61j5+EqEE+q2+7BVwDbUOwpD0CRmZBhFxzj0ZzpYGG3dPfSPK4qu34b/97W+x+egzgmRzhOQ3X7W9/f73v3/iiSeS3tZTrMz7cg2yDnYWzgZTqzk6uc40UeS462AjXE6Ds6VBvv4gq0Y0iuyxmgYbZ4aKNXQQGoR2sP0SxUUBJPBf//rXO4GKIselQbK3fESnwQj/vkdm/t/r1G145OAIYEX153/+5yIhTI1gSddee+1QqISQlNlz8KoLC5z5xN03TD1+0RD4whe+IIUHJHP/+99//fXXX2mllUQ+iDJT0UkJSmwFEaOonIc//OGI5u93v/sphjMKN954Y0lRk0gzWxqcHOxghUkO2qM111xTUFugXKHFKatwDKicPoUWBuzPec5zlIwA56s23HDDlVdeOYx84xvf2Pkp//qv/zpMhhYNwRwnk8Lxc9BBB3XmtUjsB1EaIe8Oyxmp0GKlCLMw1J/4xCdiSMvBlLAcDi/2VY3NfpiyL9yp0KJHfenDeFdo9YHfGD9RGqRtbEX4xJ1GrBmFVkWrKqRaCCk4ERICCC3AI2ngQQtrrbUWNKgYArvuumuYJQxj2R2mzIRbFFpMKVbpIF
2mKKfB2a6DGkLjrjjKOFYA49BwZPYptKJRgbpFwjjLzknljGC6YlGoECIgIgn7zrwBzap3LN9YwXO+OTwHz9LGpBTmisJ1UxaFVHTZqka4Gc6Q8J8YV4UdgY1BDRm1c84eZ0uDFStOCf4c5oAJ1IBkG86Css4660Tb8EnQ4CA94pgyh7HUfgUyCi3I5+Uvf7lSEuBsEwM45GNBAMVVCmAdXIOsg7SH3ZmaDQ1CzpzOCpu99dZbp22ep5gZ0mDj7in/Faq34ZyoCHWctjl6xjOeEe3pYPz6GsASecEFF6C3jvS7wyq0onWwszHsfMON3kiFVsU62AiX0yAfboY0SO2DrBqdw6+aBhtnhjo+tp0XhSRDazBb+pHGhJFI4PN8XQUNNn5Ep8HO0dsZ6QqtQl3PAMm0DHM65Mwzz8SdhQq96KKLtLBx/F/xUw7MduLuHKAeuZgI6GwWai28ZLCKGA5oCPQK6Wehba8wRDwqOQW7IwlwOWfDVKg9Lfyxskw5MFsaFLbDwn711VfjJTKUB2mbSqBQoYVRT5jrWc961gEHHPD+97+fHf69997b+ZlOPvlkZcFN349//GMl45SYOout7qc+9Sm9sgBKTTUYgeM///M/KwGaOcQHVjKbIhxW6JUCzPCofFR7GBip0GKXbulhud72trdhYadiWSmk0uMbMZ71KgzIcuLoo4+mJX2/G264Icxl4Ze+9KVWOyd4+jISj7ueNO98xMwlDdqnYaLjpKBGI6z8i170IhwD4hDMTof3fUERS/nMwLYHcVjJD0K2Ju29995RAzjAYa8QY7373e+W2gka3HnnndWRlH6tHJyzWRqO3mYGM6+0xMCYlbSZNAjirXD8plp1g3SZopwGZ0uD9jUrVhzLONZ/6t+yU5h+7bXXarSjE0JkbLXg9eWVr3ylXmG63ld7xaIAf1VCCzIzf+pTnxrWTnbcRFvbkETDyOktixpHyuzIMgmwHdGrKFA9ZVFORZfJhQSBWc6azb7sM5/5jE0Ov/zlL0MZEDu1qKlz9jhbGqxYcUrwF3PFNhyTOGZs5WIRlNQMlk/xCjTSYHuPPv/5z4dHOaEpHF+fccYZ1113XR8rSOM/8pGPaH5groCirUccb4JD1km1N73pTeqpAnVwDbIOwplYs9nonXbaafpSkOcOO+ygHnGiWq2dv8AMabBl95T/EC3bcLUKhnD//ff/xS9+obo4jq89F4ScCgeQHrz61a/WNl9DyAIZhVbjOqgWRoFw4aYNeYVW3TrYAhetdRoEhBnSILW3rxrRqLPHQWiQQTuuXKV6DW2kQQwlZciCaTIebtD1GhTwokcccYSkPRtttJHWmgi6OhqkkJaP6DQYfYXMoyu0pqQzEk8J+4h4N63105/+tNZXwmmCKcTMduLODFN/tVAI4OzFaAGjvG984xtR37lMiI2cJcjIQaJc9oh83zJyODLVgqDkkOEwLuM7S5h05AxpcHKwo3wy2O2fIxf77LOPYkoUWihylB4ZWd7hnn2jb33rW/qafPf0wzEAZEagMxZKhtrJanz2s58t1kdvCWyyySZKEMYThh9SawnAKnHrhviSvEKLMwHKyxeJSuYRTZtMCw8//PA0AbXLiDU675ImTmPULzxNp28XIWYuadA+HH4sNbo4bpjOrp3fd3IzA9Who7KLuxjVEtNbM5BWSH7HJQdR2xjn0ml1yh9Jf8wxx1h/3/CGN0TZGx/Ro1vJj370o6UMKywz02UrwWlwhjRon6BixSn8+mEy6U4kVmZQdSq0tCXGZUpo4mCl7bvvvjYaIaVf//rXYRUWblwU0gLDGF07FB2Vfv3rX2+tQj+EBjrMYmGRJ5otFMlpAmLqpiwyVndZYHIGDi/xUatQe+hjcddd9HaeHmdIgxNacZBZ24DkC6KnTD+W6iUZ4ShBCw2q5OpNDTZVcrNmpk7I16IWdj7CwVqvuV4oZWUx5rC3WErJXsTKaYSrszFhZGYdZP9lreKfK13DXBbWvV
80O307NzGzosHG3VMe/+ptOIuIDFIxw0prQTggg79004fYWoOKAHLtww47TEYVGYVWWlFnTN862JmY+ccao9Ukr9CqWAcb4XIatA83Kxqk9vZVo3PsEVlNg40zQ8sa2teXML6PBln7jNzgRWXSEWbEJa8mh77jlRU0SBUtH9FpMPxGI8Ou0JqCnuh3VeDVyqgFeuurUjdSYlXRl2ai8TOcuEeOVE+wOAhsv/32Riz4du/s9emnn24JYEkLN3WUA+1IgcHJns6SX/ziF1vJiEo7E0w6coY0OCHYQcwUWphL4+POnBRLHAzaIxVa7FJ0mwXzZ6HffIw67VM+9rGP7ROWYaJraZAyhFt9JOnaZqDm7PzoHAqxvJxSjxIgSrNX6E3xsnj33XeTgLNWFplXaHE/hyXjlEZUrB4lx0dPpkgFsL2yElAGKLI8gHtDy/6lL32pPNc8pZxLGuQDcYG8RjWi5HDA5z/f5GYG6pVAjUOcUTOwPbehiNKos7WcKrME9Au35lF2HjFgtwRvf/vb07ctMTroBoGPW06my1aU0+AMaZBPULfijDsMSK+ZPPQJlipRWDRtGPPfOS3jRsm0wiRg+5C2pHFRSAtUDG2zWQX/YNE6y7FIa3YfjSDRlu3FrbfeqjIVqJ6yKKGuyyin5eisE0lK1rEVAmrq/AVmSIMTWnEOPPBAG5DpWqPPh3sGS8MBTUUSaKTB9h5tttlm1jBWQz5N2LZMWM2GSL/97W+nKcMBH3lxb4ErrSiNyayD4szxjphmJAZbHEOD/9lee9zZvKEiZ0WDLbunfN9btuFXXHGFfXS0VpTTWREHDS0NFoRRAlNosddDsIA+iW0aCaQkblRoZdbBqBk8svA97nGPo51QpY7j9w110tetg41wOQ3ah5sVDVJ7+6phXYj+W2iwZWbQYsTIr+Njo45EjxkafMhDHmLTAk47olz2iCARcaKlQbmVpqmjQcpp+YhOg+mHyMSg0Oq+dt6+q/8PhQBSUSsKX9V9ZUqKwcnovjQe7wjMNwKsKxwNsT6GUp6w1zvuuKMdEOZa16997Wvhq0wYZ8Qm4+DGLCmuovRiLsULRgnm9XFysIMYd7Oj/mHbyYm6zgsA8qjimwhLdtIgs8NaUzuQfC4NDEzaZYUXZZHTMJRkGMDqLc4rWDV5xCebTIr01gI6JsgpKPw+RW+RBdAAysEthgRkUZrORzU7ulglTKxmf/WrXw3jLYw81AI6Yp+mycQou9i7TGJ/NSACE6VB2mmnlAhwAIi9vYmhR7Z/oq1CpiCnKDL6UZM4DGFhPHx2thYXZ3YnH9TaeV3WhAYzcwUuN2kb7msivzFqfF8g32XLNaFm9zXJ4yME6lacqJCRj/AYF154IclYYuR1szPXZZddZvEYvXXuIDiLrEPP55xzTlqIRlTdopAWqBhTFfOI+Wq4zjJvaC2TZku5LAAntuqqq1o4lFArWd2UZdnrusyhKwzbKQFNW2iQqyYR0MfipLhxJuFbDzciMLkV55ZbbrG29Q1I3oqviwZkCw2294iRZk5cWAfhgUU1I6HWGooTgujuScvLOGc/ZeFo19MC18iG5ddBTR3pnbtW8uqrr66NgBKPrNQTFCIgSCt2T/kqWrbhGpAYBeqoVlRdH/2SjFw4wMC+8AMf+AD9kqOLqIS6x751sLM0jpfZfhMXiBBmZ5owsm4dbIRLY8BpMPwWUwu3rxp9TW2hQY2KipmhZQ3t60sY30eDsIJ4LLSUfXIVJIriCjrlKnU02PgRhbbTYPihM2FXaGXAGezVueeey4U9/OSFIy1a/JltqNIEHuMIzD0CsJvYDFo3dWYx6jWKDWl/8c8Tve17RPtlr5AfyZNVlFjO8bgvgWM60ds5fpwc7ICGAOi5z31u3f4BhgBvY4b8e97zHtmhj/wWmKPylfkh9e5LHLpTl6k4iREamlkWhci3clSIpmvioxmbnnIeq08TFpUTPmJoz0Vf1mx27OGrMKxmh21WAmnXKm
SXDHt0exSFJq8calXtgRYEJkqD6FbNqobRxfnUTv1QZ+Mn2ipu37GNPRS38cYbRw3QjC2r+SgBj5qxOxeCFlpI61IMJ06Qx/GINgs8FV8SyHeZEpwGS2CcXJrqFWesJsFdmL6ExQKj0fBi6rQcFiOLFNuTpsFqxCK/853vmDVGmGZChICk4Pzzz6ciNGpcgBfWSL/wIGrLaB8jR3oto9EayqvqKcuaUddlQY09B/q2sEcKM1NpCQZtxXtgEAQmt+JwX50NSMyM+praNyA1MCposL1HOPO0BnOppBzS9nUhjG9ZQ1vgCtvQGc6vg8aFklGfIyoEBga/ixaJk9XorT82ItCye8pXrQFZsQ1n8Bv9cmtpXy0aMOmCghkrplFK0FdCRXxmHUxL42zKKaecQjwXfWHimSaIYqrXwUa4nAajDzHlx/ZVo6/BLTTYMjO0rKF9fVF8hgZvv/12E6qsvPLKuldVGRUQU5fKVappsPEjOg3q6xQGVihM58laEJCvs0whsgjLcMyZ7P7KEZgDBEJHMdqxpP2StL1TjpmmJ0YlZ07McDAZQQzurUhPyU9+8pM7i5q/SIHDwj8s7I1YcfACl8cUgodxueMrKVM+yjOJuWTb3iKPC41e2fOM3Pbo+Dxba5xFZGopf4UEjfNeI9Or2bJGDLPUCfKsBOWF7SvXeYS1e7gagYnSICY11jDMscOhPrK1E22VHJenx7NomKrOzNj5hUDjuUK524cMKmc0grxF1b3ffvv1JeuLz3eZXGqz02AfhhONr15xxmrVoYceaqwLUqf111+fLXcmu4YE3sb6komosYrg2FBEMiphQEKgJbhlQ/9HgMP0okRrIUIBNamvzeT95je/aW/TXU/1lGUF1nVZuTJQUz5dM+NZZDR8vr4OenwFApr2B+dFkSCPbI/4umhAlgwMDfiIBht7xIpz1VVXWcsPOeSQkV0IE6jqaEII04hyo81UC1xh+Z3h/DoIt2+5+tzVsCkAFktj3ts6a/HIOgRadk/5GksGZN82nHh++fL76Defq/FtZh2MSmbJ4zofM4fCpbZk6FGy8LF6HWyEy2kw/ArTD4tSBl8HVXJmUWDwdIrCWmaGljV0JP4ZGsRbb4l5ekauUk2DgrruIzoNjvzuUQJXaEWAzOaR08GcA6VuxIg77bTTbBrhtToCs0ZACwDGFJm2cLWjvY32YJksJSWzhLOQmwZlMRVag8Oe+SIlr3Sjph3F4EpS2A7u22Cjiw312muvzemNTi9MJYVfe+21low7Ffj0JVmUBv7JwpjG45Zd8VMIqNm4iU+rE9cIPkg2MWbEUh7zJR7xubHWWmtxs1eay2LCvFyIghUtUN95551wY+Tlh7fD/GGCvpI9fiQCJRMUhVRMfeSK6OiGG27gHjs+Lo5AUdtDR1wdUAgzHwAALSdJREFU3ynDnVyrPvvZz2KsStsQsu+www4RPvfeey9iQYvMTEoZNJAamEMwSBuLIlw24QYdATQyBRw4MJg5OLLiiitG9Y58RAxhDcPgd5VVVhmZPkyQ77KldBoMEZt+OKKUYVcc6w5LGKeNCTN+cG84so86JQxR9CVmJDNLc7yPBAzySFQRDqpxF4W+GmmMufhnz5I58tKXnfh3v/vdXOdAAPJMz51EH6J8yrIa67pcAjXlhwotq87/h0JgcivOyBayHl1//fUkY0jvsssuYfqSgdFHg409QjJgXivwGYiVA9sTOEDWbjwjYXzG2s0PmV1qV077S6rWGkrJv/3tb0PHoSECaTgDV5o4jBm5DnLvCG7ZOGmKYITZUuewVYidceHxCU94Ajyt4j0wNQS0DRlr91QyIKu34YitzdgIEHR9wKQBGWsdRI9rlhBYZ2677bYlbWtcBzNV5OFyGsxAN4VXJZRCMzR7Lx1RWN/M0LKG5gEfiwY7i2IjrFsn5O1AKatpsPEjOg3qExQGXKFVCNQEkyFawsLRTB1f9apXIWqZYGVetCOwhBGQGIJ7XzPNrF
jFy0uWQivTgDl7VQ6OdbyceWoESrcPorjCRTJX/to8acXiDJ0Aflxx8dfnVL2vARh4XnrppfZWF5D0JQ7jEWcfddRR3CFMJOrPEolkmL0xDLNobo4e+MAHdl4yp0950UUXnXTSSSYxDCvdeeedkSR22gYqL2Jc1Aw/+clPwoyEEZ1ccMEFkfFylMYf6xAQ+INPfUipdP4b7oJhgzchNdJs7hCNvetd79p9990Vb4HJtUo22vvuu29KvKqXZmQAySwEeHuws7bso/A7ilpXXYM0CGOZjvjj+c9/vuJHBrjNG5QsWeepsnwJ+S5bXnXcaTAP5oTeTm7FsQZjKICnSvMKiFqrxBhCDh7w95LpNSuCKbTYnEfHhjSoKhaFvhppvFW35ZZbZhzkdman+9wfdvDBB/MWQTzLd+Tfr2XKshrrulwOtdUC1J0d9MhqBPThMtM+hWdm/rqquT4NJZaxl3vttVdk91M+MFIabOyRZqRnPvOZV1999W677Ra6U/vIRz5Cf9EHw5illh8lVQtJqBLLp8c+9rElAObhypcwch3E9wDXC6Evp0m77rrrhz/84fXWW09lck0gMwaPrOw4bRvXFk3leKAagerdU8mApFWMyXG34QxdDj9hzEF2iBcSru7dWBnL18E77rjjuOOOo3A2X9hFldTSvg721TISLqfBPuimE19OKdaecplMecnj0iAtycwMLWtoHvNyGuwrR059WUkjz0wtNFgOdedHdBrs+1598a7Q6kNmgvHwoPh9xuqK1RdLq0996lPwbezrTjzxxDpTxwm21Yt2BKaIwM9+9jOrLe/xTLYe5f7TJ1fyFOGZVFVLFhymR+sz99bILxNTJZtYabZOPfXUG2+8kRMYJcJBIYivJ7spdPPNN99oo40UnwYQhdvxWdLjIukTn/iE8Y7smlBrdZ5rSQsZJIbRjsWDFYXTqs7+ioUKJfhh7UgSsXbHiU0qhVReGRaFGQkjW0Gnxa1mHE+JXvljIwKTo0G0WWbijd4Imy/u8rSmYouNvhPeg0dUNXvssQe2NVLYWJoJtQoz1U9+8pNUwRhGdGV1hf+ql8jMWpBZCDSY6WMnLbD1esELXoBUveQmA2sbojTT8nKVdyhiC1veFx7ZZcuoZjsN9iE50fjJrTjW7GOPPdZGI4YFfTdURx3UNZBf/OIXodNOn8CoP3XxtVZGlaNB1UkIJMssCiokDLDJZ9m1mBLNLsbgH/3oR0mPOQht4NSFielRWqBj43xJWDjhlinLiqrrsqDG5pcDKLonPGweKkldG55CHab0cAUCmvkz0z7FZmb+wko5vG7bcCyEuCQDXZFtw1kOuPY1KkQDo4IGG3ukGYnVGUsUW69pHo6yGYrWTs48cVCJLkS3x5dULSQpKrOfGguuCL3wsXAdxOiKyQGRCEISrNk4Qf6UpzyF5gGCnXFBK3DxxRdzO29YuIeng0D57ilqT8mAJIvGZGZAcuDeDlWgm2Fv+PGPf5zFkbwsrIinoY6o6kk8lq+DkC2GLKSnGSh0JdzPt6p9HVT5FXA5DQq96QcGpJSo8ZMrmYoyM0PLGhp1IXwsp8EwVxhmQcH2lxhYDlM5h29baLAdaqfB8FuMDLtCayREwyc48MADtd2y0vEWgpYrdbsxfN1eoiOwHBDI72bVg8JkSk+gMEthsrDkOQgX9rowWSMgCL/M3JVyTJvFYXAUOeZjhH3+Mcccg3qJtzfddBNiiJQX6WvAaaedxv6ct9yVZX6T+lISz05AaiQlQzGAXieyK9fbSQTYEWGca0b6nLM56KCDOmvRysI3whkIbgnxjwSMwPW+970P/4HkQkaw9957IwSJSlBe4gEZg0fuJ2BvCcOH+0HT6rFpRAOx6aabVrhri6rzx04EComrMBlVyKAb8SvaLPRYr33taxkAfFwGBhI67oIyLdeZZ56JL5TnPOc5acMKqytMZhbW1IIW7cEPfnBaXRhTUmaaJhzMiA/oLxsqjlRiJ4sHCST4Vg
XCha222mrjjTcOa+wMoxRErW6vSoT4USGFXQ6b7TQYYTjpx8mtONZyJmEbBshqpRAa2SnmcMQEzP+oYFmtUqM3qJiVkcZbUekFkBpUkMm4i0Jn8xCsm8IVAXqqjkqz4Odwzz33jOKxhMWuQjcPhW/bp6y6LtMknJiZ2g92Qme4w7Yhl9RNLSnUYUoPtyCQTumdpRUmS/MyhyMKD+PZhiMWh7cJIy3cToOUU9jUKJlogZWLQuD9jj76aBpJa4nhkLFNKZxIxtMAquK08dVVh0WNBVeYMQoXroOAgPMD7vFi6sN7MOfI+akoTlfDvqLiUowHpobAWLunvlZFg7wi2VlnnXX++edHGWHn+nZGUcpBHsvXQVhr20DBaqbbyb7GiPZbWHcrvAIup8G+7zLN+HZK6Wvt4CXnZ4ZB1tC0L+U0mOYlBjsw9rxmmcQmEVPFKNkgNFgNtdNg9DlGPGK+6r8pI9B52yr+BDiqNeWWhNUh5bQfO2f/OQIzQUAO3DDKyzSATY7Na5wUySQLX8mUD7d1YXwUlhXJ6aefHr2awuP/keB3p1BXWMXkYA9rURgrSy1L2LAoPgqEZxRY17m2KkrARldiMuzWOTgVJeh8vOaaa3TfAGfVO9OEkWeffbZaqwD2oQwkDM/DlJmwtvHIIzLJMq/wc2i1o0XDoLgv5XXXXUfD0Ekg2ojS/PznP4d1Uxc4XhYlQD7ChpBzA5gFRa+A+vjjj1de3DxGCebmcf5oUMobPh9+ydKBAU9vSmIScO6Qb62vOYmZAa2qWeByzhIbW9UVBmzzb+MNLiV8FYbN4RLJ0BCE8YSR/kO5mOuyS0HWH729/PLL5XWTvoddjlLqEZ9O1h4UgSXplZFASZctvdMgOMyKBie04tiXZc/M8VYbQtwybZH2z5WQFs8/QvbwlYU5SmgJkGJjwxEmwIZdby0Nkt8wAeGWRSEqikc8eeILxepCnp4mSGM4QGbpw38mgZe97GWo0tP0LVOWlVbdZTwbq5EseWHb6Hi4CJLsiCOOCBPMU3hWNDiJFafzu3AflT60AlzXhFfnzvSisnFpsLFHoYfnl7zkJelqqHuD6AWGR2Hj5cvX3MCErxQ2v1KGwG233ab4KDAuXFF2eyxfB0l/5ZVXstTq00QB7t3E4WdnLXMTOSsazAA47u4pKmrAbXjnLVkMGDQ3UaWZR1kkpLuhTC57Vb4OMuzNnQbnxnDyEZaMossG9uMf//gw3sLt66DKrIPLaXBWNNi4aui7p4EBaTAsvGRmqF5Dw4rCcDkNhrkUhieX+SaWTJ3ynBYaHOQjOg3qe+UDv7NuD1UaHp4OAnjHYoxi/YfQHOPohz3sYbakIWblrPR02pDWMquJOz9G/e1CITDIAtCJ2IRW8c66qiNnRYOTg70TikKFFqy/NrEYtXUWhQcwZPSW7M1vfnNnmjASG3mlx6A1fNUXRk+GLxd+uPJHnhX6iVp33XVpQF/GML5RocXhKusjij3OloUljxVG+snlWFYU96yMlZfE0ochKOlk/sYtcAmmnz8aDM8gYtPaiTk8iWiNsNJMYmZ4wxveYHVxLbYqigLtCq2owPQRc0J1OVISpImJkc6PszWdCTKRJV3OZA9fOQ2GaAwbnsSKoxbKsyWsiCItMFKhxcFcnQZGD4QZBysCwmvOatjFOUzIstJgeY3Kzz+OuyhwisUIh4OP+HvJF25vqcLWUPKefPLJ3NXHOVErhOtScD8YFdIyZUVFdT5muoyMQ1ZNtJAbwrCkQf99yimnaAaQPyuW9c7y5yBy/tbB6KNwTpcxyU4cmTLzc7gN71TTVtNg4xoq72T4iGZwRr2wR8xhjZoIhAkGVGiNC1fYDIXL10Hux5VVOxMmtimwBCzTzKIyCEbHxkdR4fMXmBUN9iFZsXuKihpwG47fCOgXz9VQ6+GHHx4622S3WGhy1KLQKl8HtWdkKogAySu0BlwHK+ByGuRjzYoGG1eNaJiFjwPSoIotnBmq11BVFA
XKaTDKaI+6NBp/M5GmWelbaLD9IzoN6kOMDLhCK1XrzCAG+2idQYaHYzs6g0b8x3/MauIeOUw9weIgYK5s2ZttsMEGmV7rhNbLX/7yTLLwFYIJ2/Lhsy6Mj8KSZXBdSvRqCo+zosHJwd4JWqFCCxbcPhn/mHh3FkUkNgGWbORxPS5LkHQArq5POtBXkeI54yLxR0Yor/QEWhRaV1xxhZ1ooZt4WQyLrQjr+lMMjcfNbo7pDG2uGBk3+7JIP380KM3Nox71KCza+r6CJALh4dTBZwa4Hd39dv311/c1hlvxbJjxz00hfcl0Qouu9aXpi4f8dQEexzL6klk8Fu7WHhwk4uEtnzh6W9jlKFffo9NgHzLt8YOvOGoSXuxMC8JJ4lQIO1KhRTlI3jtvz2JYshixRkj+yzkz1VsYGGtR4PS80UJ0gKmwLkvGWqxykFBHZyhbpqzCZmS6DIn1HQ1hIWZ61N1aWO8WVrfsks3fOpj/BMzqe+21lw1sSKnzEHwdDTauobjJtVah1OnrArptSxOdVJbmFbl/X97whFbfgek0bwlcUa7ydfDmm28Wx8ulnlE5eBrYeuutrb+ca0FaEiWYm8dZ0WAngIPsnia3DUeDxdksLYKFVkctCi2tX/l18LzzzrOxusYaa6RjNa/Qmtw6OBIup0GjglnRYOOq0UnCFjk4DY41M9StoX3dKaTBzuxIAo0wWWtCI84ocQsNNn5Ep8HoW+QfUWj9gX1R/58hAsh3oBns5WkDHyxyajHDhnnVjsCUEZDxHZufTNXspuyt9BOZxPZqciWPrHrpJ1ia4MgmHQC18UjBlCf9vhvvLQv79i222MKcSuH6CVG49sxpmfkYzGB1ZAoTIV0Rn89V9xbJPn5muL+H7Ng9HHnkkXXlKBeqYgtDRNy7oPiSAGI+DOotZacXqZJCPE0nApOjQdERAW340zZ00tHgrWID/6tf/Yra11tvvdRfuVqleonJrAUVC4GqgPwlmB45mHHFaRm5QE4KORWVDxR2OV+I3joNCorBA6IUSm5fcdQ8uHqmbvSyxBx77LGPecxj9Ko8wE1vGFJIGGEZaeTzn//8W265Bak3tRD58Ic/3M5slZdMyvJFgXMS3LpHFrRrr3nNa8aqJUyMLpkTz3YRI/ddIYsM3+pDVExZYTmZcKbLkBh95JPpGBnlcACO6QJ3jlxAyD3hVjIOWjNV+KsKBDTzZ6Z9im2Z+TtbxayO7sRGBaQEnabJ6miwsUeihZIZCYcBIW4lVQtJ+lu+nyqBKwKwfB0EfON4d9xxRzwGR+XgKxi7dZvlWLhDj4tRSn8cCoGhdk8lA5I2a0yWD0j8VzNjH3DAAdZlRMmcHh6q+2k5hesgjq/tVCLNw4RCJybTAjtjRPuDr4Mj4XIa7PwiU4ucHKUMW/K4M0PdGtoJeyENdubFLMm0TeyIEebI8WCauIUGG6F2Gkw/Rz5mhfxrfzsdBNgsISi56aabqA7n79Op1GtxBJYaAhjaW5NgBDNt010X5fxuYckS8ZeXnGnncnlVCE4F7C0IcF08EyOmZBRC1X1fRPJBDIX6qvvNb36D+2bTeOErGXtVOR7sy5KPR4yISA7rLWQfOGPhZqx8+rq3nIKn2TSe7Jw/42qrunLCXKutthqbGU7qEIkp+rjST9Azk97wpEhYvofrEJgcDeq0gei3s4WddDRsq5Dps5Gw2rlkvrMZFslOQLTPWtBH++pRX4JMFbxiMFuC/GDGWt9cMiKP2HffffNlRm/LuxxlzDw6DWbAaXk14IoTNoOjFcbVM6Rvv/32VA/ERZJKf8ghh5jkGvXJK1/5SsUTwA4DI0QOeKFQgVFh0SHGzDKwe7WUdStR+aKgo8a77rorx0HC5o0bxvntNttsg2yajJ/97Gd10prHlimrsBn5LnPoDR3biSeeiC8dVvlVVlkFW2DTZLMoc56MWpB0cECksDpPVojAsCtOYaWWDPKENu1wMAOyM28FDTb2CFrgXj0ao8
UubRi8KII5U2nDButQF1UzXZA+s59SsSiKJLxLq0hjSuBSrvJ1EGUG500tYzgnqCgCdBC3q5An4Q996EMIT8K3Hh4WgQF3T4W0UL0Nx3eLXYLIqOY60rrVsAS9wnWQy4bRMVMghibIzflFhcs24p577hFjwH1XXOZHykmvg31wOQ1Gn2n6j4WUotm7fAdUWHIJDdbNDBVraCf+hTSY5uXWBjm/xZU0fp7SNIppocFCqDs/otOgPkF5wBVa5VhNNqXsozlQj42VWNLJ1uqlOwJLCQEEB9YcqACXUH0GTSVrbdQtlQzjGL3SI3IlWXWV8wfKvnwDAmdw2Fsw4esjYbQNOUIl2KDO0uzAB6/kBjBKhrEnN0ibuQBu9/Ee9tCHPjRKU/GId0paRUb8tFRkH5kFu3WOlNl2CAMibj9CgjAy18gEoGHaLFJmbH77ysFphr2qyNtXpseDwORoENGtIQyLnGEtOulo2FZ98IMf/NGPfmSdRUGb+e4MdXRa3//+90mTmbErFoKw0sLBrI0T04iuoAvLyYTLu5wpJHpV2Owolz+ORGCoFSeqSIYIGGfgyjh6Gz0in7UY1uJIoWXxj/3fX5TLTk0Rufnmm0evSh4LFwW0cZdddhkFIj2XRKCk/L408vAcraEtU1ZfXVF8SZc5jrzZZptFGQU1i/IgK3JU/oI/DrvijAumBmR+Gz4WDTb2CFr43Oc+R0eM2+zsEXfCmTaLtyEbrKontIYWwkWrytfB8LS0pCJpr3HgZpF5xwxpRo8ZC4Fhd08lA7JlG075uFKH0aWPrCkTUmgVroP33nsvPmkNbWTW4WU86ScgsRJw8NcUWpNeB/vgchpMP9CUY0Qpg8tkVHJmUSihwcaZYaw1NAW/kAbTjJiqoMEyGQjXrqNyTtOEMS00KKgrPqLTYPgVCsPucrAQqPpkSB/YZLIv4nfbbbf1FWQHEXiLxaXLCvtQ8vj5RgBxoTnFYnsWXiwR9pp19M4777SYcrWTbCUwsKWEsECFZacPDQ6i81DJSzwwOdgbOy4L6K9//et9Relg1hOf+MTONHiiuOqqq3iFlQDaLJRknckUiWuIZ//vL3+IRDM21+oo71ABtjfcJGzjfN1118WxYej+qK+WI444grvB+HEErS+NKQh5C6HphAqP+GC0vFy73ZeXeGXvQzuT119lEJgcDVKy7tIYl44GbBVbCDOeBQQsr+1kSQYQzdgyYk0Ta8aOFgI2AzaYd9lllzSXYkoGM9MLBn2WJT8hqFgFxu2y06Cgm1VgkBUnarwMCKL4/KMxQvk09pY7KU0Nhn4F4+4oS8uiEBV18sknm9ycA8r5+Z8bFm0NhQwz3e9bQ1umLNo8YJcjBH7xi19QuEXutttu0Vt/bEdgwBUnbAzb8E033dTGZHhVXpiGsAbkuNvwDA029misGQlHoFSnTmkNzaz7nWvosHCNtQ6G0wW+BNSXKKBXYfoojT+2IzDu7ilfowZkxTac40RGvyhHM7WIhCexL7N6C9fB6pGppb9lHWyBK2y5CC3FXK/C9Gkyj6lAoHHVyNTYQoNhscPODJScWUPDei1cSINRRq6HeNGLXmQWgRyIPO6446IE6WMLDbZ8xJCmRGhp8/QqTJ8mW5CYXnZhQfo/hW7e9773/cEPfoCNFT8ueeurUXZGCBlLZJd95Xi8I7B8EYBY1lxzTWs/R1I6O4LXHcT9vMIDzFprrdWZJo3EVovCicdhGlqNNAExF110kcXjXkbrRGfKOYucHOyNQHFblZXwnve8R4fnojK51sJiOkVsb3rTm8ypEVYCqLWe/OQnR9nTR4QCeIjixyUf6VvFsCWzsEasXjUG6Clcl4ldnvSkJ9Fsc3Y0slg8xuAbjZ9GcppFBuYcVgstJzgQY3l1niDNS5d1Q0Mn2mkWjylEYHI0yPZYt1VxV3Zne/AdYa6WeBt+2QFbhVLWjM4woWUv1NmMMBI9rj32LQQcXrzmmmssjRm0KjsumG644QbGMx
ayUnjrrQU4AYzY3cJhl6NkuKQwEclGG220zjrrRG/zj+N22Wkwj+cU3ravOGkj8fUPx5L52elhy4jlqaVM3ROlJRODdQ4HuWw3y6qRupBtWRTCGrEx1/32IzW7jGRbQ6HB6PRVWGbfGtoyZVH+UF0Om2rhww47zPy/0cGtt946TeAxjQgMuOKELaFYtuE2JkNyC9MQ1oBkRSjfhudpsLFHmpEwLe9TxYkHRnogaTjd0RrKpin0axr2WrxiuIYOC9dY6yCnvnTwsc+okfbLzGVCp3BCiBY2XLF7ymPVsg1HDG30e/XVV/fVAo2bh3YSDL4vs0rL10E0aplF314hl7diObCixHvssYdFtqyDLXA5DRr+M/xvXDUyLW+hQRU7+MyQX0NVrwXKaTDMyBFnrISxSSKSi8m5MjN82xduocGWj+g02PdFcvHMev6bNAJ4qrFvAH/JcptWhwRTIqeddtopTTCFGKjdfthg+s8RmBUCkuNg3cBdBWkztt12W6MmbudO32ZiuPXBMuLtKk0GDeLn2hKgyUgTTCHm/0jwdzczTfk3OdjTjmCJYzjzzzY7TaAYmA/c/lji0047TfEKhCYC7HMUbwGyWF4YC7SY0du+R0SKah4eljqThTpRbBE604SR8loGNxnGp2Gk59tvv701AFuq733ve2mavhiE+JaR6xA43ZUm4yYDnYJnoQkTIN+X2uzSSy8NXykMF2jlQ5swoIqfp8Bc0qC0mHDn3AqTfi+NT+6W476NMMFQM8P6669vgwdnZWH5fWG0X7IqQJCXJpNybvXVV0/f4q7Tqtt///3Tt8RwR70loBYEZ51psH5AvW3JkMp1pslEjttlp0EDc4Y02LjiZAZD5lUot0VfkkkZvUJXxGEpG59Qbudi0bIohNVxuZdVhBVRGN8XFjeF9qszDd5HV1xxRSuTC6uiNC1T1lBdDpvEwDjooIOkLehbIsMsyzo8QxocasWJ8N9xxx1tsK233nrMtNFbHtEKS3vE0d40QWdMCQ029khr2XbbbZe2AV/BcjOIwjVMAJOmqzG5vTJ8ZWEs1g0TTqShDAgTDAjXuOugPA1y3jRsksIQo84ZHHjggYqfs8AMaRAk63ZPIz9B9TZclnbsbjpXOqo+/PDDbTxjcDCyJSSQSd8ll1xSkp40466D+WLFhXIQszNl9TrYCJfToH2OGdJg46rROZwsspoGLfvgM0PJGhp2p4IG2dXKIQ0u0xD3hQXmw9U0SLEtH9FpMP9dordY7fzeFDQlXgUOoCSawTYZf5ohJqi4Dj30UFuGceKP+Vj4dmrhGU7c0bj0x0VGAJm7tmd46TTv8ALkqKOOMkrhHweeircApMT6gVdciCh6xeOXv/xl5cXAJExAxi233NLe4r0KogvfTi08QxqcHOwpeuUKLfK+4x3vsO/ClpuPG5bGAMCuzd6+4AUvCF8RxlGYzbr8j6WhRK6BawsrlqGIdCwqmXolJuhUjkbpeZTCYKRCC08RVjWaPFRraVGZGOQXumlsgw02wI4pTIwUQKoybNg5uRK+JawruFdaaSUOr4RvwYQruCXOw6o3fEsY82FaTnwoKkIh3UmPmO3vs88+fKAwcVTgrB7nlQZf+MIXakhHH5cPIbUNXgEj5FtmBhWFaa3VjvF1OvCULApss802lgvpFZ6RwrccW2QM29tOUd3HPvYxe8ugjeYNyrnyyivlkQbXE2HJYfiYY46xQhA3oGkOX40M13XZaRBgZ0iD1F694uTZj8yAGVehRUVf+cpXEN7pEAlrHAcZO6toXBSsTIzHESMaLeDxqbOiKPLII48UAeLFlxUkTICqGGtZS8BiSvnhWwtXT1mDdNnawArFqR30beHRt7322ittrcX4OtiHTHl8y4qToUG4Dm3D+YLRzoIPzabABiTb8FtuuSXf4LFosKVHNIPGiPVCgMDwVts4Ma9r89i5cHuQXlmAE8bWKeaKyDwLMWKGjx0Krop1ULJ4Wm4npMNO0WUZNc
IDpMYoToMhXHXh6t0T1WVokLfV23C2M5IM4EOFC1ajrqGUgnJttHeaP0bpeRxXoVWxDqaVhjEjFVokrlsHG+FyGrTPNENetGXVmBANgknLzBCOfMJjraHKW0GDrPUyqiCACYhKKwzU0SCFt3xEp8HCr2PJUGj9PqJbWwD8f6IIHH300QgErQqkM1g/4dMA9hozZGz9dCbghBNOwAxwoi3pK/yuu+6yVyPvmOkrweMdgUEQQKJ0wAEHWFGQCaomfD1x3wlbI/YqFo93HfjXqDps9u2ucu6MgZ+TyFLJWJYuv/xye8TDxiabbIKkEtkQU6Fo8J3vfOfIiyJV4LAB3Q02ExqcHOwRSsjT5QuFE1rpZwrTY0rD7kW7IO7kQNuE1zK0YgwAWBMScwALDyS6vZMYqsDYlryE2edvvPHGYZmdYSQF3LJgrxg8XMxrl6YiSsAwFjaIs02Yz2PTesEFFyBTICVe+zg7wtVcnQWGkegJbGJHocURmfBVGD7jjDP23Xdfi6EB+BsM33aG2fCHsjaEFOi00F2RmBNXu+++O0fX2fbTbI6Y6NIg1qNQN2wlh1CzNnGwmF6DHqSBdENmSlDNtddeGzYGe0mcvMFVEAk4O++8s70VPSJMufvuu+1Do5nQtRBcGoRGMCxq5uF5pcE77rgDisBNHwizjedgBxMg4wR97RVXXGGwM9mirJVQQN+iemZQCeib0SHxyKBiR6T4fIC2Qez/X3t3jCM50QZg+BScgIhgs01ISMg4AysCMs5AiLRcASG02mijvQMSHIED/e8vSyXj7nZ3256eEfNssHJ77HL5aZfLrq+qenpGLb5bIg1kr8i3vrJfE2S7VyrLc8NTTpMqwjqmlWjH7777rvh37SB1dyjcNV2uxW4bYXm2CBeOLfFa3ku5dH766afTQ6ys2XbKymCkz1sG519BmbmxxmnLcbu79Phx6WrpBjvmwq2KqXf52S2rlb7//vvu4dPo27FNd9eiWSs30j2VwnSUUX9V5RWQPr1FjMyMhQLA33777WjOroNFD3Ld+WuPKETU7IXT7LVFuJuMvSp+7DgW9tyy9p9yo0OqqautFu/I1eP9zOQIMIzctqAenGvsWd5c46yXwX7/7Jdffpky1r29Qeo9YvWoUxXQ09GIK//666+jD/jiLDaXwc1nNGVgXpc1l1pjtnrc7eGz15muummbjx8/jq73I9tV8d1bpnkyK7bNk1lVWHC6GYZreajfelt2v/rzzz9Hk9/Ydz9XSW2rBwtZ9Y1MOentr7q7/vU1gHZP6PcCp2z31yLNi+mLlcHx9W1e2Pn2tF4Gy9Xm1/CmmK4SqW9EifQQ++7du95uenibGgfGBNR1lZh+NfmqQK9INXa3Wc+TNSlc3X5DPbieZm3W04NlNeP42ZHFLpvrwZ1cymBfxPM+i26uNZ6oDO68M0wX9uY6dNp9QxmsI+/nz5+n3ev/cfZ1b/rr9H8zB4xI87Rmcxls981fYvsqg5P/Lf///5fje1j37wECNY40Q9qYG/r06+lZs6k5Cuc+IDNnD/GMPRHuCsPa+DUI/PbbbyuNJvWvPDu248cffxwl6+wgrdqqFu8/Y/sWiov88ccfz8j77GXwidgXpHeN0Grf3mNHjGT+fU3LdTL9+++/F4eYfjfrdOOVNYtZj/rhkxF3ObtXr1KdyOK4lz7eOEKr+NPZY62sPJ3wsOjsPMS12LeBbj///HNVwNms9ma4/rt0xfaaKmqx7xSrmA5US9D4az/uMo4+yuM8Dl2Lydj4hSz8h8tgLdFFScc3slgomnva6XV8KdvuDNPuBZxG++/peMdxiLMLhVFXLuavv/769Goc6fQ0NSbJXJzs9PHNmzdFysf2i4URDOv9Z9Gdf7Hl6cc9p6wMPnsZ3FDjdA1cffw4vU6mNaMlvctyZcrBgiuLy7hiVYS4+NCllMf6PZVCT03FsaZDv3//fqR5daFOcg2yX+R5/rEodZGtlXT23LL2nHJZalrdeV
Zbfvv2bR07VnKrHlzBufdP22qc9TJYP6SGhq+/hjeF3cpg3D1lcNsZDbca8i7lvMb933//fWy5WKgYFk5eXMzjYz2WxrPZYsf9XJvrwXrejE7xI6vzhaL4dclaZLiPyuCpyb1rdr49rZfBMrPnNbzrfL0HZCHbeiLeeMp3jdDaXA+uZGa0m/eyubLZ5npwD5cy2Dfy7M+i22qNJyqDO+8M0xW+pw7dVgbrbjuvO64un52wdHMZ7Ky3fYntqAxO18wt//fAL6B1NrjzVCvr8VfnkfpDzUtU3efrf9ST31Md9bZ0n/3Gfcsla5vXI/DXX39988038673vcPUWa9HwEsIBSGqimrlmbpxXdqsXlGNYil8NYphPRYbClBv4ku7PGb9SyiDT8c+DO8NaE07NnKuWEvxmPGt9SsdhSfrgjpSHguHPHh14+ygY7L+cdz69jajxdmQ6sjAYuGRAa0OnUmBpTFJ1JTzppWrQF2dS6fWnGaIWowRjL1+vp8+fVqc1/Sxp8ypxaTgYkPlxjZFI07LY307ahksS19++eUtTbEjtccs/LfLYIGZWvSSHxdzd8s+1li2mBPsVHvDnWFK5IcffpgOV/zpNNmra/ptj8KotbuNPNenvsFkjaMtZHV19/qe96Mp7TJ2b6EiXDC16/bS7rXljelMFz9McmmX+fqdp6wMTsVwTvr45btqnLJ34+PH6YncHtCqtBZe7VZcs111UBM8nKZ2ac3mSuHDhw9T2an5r2FVl9K/tL4BoMXRF23xnUWDTm5Jbc8ta/Mpdy4FtBoI3jiYngyberQRLVfvkOrBS9fAtvUbapxbyuA///zTG/fpa3ijlhsruZ7VGuP2lMENZzTPT0MueoQbrfCVyi7RRmud9mqa79VyFWXVZYOcyvyoB4tSV7Gu9GKZEtnDtbMe7Hyby2H+zF/mO/36qdS8uDjH6aMyeJblrpU7355uKYPlZ/NreIPme4hdhLW6SHo9rE3zrjMdRaludld33FkPnk3/xoBW+26uB3dyKYPP/iy6odZ4ojK4884wFYE9dei2MnhIQGtPGWzfDV/ixNX/r7wMDof1hW7+phwcT3ePW+i9qDaa6Rm0dpkaTOdPmY/Lx7+PZMrBf3v49CIEuoXVY733zJogv/rqq5VhW1N2K1zNRzF+kHzlHApI1JDU61zNQzXEv4Qy+LzD2+dWT8c+P8qG5cIhDaqoB33zrtzyLW84xOku/TJBTYdNZtK9ujt281ScbvMy1zTRX0GjZmnrIu+p7q6LvNbA9q1eKOBRU8g8AHz2ZJtJoME0i0NcKo9tPA9RnE3wWVa+kjJYz6+mj6i7QKOU7rqe770zHPgl1vG2LkG1QTRv5115Lg+1c/Ue1URk3TQqC7UDHpixp0tKGXw62xtTvqvGuXS7u/FYVzcr/UVo9uoupxvsqRROU7txTb1DKn2VwRoim+ftiy++uHHHsdnmW1YpbDjlgsqLINzIyfqCenDd596/3lvj3FgG26yH//Ea3vvF4tHlUj73l8F7z2iRk67M3lx6UNnw5tKjYLNP1zhejHn8ItEi/bMfN3OdTe2ulXUu6R2wJ/C6ZPUgOkaLriSiDK7gPOBPN5bBcrLnNbwHwspvvSIa3lTPg6uNAw848QccYnM9uIdLGVx08XzAF704xL21xmPK4CKTN37cX4feeKAn2mxzGbz3S5zn/9WWwTnCynJTDgporfi8rj8JaL2u79vZvjyBl9OY/vJs5IjAIwSUwUcoOwaBywLK4GUbfyHwCAFl8BHKjkHgsoAyeNnGXwg8QkAZfISyYxDYLVBA61/TsOxOUAIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEDhYQ0DoYVHIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQLHCghoHespNQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgYMFBLQOBpUcAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAsQICWs
d6So0AAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQOBgAQGtg0ElR4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgcKyAgNaxnlIjQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBA4WEBA62BQyREgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBwrIKB1rKfUCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEDhYQ0DoYVHIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQLHCghoHespNQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgYMFBLQOBpUcAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAsQICWsd6So0AAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQOBgAQGtg0ElR4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgcKyAgNaxnlIjQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBA4WEBA62BQyREgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBwrIKB1rKfUCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEDhYQ0DoYVHIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQLHCghoHespNQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgYMFBLQOBpUcAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAsQICWsd6So0AAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQOBgAQGtg0ElR4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgcKzA/wAzWzWgvySnEAAAAABJRU5ErkJggg==)" + ], + "metadata": { + "id": "oVnzLUmUqtrC" + } + }, + { + "cell_type": "code", + "source": [ + "retrieve_features()" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 990 + }, + "id": "6SkdX9oMb_-o", + "outputId": "6297f714-a9fb-4c62-c944-3e405b0f98d0" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "display_data", + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
featurescore
0Sum(Engine_Size, Fuel_Consumption_comb_L_100km_)0.9111157
1log_transform(Engine_Size)0.8786785
2log_transform(Fuel_Consumption_in_City_Hwy_L_100_km_)0.8520729
3GroupByThenSum(Cylinders; Fuel_Consumption_comb_L_100km_)0.8102135
4quantilize(Fuel_Consumption_in_City_L_100_km_)0.7255357
5GroupByThenAverage(Make; Fuel_Consumption_in_City_L_100_km_)0.67406154
6poly_transform(Fuel_Consumption_in_City_Hwy_L_100_km_)0.6694274
7z_scaling(Fuel_Consumption_in_City_Hwy_L_100_km_)0.6422561
8quantilize(Fuel_Consumption_comb_L_100km_)0.62388855
9Product(Fuel_Consumption_in_City_L_100_km_, Fuel_Consumption_comb_L_100km_)0.61903596
10GroupByThenAverage(Make; Fuel_Consumption_in_City_Hwy_L_100_km_)0.55897516
11Sum(Fuel_Consumption_comb_L_100km_, Fuel_Consumption_in_City_Hwy_L_100_km_)0.5260287
12GroupByThenSum(Cylinders; Fuel_Consumption_in_City_Hwy_L_100_km_)0.500389
13poly_transform(Fuel_Consumption_in_City_L_100_km_)0.38317132
14z_scaling(Engine_Size)0.37189126
15Product(Fuel_Consumption_in_City_Hwy_L_100_km_, Fuel_Consumption_in_City_L_100_km_)0.33482778
16log_transform(Fuel_Consumption_comb_L_100km_)0.27907348
17z_scaling(Fuel_Consumption_comb_L_100km_)0.2777691
18GroupByThenAverage(Make; Fuel_Consumption_comb_L_100km_)0.23432171
19Sum(Engine_Size, Fuel_Consumption_in_City_Hwy_L_100_km_)0.21788299
20GroupByThenAverage(Make; Engine_Size)0.18543518
21Product(Fuel_Consumption_comb_L_100km_, Fuel_Consumption_in_City_Hwy_L_100_km_)0.13918126
22GroupByThenSum(Cylinders; Fuel_Consumption_in_City_L_100_km_)0.104188085
23poly_transform(Engine_Size)0.08647835
24poly_transform(Fuel_Consumption_comb_L_100km_)0.084222436
25quantilize(Engine_Size)0.07567251
26log_transform(Fuel_Consumption_in_City_L_100_km_)0.07491779
27z_scaling(Fuel_Consumption_in_City_L_100_km_)0.044052005
28GroupByThenSum(Cylinders; Engine_Size)0.025876045
29quantilize(Fuel_Consumption_in_City_Hwy_L_100_km_)0.0042961836
" + ] + }, + "metadata": {} + } + ] + }, + { + "cell_type": "code", + "source": [], + "metadata": { + "id": "x78H8A_DcAWk" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/automated_feature_engineering/README.md b/automated_feature_engineering/README.md new file mode 100644 index 00000000000..59f60559e10 --- /dev/null +++ b/automated_feature_engineering/README.md @@ -0,0 +1,70 @@ +# Automated Feature Engineering (AFE) + +How can we simplify and automate the complex and time-consuming process of feature engineering for machine learning models, allowing users to focus on model development and analysis while improving their model's performance and uncovering hidden insights in their data? Our goal is to develop an automated feature engineering system that can identify which engineered features can be optimal for a downstream ML task. To obtain an output such as: + +![](featureImage.png) + +AFE uses a set of feature importance masks to learn which features best benefit from which transformations. It uses local and global masking to determine the best transform functions for each feature, as well as which transformed features lead to an optimal performance with respect to the downstream task. Further details can be found in our papers on [automated feature engineering](https://arxiv.org/pdf/2406.04153) and [feature selection](https://arxiv.org/pdf/2304.03202). + + +[A notebook demonstrating AFE](GithubAutomatedFeatureEngineering_Demo.ipynb) on Google Cloud. This repository has built-in integration with Google Cloud in terms of data retrieval, processing, and artifacts storage. 
+ +It is easy to get started with AFE on the command line, for example, after +running `pip3 install -r requirements.txt`, we can run: +``` +python3 trainer.py --project_id="gcp_project_id" --dataset_name=housingPrice --train_table_name=airbnb2023_float --target=price --task_type=regression --num_steps=10 --model_type=discovery --upload_features_to_bq=True +``` + +Here is the full list of possible command line flags: +``` + --batch_buffer_size: Number of batches held in shuffling buffer. + (default: '32') + (an integer) + --batch_size: Batch size + (default: '2048') + (an integer) + --config: Configuration string for running pipeline from container. + --data_buffer_size: Dataset buffer size. + (default: '4096') + (an integer) + --data_name: Dataset name + (default: 'isolet') + --dataset_name: BigQuery dataset name for train and test. + --decay_rate: Decay rate + (default: '0.5') + (a number) + --decay_steps: Decay steps + (default: '500') + (an integer) + --feature_dim: Feature dimension + (an integer) + --gcs_output_path: GCS output path. + --learning_rate: Learning rate. + (default: '0.01') + (a number) + --logging_filename: Name of the file used for logging discovered or selected + features. + (default: 'features.json') + --model_type: Model type can be feature selection or discovery. + (default: 'discovery') + --num_mlp_layers: Number of MLP layers in MLP model + (default: '2') + (an integer) + --num_selected_features: Number of features for feature selection + (an integer) + --num_steps: Number of training steps + (default: '50') + (an integer) + --project_id: The BigQuery project ID. + --seed: Random seed + (default: '21') + (an integer) + --target: Name for the training target feature. + --task_type: Task type can be classification or regression. + (default: 'classification') + --test_table_name: Table name of the test dataset. + --train_table_name: Table name of the training dataset. + --upload_features_to_bq: Whether to upload features to BQ table. 
+ (default: 'true') +``` +Disclaimer: This is not an officially supported Google product. \ No newline at end of file diff --git a/automated_feature_engineering/bq_data.py b/automated_feature_engineering/bq_data.py new file mode 100644 index 00000000000..251e3b47000 --- /dev/null +++ b/automated_feature_engineering/bq_data.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Retrieve dataset from BigQuery.""" + +import collections +import dataclasses +from typing import Callable, Dict, Optional, Tuple, Union + +import bq_dataset +import feature_metadata +from google.cloud import bigquery +import tensorflow as tf + + +X_KEY = "x" +EMBED_IDX_KEY = "cat_embed_idx" +TARGET_KEY = "y" +_CAT_EMBED_DIM = 1 +_PAGE_SIZE_MULTIPLIER = 300 + + +@dataclasses.dataclass +class BQInfo: + """Object containing BigQuery information. + + Attributes: + bq_project: Name of BigQuery project. + dataset_name: BiqQuery Dataset name. + table_name: BigQuery table name. + """ + + bq_project: str + dataset_name: str + table_name: str + + +def create_embedding_fn( + vocab, +): + """Create embedding function for categorical features. + + Args: + vocab: vocabulary for the categorical feature. + + Returns: + Embedding function for the categorical feature. 
+ """ + if isinstance(list(vocab.keys())[0], int): + lookup_layer = tf.keras.layers.IntegerLookup() + else: + lookup_layer = tf.keras.layers.StringLookup() + + lookup_layer.adapt(list(vocab.keys())) + + embed_layer = tf.keras.layers.Embedding( + lookup_layer.vocabulary_size(), _CAT_EMBED_DIM + ) + + def look_up_and_embed(x): + embed_idx = lookup_layer(x) + return embed_layer(embed_idx), tf.expand_dims(embed_idx, -1) + + return look_up_and_embed + + +def create_target_process_fn( + vocab, + is_classification, +): + """Create transform function for the target feature. + + Args: + vocab: vocabulary for the target feature. + is_classification: whether task is classification. + + Returns: + Transform function for the target feature. + """ + if is_classification: + if isinstance(list(vocab.keys())[0], int): + lookup_layer = tf.keras.layers.IntegerLookup() + else: + lookup_layer = tf.keras.layers.StringLookup() + + lookup_layer.adapt(list(vocab.keys())) + return lookup_layer + else: + return lambda x: tf.expand_dims(x, -1) + + +def make_categorical_transform_fn( + feature_data, + target_key, + task_type, +): + """Make transform function for categorical features. + + Args: + feature_data: metadata for the training BQ table. + target_key: key of training target. + task_type: training task type. + + Returns: + Transform function for categorical features. + + Raises: + ValueError: If the specified target key is not amongst the features. 
+ """ + cat_features = [] + numerical_features = [] + feature_embedding_fn = {} + target_metadata = None + for feature in feature_data: + if feature.name == target_key: + target_metadata = feature + continue + if feature.vocabulary: + feature_embedding_fn[feature.name] = create_embedding_fn( + feature.vocabulary + ) + cat_features.append(feature.name) + else: + numerical_features.append(feature.name) + assert feature.input_data_type.lower().startswith( + "float" + ), "The column dtype must be float" + + if target_metadata: + target_process_fn = create_target_process_fn( + target_metadata.vocabulary, + is_classification=task_type == "classification", + ) + else: + raise ValueError("Target feature not found.") + + @tf.function + def categorical_transform_fn( + features, + ): + """Input is dictionary of all features in batch.""" + all_features = collections.defaultdict(list) + # Putting categorical features first eases aggregation learning. + for feature_name in cat_features: + embedding, embed_idx = feature_embedding_fn[feature_name]( + features[feature_name] + ) + all_features[X_KEY].append(embedding) + all_features[EMBED_IDX_KEY].append(embed_idx) + + for feature_name in numerical_features: + all_features[X_KEY].append(tf.expand_dims(features[feature_name], -1)) + + if all_features[EMBED_IDX_KEY]: + all_features[EMBED_IDX_KEY] = tf.concat( + all_features[EMBED_IDX_KEY], axis=-1 + ) + + all_features[X_KEY] = tf.concat(all_features[X_KEY], axis=-1) + all_features[TARGET_KEY] = target_process_fn(features[target_key]) + return all_features + + return ( + categorical_transform_fn, + list(cat_features), + list(numerical_features), + ) + + +def get_data_from_bq_with_bq_info( + bq_client, + bq_info, + batch_size = 256, + limit = None, + drop_remainder = False, +): + """Obtains data or data generator from a BQ table, given the BQInfo. + + Args: + bq_client: BigQuery client. + bq_info: BQInfo object. + batch_size: batch size. 
+ limit: Number of records to query from BigQuery table. + drop_remainder: whether to drop last smaller-sized batch. + + Returns: + TF Dataset containing specified data, and table metadata. + """ + + bq_metadata_builder = feature_metadata.BigQueryMetadataBuilder( + bq_info.bq_project, + bq_info.dataset_name, + bq_info.table_name, + bq_client=bq_client, + ) + + retrieval_options = feature_metadata.MetadataRetrievalOptions( + get_mean=False, + get_variance=False, + get_min=True, + get_max=False, + get_median=False, + get_log_mean=False, + get_log_variance=False, + min_log_value=0.0, + number_of_quantiles=None, + get_mode=False, + ) + table_metadata = bq_metadata_builder.get_metadata_for_all_features( + retrieval_options + ) + + dataset = bq_dataset.get_bigquery_dataset( + table_metadata, + bq_client=bq_client, + batch_size=batch_size, + page_size=batch_size * _PAGE_SIZE_MULTIPLIER, + limit=limit, + drop_remainder=drop_remainder, + ) + options = tf.data.Options() + # Avoid a large warning output by TF Dataset. + options.deterministic = False + dataset = dataset.with_options(options) + return dataset, table_metadata diff --git a/automated_feature_engineering/bq_dataset.py b/automated_feature_engineering/bq_dataset.py new file mode 100644 index 00000000000..1a6c3b4a55b --- /dev/null +++ b/automated_feature_engineering/bq_dataset.py @@ -0,0 +1,749 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Module for creating a tf.data.Dataset from a BigQuery table.""" + +import collections +import dataclasses +import functools +import tempfile +from typing import Any, Callable, Generator, Iterable, Mapping, Optional, Sequence, Tuple, Union + +import bq_utils +import feature_metadata +from google.cloud import bigquery +from google.cloud import bigquery_storage +import numpy as np +import pandas as pd +import tensorflow as tf + + +# Investigate using BigQueryReadClient with one generator per +# stream. + +NO_CACHE_LOCATION_NAME = 'none' + +VALUES_KEY = 'values' +WAS_NULL_KEY = 'was_null' + +# The value used to replace NULL strings. +NULL_STRING_PLACEHOLDER = '__NULL_PLACEHOLDER__' +# The value used to replace NULL floats. +NULL_FLOAT_PLACEHOLDER = 0.0 +# The value used to replace NULL integers. +NULL_INT_PLACEHOLDER = 0 +# The value used to replace NULL booleans. +NULL_BOOL_PLACEHOLDER = False + +TensorAndMaskType = Union[ + Mapping[str, tf.Tensor], Mapping[str, Mapping[str, tf.Tensor]] +] +TensorAndMaskSpecType = Union[ + Mapping[str, tf.TensorSpec], Mapping[str, Mapping[str, tf.TensorSpec]] +] +KerasInputType = Callable[..., tf.Tensor] + +_TF_FLOAT_DTYPE = tf.dtypes.float32 +_TF_INT_DTYPE = tf.dtypes.int32 +_TF_BOOL_DTYPE = tf.dtypes.bool +_NP_FLOAT_DTYPE = np.float32 +_NP_INT_DTYPE = np.int32 +_NP_BOOL_DTYPE = np.bool_ + +# The default value for variables related to input formatting. +NESTED_FORMAT_DEFAULT = False + +_USE_LEGACY_SQL = False +_WITH_MASK_DEFAULT = False + + +def convert_series_to_tensor_dictionary( + input_series: pd.Series, + value_to_replace_null: Any, + np_dtype: np.dtype, + tf_dtype: tf.dtypes.DType, + with_mask: bool = _WITH_MASK_DEFAULT, +) -> Union[tf.Tensor, Mapping[str, tf.Tensor]]: + """Converts the input series into a tensor after filling null values. + + Args: + input_series: The series to be converted. + value_to_replace_null: Any nulls in this feature will be replaced with this + value. 
+ np_dtype: The data in the series will be converted to this data type after + filling and nulls and prior to converting to Tensorflow. + tf_dtype: The data type for the output tensor(s). + with_mask: If True (the default) the data will be output as a dictionary + with the keys 'values' and 'was_null'. Otherwise, the tensor will be + output directly. + + Returns: + A tensor with the null values filled. If with_mask is true this tensor + will be included in a dictionary with the key 'values' and the dictionary + will also contain the key 'was_null' which will be a boolean tensor that + is true if that value was pandas NA and false otherwise. + """ + if with_mask: + # Check performance to see if using the null mask is faster. + missing_mask = input_series.isnull() + # pandas disabled silent downcasting in version 2.2.1, suppressing + # the warning here since we auto-inferred the dtype already. + # Uncomment this when pandas version is updated to 2.2.1: + # with pd.option_context('future.no_silent_downcasting', True): + input_series = input_series.fillna(value_to_replace_null) + return { + VALUES_KEY: tf.convert_to_tensor( + input_series.to_numpy(dtype=np_dtype), + dtype=tf_dtype, + ), + WAS_NULL_KEY: tf.convert_to_tensor( + missing_mask.to_numpy(dtype=bool), + dtype=tf.dtypes.bool, + ), + } + else: + with pd.option_context('future.no_silent_downcasting', True): + filled_inputs = input_series.fillna(value_to_replace_null) + return tf.convert_to_tensor( + filled_inputs.to_numpy(dtype=np_dtype), + dtype=tf_dtype, + ) + + +# noinspection PyUnresolvedReferences +@dataclasses.dataclass +class BQFeatureConverter: + """Converts an input series into a structure that can be used by tf.Dataset. + + Uses the input arguments to help convert the incoming data (i.e. pd.Series) + into a dictionary of tensors (or dictionary of dictionaries) that can be + passed into a TensorFlow dataset. 
+ + Attributes: + value_to_replace_null: Any nulls in this feature will be replaced with this + value. + np_dtype: The data in the series will be converted to this data type after + filling and nulls and prior to converting to Tensorflow. + tf_dtype: The data type for the output. + """ + + value_to_replace_null: Any + np_dtype: np.dtype + tf_dtype: tf.dtypes.DType + + def series_to_tensor( + self, + input_series: pd.Series, + with_mask: bool = _WITH_MASK_DEFAULT, + ) -> Union[tf.Tensor, Mapping[str, tf.Tensor]]: + """Converts the input series into a tensor after filling null values. + + Args: + input_series: The series to be converted. + with_mask: If True (the default) the data will be output as a dictionary + with the keys 'values' and 'was_null'. Otherwise the tensor will be + output directly. + + Returns: + A tensor with the null values filled. If with_mask is true this tensor + will be included in a dictionary with the key 'values' and the dictionary + will also contain the key 'was_null' which will be a boolean tensor that + is true if that value was pandas NA and false otherwise. 
+ """ + return convert_series_to_tensor_dictionary( + input_series, + self.value_to_replace_null, + self.np_dtype, + self.tf_dtype, + with_mask, + ) + + +# https://numpy.org/doc/stable/reference/arrays.dtypes.html#string-dtype-note +_STRING_CONVERSION_INFO = BQFeatureConverter( # pytype: disable=wrong-arg-types # numpy-scalars + NULL_STRING_PLACEHOLDER, + np.str_, + tf.dtypes.string, +) +_FLOAT_CONVERSION_INFO = BQFeatureConverter( # pytype: disable=wrong-arg-types # numpy-scalars + NULL_FLOAT_PLACEHOLDER, + _NP_FLOAT_DTYPE, + _TF_FLOAT_DTYPE, +) +_INT_CONVERSION_INFO = BQFeatureConverter( # pytype: disable=wrong-arg-types # numpy-scalars + NULL_INT_PLACEHOLDER, _NP_INT_DTYPE, _TF_INT_DTYPE +) +_BOOL_CONVERSION_INFO = BQFeatureConverter( # pytype: disable=wrong-arg-types # numpy-scalars + NULL_BOOL_PLACEHOLDER, _NP_BOOL_DTYPE, _TF_BOOL_DTYPE +) + +# Create a mapping from the BigQuery data types to how we want to handle their +# conversion to tensors. This uses a default type of converting the objects to +# strings if the conversion is not explicitly included in the dictionary. +_TF_INFO_FROM_BQ_DTYPE = collections.defaultdict( + lambda: _STRING_CONVERSION_INFO, + { + 'BIGNUMERIC': _FLOAT_CONVERSION_INFO, + 'FLOAT': _FLOAT_CONVERSION_INFO, + 'FLOAT64': _FLOAT_CONVERSION_INFO, + 'INT64': _INT_CONVERSION_INFO, + 'INTEGER': _INT_CONVERSION_INFO, + 'NUMERIC': _FLOAT_CONVERSION_INFO, + 'STRING': _STRING_CONVERSION_INFO, + 'BOOL': _BOOL_CONVERSION_INFO, + }, +) + + +# Make this cleaner and more dynamic. +def _dataframe_to_dict_of_tensors( + df: pd.DataFrame, + metadata_container: feature_metadata.FeatureMetadataContainer, + with_mask: bool = _WITH_MASK_DEFAULT, + nested: bool = NESTED_FORMAT_DEFAULT, +) -> TensorAndMaskType: + """Converts a dataframe to a dictionary of tensors. + + Args: + df: The input dataframe to be converted. + metadata_container: The metadata collection for the data in the dataframe. 
+ with_mask: If true a mask specifying if the data was null will also be + returned. + nested: If true the output dictionary will have sub-dictionaries each with + the fields 'values' and 'was_null'. If false the output dictionary will + have keys '{feature_name}_values' and '{feature_name}_was_null' for each + feature. Note that this only applies if `with_mask` is true. + + Returns: + A dictionary of tensors and potentially masks of each of the features in the + input dataframe. + """ + output = {} + for col_name, col_data in df.items(): + metadata = metadata_container.get_metadata_by_name(col_name) # pytype: disable=wrong-arg-types # pandas-drop-duplicates-overloads + bq_conversion = _TF_INFO_FROM_BQ_DTYPE[metadata.input_data_type] + feature_output = bq_conversion.series_to_tensor( + col_data, with_mask=with_mask + ) + if with_mask and not nested: + for k, v in feature_output.items(): + output[f'{col_name}_{k}'] = v + else: + output[col_name] = feature_output + + return output # pytype: disable=bad-return-type # pandas-drop-duplicates-overloads + + +def _get_output_from_df_iterator( + output_dataframes: Iterable[pd.DataFrame], + metadata_container: feature_metadata.FeatureMetadataContainer, + batch_size: int = 64, + with_mask: bool = _WITH_MASK_DEFAULT, + nested: bool = NESTED_FORMAT_DEFAULT, + drop_remainder: bool = False, + verbose: bool = False, +) -> Generator[TensorAndMaskType, None, None]: + """Converts an iterator of DataFrame into the correct format and batch sizes. + + Args: + output_dataframes: An iterator of pandas DataFrames as generated from + bigquery.RowIterator.to_dataframe_iterable. + metadata_container: The metadata generated for the features to be returned + by the query. + batch_size: The desired output batch size. + with_mask: If false the output If true each feature will be returned as a + dictionary with the keys 'values' and 'was_null'. 
The 'values' field is a + dense tensor with null values filled and the 'was_null' tensor is a dense + boolean tensor that is true where the 'values' tensor was null prior to + being filled. + nested: If true the output dictionary will have sub-dictionaries each with + the fields 'values' and 'was_null'. If false the output dictionary will + have keys '{feature_name}_values' and '{feature_name}_was_null' for each + feature. Note that this only applies if `with_mask` is true. + drop_remainder: If true no partial batches will be yielded. + verbose: If true will print debugging messages using tf.print. These can be + used to help debug when this function is running and when cached data is + being used. + + Yields: + A dictionary of features from the query. If with_mask is true each feature + will have a dictionary with keys 'values' and 'was_null' where 'values' + contains a Tensor of the values from that column after null values have been + filled and 'was_null' is a boolean tensor + """ + # Investigate more performant ways to do this. + data_frame_cache = [] + cur_rows = 0 + + conversion_function = functools.partial( + _dataframe_to_dict_of_tensors, + metadata_container=metadata_container, + with_mask=with_mask, + nested=nested, + ) + + for df in output_dataframes: + new_rows = df.shape[0] + cur_rows += new_rows + + # Shortcut the happy path because eventually it might work. 
+ if new_rows == batch_size and not data_frame_cache: + yield conversion_function(df) + cur_rows = 0 + continue + + while cur_rows >= batch_size: + # We now have enough data to output something + if data_frame_cache: + df = pd.concat(data_frame_cache + [df], axis=0, ignore_index=True) + data_frame_cache = [] + + output_data = df.iloc[:batch_size].reset_index(drop=True) + + if cur_rows > batch_size: + df = df.iloc[batch_size:] + cur_rows = df.shape[0] + else: + cur_rows = 0 + + if verbose: + tf.print('Yielding BQ Query') + yield conversion_function(output_data) + + if cur_rows > 0: + data_frame_cache.append(df) + + if data_frame_cache and not drop_remainder: + if verbose: + tf.print('Yielding BQ Query') + yield conversion_function( + pd.concat(data_frame_cache, axis=0, ignore_index=True) + ) + + +def bigquery_table_batch_generator( + metadata_container: feature_metadata.BigQueryTableMetadata, + bq_client: bigquery.Client, + bqstorage_client: Optional[bigquery_storage.BigQueryReadClient] = None, + batch_size: int = 64, + with_mask: bool = _WITH_MASK_DEFAULT, + nested: bool = NESTED_FORMAT_DEFAULT, + drop_remainder: bool = False, + page_size: Optional[int] = None, + verbose: bool = False, +) -> Generator[TensorAndMaskType, None, None]: + """Generates DataFrames iteratively from a Bigquery query. + + Args: + metadata_container: The metadata generated for the features to be returned + by the query. + bq_client: The BigQuery client that will be used for the query. + bqstorage_client: The BigQueryStorageClient that will be used for the query. + batch_size: The desired output batch size. + with_mask: If false the output If true each feature will be returned as a + dictionary with the keys 'values' and 'was_null'. The 'values' field is a + dense tensor with null values filled and the 'was_null' tensor is a dense + boolean tensor that is true where the 'values' tensor was null prior to + being filled. 
+ nested: If true the output dictionary will have sub-dictionaries each with + the fields 'values' and 'was_null'. If false the output dictionary will + have keys '{feature_name}_values' and '{feature_name}_was_null' for each + feature. Note that this only applies if `with_mask` is true. + drop_remainder: If true no partial batches will be yielded. + page_size: the pagination size to use when retrieving data from BigQuery. A + large value can result in fewer BQ calls, hence time savings. + verbose: If true will print debugging messages using tf.print. These can be + used to help debug when this function is running and when cached data is + being used. + + Yields: + A dictionary of features from the query. If with_mask is true each feature + will have a dictionary with keys 'values' and 'was_null' where 'values' + contains a Tensor of the values from that column after null values have been + filled and 'was_null' is a boolean tensor + """ + if page_size is None: + page_size = batch_size + row_iterator = bq_client.list_rows( + metadata_container.bigquery_table, + selected_fields=metadata_container.to_bigquery_schema(), + page_size=page_size, + ) + + output_dataframes = row_iterator.to_dataframe_iterable( + bqstorage_client=bqstorage_client + ) + + yield from _get_output_from_df_iterator( + output_dataframes=output_dataframes, + metadata_container=metadata_container, + batch_size=batch_size, + with_mask=with_mask, + nested=nested, + drop_remainder=drop_remainder, + verbose=verbose, + ) + + +def bigquery_query_batch_generator( + query: str, + metadata_container: feature_metadata.FeatureMetadataContainer, + bq_client: bigquery.Client, + bqstorage_client: Optional[bigquery_storage.BigQueryReadClient] = None, + batch_size: int = 64, + with_mask: bool = _WITH_MASK_DEFAULT, + nested: bool = NESTED_FORMAT_DEFAULT, + drop_remainder: bool = False, + page_size: Optional[int] = None, + verbose: bool = False, +) -> Generator[TensorAndMaskType, None, None]: + """Generates 
DataFrames iteratively from a Bigquery query. + + Args: + query: The query to be run. + metadata_container: The metadata generated for the features to be returned + by the query. + bq_client: The BigQuery client that will be used for the query. + bqstorage_client: The BigQueryStorageClient that will be used for the query. + batch_size: The desired output batch size. + with_mask: If false the output If true each feature will be returned as a + dictionary with the keys 'values' and 'was_null'. The 'values' field is a + dense tensor with null values filled and the 'was_null' tensor is a dense + boolean tensor that is true where the 'values' tensor was null prior to + being filled. + nested: If true the output dictionary will have sub-dictionaries each with + the fields 'values' and 'was_null'. If false the output dictionary will + have keys '{feature_name}_values' and '{feature_name}_was_null' for each + feature. Note that this only applies if `with_mask` is true. + drop_remainder: If true no partial batches will be yielded. + page_size: the pagination size to use when retrieving data from BigQuery. A + large value can result in fewer BQ calls, hence time savings. + verbose: If true will print debugging messages using tf.print. These can be + used to help debug when this function is running and when cached data is + being used. + + Yields: + A dictionary of features from the query. If with_mask is true each feature + will have a dictionary with keys 'values' and 'was_null' where 'values' + contains a Tensor of the values from that column after null values have been + filled and 'was_null' is a boolean tensor + """ + # Page size doesn't seem to have any impact here. Bug in client? + # Instead, we will handle batching manually which is painful and slow. + + # It is recommended that legacy SQL not be used and this allows large results. 
+ # For more information see the following documentation: + # https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.allow_large_results + # https://cloud.google.com/bigquery/docs/reference/rest/v2/Job#JobConfigurationQuery.FIELDS.use_legacy_sql + query_config = bigquery.QueryJobConfig(use_legacy_sql=_USE_LEGACY_SQL) + if page_size is None: + page_size = batch_size + output_dataframes = ( + bq_client.query(query, job_config=query_config) + .result(page_size=page_size) + .to_dataframe_iterable(bqstorage_client=bqstorage_client) + ) + + yield from _get_output_from_df_iterator( + output_dataframes=output_dataframes, + metadata_container=metadata_container, + batch_size=batch_size, + with_mask=with_mask, + nested=nested, + drop_remainder=drop_remainder, + verbose=verbose, + ) + + +def tensor_output_signature_from_metadata( + metadata_container: feature_metadata.FeatureMetadataContainer, + with_mask: bool = _WITH_MASK_DEFAULT, + nested: bool = NESTED_FORMAT_DEFAULT, +) -> TensorAndMaskSpecType: + """Converts the input metadata into a TensorSpec for Dataset creation. + + Args: + metadata_container: The input feature metadata. + with_mask: Whether was_null will be included in the output. + nested: If true the output dictionary will have sub-dictionaries each with + the fields 'values' and 'was_null'. If false the output dictionary will + have keys '{feature_name}_values' and '{feature_name}_was_null' for each + feature. Note that this only applies if `with_mask` is true. + + Returns: + The TensorSpec corresponding to the expected output. + """ + output_spec = {} + for feature_md in metadata_container: + if not feature_md.tf_data_type: + raise ValueError( + 'The TF data type must be set before creating the output signature.' 
+ ) + name = feature_md.name + values_spec = tf.TensorSpec( + (None,), dtype=feature_md.tf_data_type, name=f'{name}_{VALUES_KEY}' + ) + if with_mask: + was_null_spec = tf.TensorSpec( + (None,), dtype=tf.bool, name=f'{feature_md.name}_{WAS_NULL_KEY}' + ) + if nested: + output_spec[name] = { + VALUES_KEY: values_spec, + WAS_NULL_KEY: was_null_spec, + } + else: + output_spec[f'{name}_{VALUES_KEY}'] = values_spec + output_spec[f'{name}_{WAS_NULL_KEY}'] = was_null_spec + else: + output_spec[name] = values_spec + + return output_spec + + +def _generate_query_from_metadata( + table_metadata: feature_metadata.BigQueryTableMetadata, + limit: Optional[int] = None, + where_clauses: Sequence[str] = tuple(), +) -> str: + """Generates a query from column names, where clauses and a limit.""" + select_fields_string = '`,`'.join(col.name for col in table_metadata) + limit_str = f' LIMIT {limit}' if limit else '' + where_str = bq_utils.where_statement_from_clauses(where_clauses) + return ( + f'SELECT `{select_fields_string}` FROM\n' + f'{table_metadata.escaped_table_id}{where_str}{limit_str}' + ) + + +def update_tf_data_types_from_bq_data_types( + metadata_container: feature_metadata.FeatureMetadataContainer, +) -> None: + """Updates each feature's tf_data_type based on the input_data_type. + + Args: + metadata_container: The metadata container to be updated. 
+ """ + for metadata in metadata_container: + if not metadata.tf_data_type: + metadata.tf_data_type = _TF_INFO_FROM_BQ_DTYPE[ + metadata.input_data_type + ].tf_dtype + + +def get_bigquery_dataset( + table_metadata: feature_metadata.BigQueryTableMetadata, + bq_client: bigquery.Client, + bqstorage_client: Optional[bigquery_storage.BigQueryReadClient] = None, + batch_size: int = 64, + with_mask: bool = _WITH_MASK_DEFAULT, + nested: bool = NESTED_FORMAT_DEFAULT, + limit: Optional[int] = None, + cache_location: Optional[str] = NO_CACHE_LOCATION_NAME, + where_clauses: Sequence[str] = tuple(), + drop_remainder: bool = False, + page_size: Optional[int] = None, +) -> tf.data.Dataset: + """Creates a Big Query dataset for the specified Table. + + Args: + table_metadata: The metadata for the BigQuery table. + bq_client: The BigQuery client that will be used for the query. + bqstorage_client: The BigQueryStorageClient that will be used for the query. + batch_size: The desired output batch size. + with_mask: If true each feature will be returned along with a tensor + indicating if the values were null before being filled. + nested: If true the output dictionary will have sub-dictionaries each with + the fields 'values' and 'was_null'. If false the output dictionary will + have keys '{feature_name}_values' and '{feature_name}_was_null' for each + feature. Note that this only applies if `with_mask` is true. + limit: Put a limit on the number of examples returned from the query. If + falsy all the results will be returned. + cache_location: If 'disk' the data will be cached to disk if None the data + will not be cached and otherwise the data will be cached to memory. + where_clauses: A list of clauses that can be combined with and statements to + get the correct values in the BigQuery table. + drop_remainder: If true no partial batches will be yielded. + page_size: the pagination size to use when retrieving data from BigQuery. 
A + large value can result in fewer BQ calls, hence time savings. + + Returns: + A tf.data.Dataset for the table. + """ + update_tf_data_types_from_bq_data_types(table_metadata) + tensor_spec = tensor_output_signature_from_metadata( + table_metadata, with_mask=with_mask, nested=nested + ) + + if limit or where_clauses: + query = _generate_query_from_metadata(table_metadata, limit, where_clauses) + tensor_generator_fn = functools.partial( + bigquery_query_batch_generator, + query=query, + metadata_container=table_metadata, + bq_client=bq_client, + bqstorage_client=bqstorage_client, + batch_size=batch_size, + with_mask=with_mask, + nested=nested, + drop_remainder=drop_remainder, + page_size=page_size, + ) + else: + tensor_generator_fn = functools.partial( + bigquery_table_batch_generator, + metadata_container=table_metadata, + bq_client=bq_client, + bqstorage_client=bqstorage_client, + batch_size=batch_size, + with_mask=with_mask, + nested=nested, + drop_remainder=drop_remainder, + page_size=page_size, + ) + + dataset = tf.data.Dataset.from_generator( + tensor_generator_fn, output_signature=tensor_spec + ) + if cache_location is not None and cache_location != NO_CACHE_LOCATION_NAME: + filename = tempfile.mkdtemp() if cache_location == 'disk' else '' + # This appears to give a false warning: b/194670791 + dataset = dataset.cache(filename) + + return dataset + + +def keras_input_from_metadata( + metadata_container: feature_metadata.FeatureMetadataContainer, + with_mask: bool = _WITH_MASK_DEFAULT, + nested: bool = NESTED_FORMAT_DEFAULT, +) -> Mapping[str, Union[Mapping[str, KerasInputType], KerasInputType]]: + """Creates Keras Input objects for input metadata. + + Args: + metadata_container: The metadata of the features to be used for input. + with_mask: Whether a mask is included in the dataset or not. See + get_bigquery_dataset for more information. + nested: If True assumes the input dictionaries are nested. Otherwise, + assumes that a flat dictionary is input. 
This is only relevant if
+      with_mask is true.
+
+  Returns:
+    A dictionary of tf.keras.Input objects that can be input into a
+    tf.keras.Model object.
+  """
+  model_input = {}
+  for metadata in metadata_container:
+    cur_name = metadata.name
+    # Keras layers must take in JSON serializable inputs.
+    values_input = tf.keras.Input(
+        (), name=f'{cur_name}_{VALUES_KEY}', dtype=metadata.tf_data_type_str
+    )
+    if with_mask:
+      was_null_input = tf.keras.Input(
+          (), name=f'{cur_name}_{WAS_NULL_KEY}', dtype='bool'
+      )
+      if nested:
+        model_input[cur_name] = {
+            VALUES_KEY: values_input,
+            WAS_NULL_KEY: was_null_input,
+        }
+      else:
+        model_input[f'{cur_name}_{VALUES_KEY}'] = values_input
+        model_input[f'{cur_name}_{WAS_NULL_KEY}'] = was_null_input
+    else:
+      model_input[cur_name] = values_input
+  return model_input
+
+
+def get_dataset_and_metadata_for_table(
+    table_path: Optional[str] = None,
+    table_parts: Optional[bq_utils.BQTablePathParts] = None,
+    bigquery_client: Optional[bigquery.Client] = None,
+    bigquery_storage_client: Optional[
+        bigquery_storage.BigQueryReadClient
+    ] = None,
+    metadata_options: Optional[
+        feature_metadata.MetadataRetrievalOptions
+    ] = None,
+    metadata_builder: Optional[feature_metadata.BigQueryMetadataBuilder] = None,
+    batch_size: int = 64,
+    with_mask: bool = _WITH_MASK_DEFAULT,
+    drop_remainder: bool = False,
+) -> Tuple[tf.data.Dataset, feature_metadata.BigQueryTableMetadata]:
+  """Gets the metadata and dataset for a BigQuery table.
+
+  Args:
+    table_path: The full path ('project.dataset.table') of the BigQuery table.
+    table_parts: The parsed portions of the BigQuery table path.
+    bigquery_client: The BigQuery Client object to use for getting the metadata.
+    bigquery_storage_client: The BigQuery storage client to use for the dataset.
+    metadata_options: The metadata retrieval options to use.
+    metadata_builder: The metadata builder to use to get the metadata.
+    batch_size: The batch size to use for the dataset. Default is 64.
+ with_mask: Whether the dataset should be returned with a mask format. For + more information see get_bigquery_dataset. + drop_remainder: If true no partial batches will be yielded. + + Returns: + A tuple of the output dataset and metadata for the specified table. + + Raises: + ValueError: If neither table_parts nor table_path are specified or both of + them are. + """ + if not (table_parts or table_path): + raise ValueError('Either table_parts or table_path must be specified.') + elif table_parts and table_path: + raise ValueError('Only one of table_parts or table_path can be specified.') + + if not table_parts: + table_parts = bq_utils.BQTablePathParts.from_full_path(table_path) + + if not bigquery_client: + bigquery_client = bigquery.Client(project=table_parts.project_id) + + if not bigquery_storage_client: + bigquery_storage_client = bigquery_storage.BigQueryReadClient() + + if not metadata_options: + metadata_options = feature_metadata.MetadataRetrievalOptions.get_none() + + if not metadata_builder: + metadata_builder = ( + feature_metadata.BigQueryMetadataBuilder.from_table_parts( + table_parts, bq_client=bigquery_client + ) + ) + + all_metadata = metadata_builder.get_metadata_for_all_features( + metadata_options + ) + # Refactor the code so that this extra call is not needed. + update_tf_data_types_from_bq_data_types(all_metadata) + + dataset = get_bigquery_dataset( + all_metadata, + bigquery_client, + bqstorage_client=bigquery_storage_client, + batch_size=batch_size, + with_mask=with_mask, + # Cache during prepare_dataset instead. 
+ cache_location=None, + where_clauses=metadata_options.where_clauses, + drop_remainder=drop_remainder, + ) + + return dataset, all_metadata diff --git a/automated_feature_engineering/bq_utils.py b/automated_feature_engineering/bq_utils.py new file mode 100644 index 00000000000..fa20106ee33 --- /dev/null +++ b/automated_feature_engineering/bq_utils.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Common utilities for interfacing with BigQuery.""" + +import dataclasses +import enum +import re +from typing import Mapping, Optional, Sequence + + +@dataclasses.dataclass +class BQTablePathParts: + """Data object with the parts of a full BigQuery table. + + Attributes: + project_id: The project id for the project that the BigQuery table is in. + bq_dataset_name: The name of the dataset where the table resides. + bq_table_name: The table's name. + """ + + project_id: str + bq_dataset_name: str + bq_table_name: str + + @property + def full_table_id(self): + """The full path 'project.dataset.table' of the BigQuery table.""" + return f"{self.project_id}.{self.bq_dataset_name}.{self.bq_table_name}" + + @property + def escaped_table_id(self): + """The full path of the BigQuery table escaped with `.""" + return f"`{self.full_table_id}`" + + @classmethod + def from_full_path(cls, full_big_query_path): + """Parses a full BigQuery path into components (project, dataset, table). 
+
+    Args:
+      full_big_query_path: The full path (e.g.
+        project_id.dataset_name.table_name) to a BigQuery table.
+
+    Returns:
+      A BQTablePathParts object.
+    """
+    project_prefix_pattern = re.compile(
+        r"`?(?P<project_id>[^.`]+)\."
+        r"(?P<bq_dataset_name>[^.`]+)"
+        r"(?:$|\.(?P<bq_table_name>[^.`]+)`?$)",
+        re.VERBOSE,
+    )
+
+    path_match = project_prefix_pattern.match(full_big_query_path)
+    if not path_match:
+      raise ValueError(
+          f"The BigQuery table path {full_big_query_path} did not have the "
+          "expected format of project_id.dataset_name.table_name"
+      )
+
+    output_dict = path_match.groupdict()
+    for name, match in output_dict.items():
+      if not match:
+        raise ValueError(
+            f"The {name} was not found in the BQ path {full_big_query_path}"
+        )
+
+    # Note that the regex match names must match the attribute names.
+    return cls(
+        project_id=output_dict["project_id"],
+        bq_dataset_name=output_dict["bq_dataset_name"],
+        bq_table_name=output_dict["bq_table_name"],
+    )
+
+
+def where_statement_from_clauses(
+    where_clauses, conjunction = "AND"
+):
+  """Constructs an optional where statement from list of conditions.
+
+  Args:
+    where_clauses: A sequence of strings each of which is a valid clause for the
+      table being queried.
+    conjunction: The conjunction used to join the clauses. Does not need to
+      include spaces as they will be added later.
+
+  Returns:
+    Either an empty string if there were no clauses or a where clause with
+    whitespace on both sides.
+ """ + if not where_clauses: + return "" + + join_str = f") {conjunction} (" + combined_clauses = f"({join_str.join(where_clauses)})" + return f"\nWHERE {combined_clauses} " + + +@enum.unique +class SplitColumnValues(enum.Enum): + TRAIN = enum.auto() + VALIDATE = enum.auto() + TEST = enum.auto() + + +def create_split_column_conditions( + split_column, +): + return { + mode: (f"{split_column} = '{mode.name}'",) if split_column else () + for mode in SplitColumnValues + } diff --git a/automated_feature_engineering/data_loader.py b/automated_feature_engineering/data_loader.py new file mode 100644 index 00000000000..dcd71e1302c --- /dev/null +++ b/automated_feature_engineering/data_loader.py @@ -0,0 +1,1309 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data loader functions to read various tabular datasets.""" + +import os +from typing import Any, List, Optional, Tuple + +import numpy as np +import pandas as pd +from PIL import Image +from sklearn import datasets as sk_datasets +from sklearn import model_selection +from sklearn import preprocessing +import tensorflow as tf + + +DATA_DIR = "gs://data-imputation/" +LOCAL_CACHE_DIR = "./" +DataReturnType = Tuple[ + pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame, bool, Any, Any +] +_EMPTY_NP_ARRAY = np.empty(0) +_IMPUTE_VALUE = "0" + + +def load_ames_housing( + seed = 44, test_size_ratio = 0.2 +): + """Load Ames housing dataset. Regression. 
(1460, 80)."""
+  housing = sk_datasets.fetch_openml(name="house_prices", as_frame=True)
+
+  x = housing.data
+  y = housing.target
+
+  x_train, x_test, y_train, y_test = (
+      model_selection.train_test_split(
+          x, y, test_size=test_size_ratio, random_state=seed
+      )
+  )
+
+  is_classification = False
+
+  print("Data loaded...")
+  print("Column names:")
+  print(x_train.columns)
+
+  return (
+      x_train,
+      x_test,
+      y_train,
+      y_test,
+      is_classification,
+      None,
+      _EMPTY_NP_ARRAY,
+  )
+
+
+def generate_feature_engineering_synthetic_data(
+    num_samples = 10000,
+    num_features = 5,
+    seed = None,
+    test_size_ratio = 0.2,
+    zipf_distribution = 10.0,
+    triangle_left = 0,
+    triangle_center = 5,
+    triangle_right = 6,
+):
+  """Generates synthetic dataset for regression task.
+
+  Args:
+    num_samples: number of samples to generate.
+    num_features: feature dimension in generated dataset.
+    seed: random number generator seed.
+    test_size_ratio: proportion of data used for test set.
+    zipf_distribution: distribution parameter for the zipf distribution.
+    triangle_left: left bound for the triangular distribution.
+    triangle_center: center for the triangular distribution.
+    triangle_right: right bound for the triangular distribution.
+
+  Returns:
+    Generated synthetic dataset and pertinent metadata.
+ """ + + x = np.random.uniform(low=-1, high=1, size=(num_samples, num_features - 2)) + x1 = np.random.zipf(a=zipf_distribution, size=(num_samples, 1)) + x2 = np.reshape( + np.random.triangular( + triangle_left, triangle_center, triangle_right, num_samples + ), + (-1, 1), + ) + + features = np.concatenate([x, x1, x2], axis=-1) + labels = np.exp(x2[:, -1]) + + features = pd.DataFrame(features) + labels = pd.Series(labels) + + x_train, x_test, y_train, y_test = model_selection.train_test_split( + features, labels, test_size=test_size_ratio, random_state=seed + ) + + print("Synthetic data loaded...") + + return ( + x_train, + x_test, + y_train, + y_test, + False, + None, + _EMPTY_NP_ARRAY, + ) + + +def generate_feature_engineering_synthetic_data_temporal( + num_samples = 100, + target_column = "label", + temporal_lag = 7, + context_window_len = 10, + seed = None, + test_size_ratio = 0.2, +): + """Generates temporal synthetic dataset for regression task. + + Args: + num_samples: number of samples to generate. + target_column: name of the target column. + temporal_lag: time lag along the temporal dimension used for label creation. + context_window_len: size of the time series prediction context window. + seed: random number generator seed. + test_size_ratio: proportion of data used for test set. + + Returns: + Generated temporal synthetic dataset and pertinent metadata. 
+ """ + + temporal_lag = min(temporal_lag, context_window_len) + category = ["cat"] * num_samples + event = ["event"] * num_samples + num_random_samples = num_samples + context_window_len + all_price = np.random.randint(low=0, high=100, size=(num_random_samples,)) + prices = [] + labels = [] + for i in range(num_samples): + prices.append(str(all_price[i : i + context_window_len].tolist())) + labels.append(str(all_price[i + context_window_len - temporal_lag])) + + features = { + "category": category, + "price": prices, + "event": event, + target_column: labels, + } + + features = pd.DataFrame(features) + labels = pd.Series(labels) + + x_train, x_test, y_train, y_test = model_selection.train_test_split( + features, labels, test_size=test_size_ratio, random_state=seed + ) + + print("Temporal synthetic data loaded...") + + return ( + x_train, + x_test, + y_train, + y_test, + False, + None, + _EMPTY_NP_ARRAY, + ) + + +def load_m5( + target_col, + seed = 4, + test_size_ratio = 0.2, + use_rows = 2000, +): + """Loads the m5 sample dataset generated by the FTE. + + This dataset presents sequential features as strings of arrays. + + Args: + target_col: name of the target column. + seed: seed for random number generator. + test_size_ratio: ratio used to allocate test set. + use_rows: number of rows to use for training and eval. + + Returns: + Train and test data, with associated properties. + """ + cache_filepath = "m5_vertex_ai_fte_split_output_train_staging_100k" + data_path = os.path.join(LOCAL_CACHE_DIR, cache_filepath) + + if not os.path.isfile(data_path): + tf.io.gfile.copy( + os.path.join(DATA_DIR, cache_filepath), data_path, overwrite=True + ) + + # Use a test set to ensure numerical columns are loaded as float32. 
+ df_test = pd.read_csv(data_path, nrows=200) + float_cols = [c for c in df_test if df_test[c].dtype == "float64"] + float32_cols = {c: np.float32 for c in float_cols} + + dataset = pd.read_csv( + data_path, engine="c", dtype=float32_cols, nrows=use_rows + ) + dataset = dataset.replace("__MISSING__", _IMPUTE_VALUE) + + # Note this x includes all feature columns, as the historical "sales" + # data are used as a feature as well. + x = dataset + y = dataset[target_col].to_frame() + + x_train, x_test, y_train, y_test = model_selection.train_test_split( + x, y, test_size=test_size_ratio, random_state=seed + ) + + is_classification = False + num_classes = None + + print("Data loaded...") + print("Column names:") + print(x_train.columns) + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def load_fraud( + seed = 4, test_size_ratio = 0.2, use_rows = 600000 +): + """Loads the IEEE fraud detection dataset of size (600000, 871). + + Data is downloaded from: + https://www.kaggle.com/competitions/ieee-fraud-detection/data. + + Args: + seed: seed for random number generator. + test_size_ratio: ratio used to allocate test set. + use_rows: number of rows to use for training and eval. + + Returns: + Train and test data, with associated properties. + """ + + cache_filepath = "train_transaction_identity.csv" + if not os.path.isfile(cache_filepath): + tf.io.gfile.copy( + os.path.join(DATA_DIR, cache_filepath), + os.path.join(LOCAL_CACHE_DIR, cache_filepath), + overwrite=True, + ) + + data_path = os.path.join(LOCAL_CACHE_DIR, cache_filepath) + + # Use a test set to ensure numerical columns are loaded as float32. 
+ df_test = pd.read_csv(data_path, nrows=200) + float_cols = [c for c in df_test if df_test[c].dtype == "float64"] + float32_cols = {c: np.float32 for c in float_cols} + + dataset = pd.read_csv( + data_path, engine="c", dtype=float32_cols, nrows=use_rows + ) + + x = dataset.iloc[:, 2:] + y = dataset.iloc[:, 1] + x_train, x_test, y_train, y_test = model_selection.train_test_split( + x, y, test_size=test_size_ratio, random_state=seed + ) + + is_classification = True + num_classes = 2 + + print("Data loaded...") + print("Column names:") + print(x_train.columns) + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def load_synthetic( + num_samples = 100000, + num_salient_features = 100, + num_features = 10000, + seed = 4, + test_size_ratio = 0.2, + noise_magnitude = 0.2, +): + """Generate synthetic data with desired attributes. + + Args: + num_samples: number of samples to generate. + num_salient_features: number of non-noise features. + num_features: total number of features. + seed: seed for random number generator. + test_size_ratio: ratio of test set out of all samples generated. + noise_magnitude: magnitude of noise to add. + + Returns: + Generated train and test datasets, and their attributes. + """ + cache_filepath = ( + "synthetic_data_" + + str(num_samples) + + "_" + + str(num_features) + + "_" + + str(num_salient_features) + + ".npz" + ) + try: + loaded_data = np.load(cache_filepath) + input_features = loaded_data["input_features"] + salient_features = loaded_data["salient_features"] + output_labels = loaded_data["output_labels"] + except OSError: + # cache_filepath does not exist. 
+ input_features = np.random.uniform( + low=-1, high=1, size=(num_samples, num_features) + ) + + salient_features = np.random.choice(num_features, num_salient_features) + np.random.shuffle(salient_features) + + subset1 = salient_features[: num_salient_features // 5] + subset2 = salient_features[ + num_salient_features // 5 : 2 * num_salient_features // 5 + ] + subset3 = salient_features[ + 2 * num_salient_features // 5 : 3 * num_salient_features // 5 + ] + subset4 = salient_features[ + 3 * num_salient_features // 5 : 4 * num_salient_features // 5 + ] + subset5 = salient_features[4 * num_salient_features // 5 :] + + # Various nonlinear functions that depend on different salient features. + term1 = np.mean(np.exp(input_features[:, subset1] - 1), axis=1) + + term2 = np.exp( + np.mean(np.abs(np.sin(2 * np.pi * input_features[:, subset2])), axis=1) + ) + + term3 = np.mean(-1.0 * np.log(1.1 + input_features[:, subset3]), axis=1) + + term4 = np.mean(input_features[:, subset4], axis=1) + + term5 = 1.0 / ( + 1.0 + np.mean(np.abs(np.tanh(input_features[:, subset5])), axis=1) + ) + + # Construct the logit + aggregate_term = 1.0 * ((term1 + term2 + term3 + term4 + term5) - 3) + + # Add noise + noise = np.random.randn(num_samples) + aggregate_term += noise_magnitude * noise + output_labels = (aggregate_term > 0).astype(int) + + np.savez( + cache_filepath, + input_features=input_features, + salient_features=salient_features, + output_labels=output_labels, + ) + + x = pd.DataFrame(input_features) + y = pd.DataFrame(output_labels).iloc[:, 0] + + x_train, x_test, y_train, y_test = model_selection.train_test_split( + x, y, test_size=test_size_ratio, random_state=seed + ) + + # TODO(yihed): Generalize to regression as well. 
+ is_classification = True + num_classes = 2 + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + salient_features, + ) + + +def load_safe(seed = 4, test_size_ratio = 0.2): + """Loads the safe dataset.""" + + # Data is downloaded from here: + # https://www.kaggle.com/competitions/porto-seguro-safe-driver-prediction/data. + + cache_filepath = "safe_train.csv" + if not os.path.isfile(cache_filepath): + tf.io.gfile.copy( + os.path.join(DATA_DIR, cache_filepath), + os.path.join(LOCAL_CACHE_DIR, cache_filepath), + overwrite=True, + ) + + dataset = pd.read_csv(os.path.join(LOCAL_CACHE_DIR, cache_filepath)) + x = dataset.iloc[:, 2:] + y = dataset.iloc[:, 1] + + x_train, x_test, y_train, y_test = model_selection.train_test_split( + x, y, test_size=test_size_ratio, random_state=seed + ) + + is_classification = True + num_classes = 2 + + print("Data loaded...") + print("Column names:") + print(x_train.columns) + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def load_year(seed = 4, test_size_ratio = 0.2): + """Loads the year dataset.""" + + # Data is downloaded from here: + # https://archive.ics.uci.edu/ml/datasets/yearpredictionmsd. 
+ + cache_filepath = "YearPredictionMSD.txt" + if not os.path.isfile(cache_filepath): + tf.io.gfile.copy( + os.path.join(DATA_DIR, cache_filepath), + os.path.join(LOCAL_CACHE_DIR, cache_filepath), + overwrite=True, + ) + + dataset = pd.read_csv( + os.path.join(LOCAL_CACHE_DIR, cache_filepath), header=None + ) + x = dataset.iloc[:, 1:] + y = dataset.iloc[:, 0] + + x_train, x_test, y_train, y_test = model_selection.train_test_split( + x, y, test_size=test_size_ratio, random_state=seed + ) + + is_classification = False + + print("Data loaded...") + print("Column names:") + print(x_train.columns) + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + None, + _EMPTY_NP_ARRAY, + ) + + +def load_mice(): + """Loads the Mice dataset, adapted from: https://github.com/lasso-net/lassonet/blob/master/experiments/data_utils.py.""" + + filling_value = -100000 + cache_filepath = "Data_Cortex_Nuclear.csv" + if not os.path.isfile(cache_filepath): + tf.io.gfile.copy( + os.path.join(DATA_DIR + "MICE/", cache_filepath), + os.path.join(LOCAL_CACHE_DIR, cache_filepath), + overwrite=True, + ) + + x = np.genfromtxt( + cache_filepath, + delimiter=",", + skip_header=1, + usecols=range(1, 78), + filling_values=filling_value, + encoding="UTF-8", + ) + classes = np.genfromtxt( + cache_filepath, + delimiter=",", + skip_header=1, + usecols=range(78, 81), + dtype=None, + encoding="UTF-8", + ) + + for i, row in enumerate(x): + for j, val in enumerate(row): + if val == filling_value: + x[i, j] = np.mean([ + x[k, j] + for k in range(classes.shape[0]) + if np.all(classes[i] == classes[k]) + ]) + + y = np.zeros((classes.shape[0]), dtype=np.uint8) + for i, row in enumerate(classes): + for j, (val, label) in enumerate(zip(row, ["Control", "Memantine", "C/S"])): + y[i] += (2**j) * (val == label) + + x = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit_transform(x) + + indices = np.arange(x.shape[0]) + np.random.shuffle(indices) + x = x[indices] + y = y[indices] + classes = 
classes[indices] + + print("Data loaded...") + print("Data shapes:") + print("x shape: {}, y shape: {}".format(x.shape, y.shape)) + + is_classification = True + num_classes = 8 + + x_train = pd.DataFrame(x[: x.shape[0] * 4 // 5]) + x_test = pd.DataFrame(x[x.shape[0] * 4 // 5 :]) + y_train = pd.DataFrame(y[: x.shape[0] * 4 // 5], dtype=np.int32).iloc[:, 0] + y_test = pd.DataFrame(y[x.shape[0] * 4 // 5 :], dtype=np.int32).iloc[:, 0] + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def load_isolet(): + """Loads the Isolet dataset, adapted from: https://github.com/lasso-net/lassonet/blob/master/experiments/data_utils.py.""" + + cache_filepath_train = "isolet1+2+3+4.data" + cache_filepath_test = "isolet5.data" + if not os.path.isfile(cache_filepath_train): + tf.io.gfile.copy( + os.path.join(DATA_DIR + "isolet/", cache_filepath_train), + os.path.join(LOCAL_CACHE_DIR, cache_filepath_train), + overwrite=True, + ) + if not os.path.isfile(cache_filepath_test): + tf.io.gfile.copy( + os.path.join(DATA_DIR + "isolet/", cache_filepath_test), + os.path.join(LOCAL_CACHE_DIR, cache_filepath_test), + overwrite=True, + ) + + x_train = np.genfromtxt( + cache_filepath_train, + delimiter=",", + usecols=range(0, 617), + encoding="UTF-8", + ) + y_train = np.genfromtxt( + cache_filepath_train, delimiter=",", usecols=[617], encoding="UTF-8" + ) + x_test = np.genfromtxt( + cache_filepath_test, + delimiter=",", + usecols=range(0, 617), + encoding="UTF-8", + ) + y_test = np.genfromtxt( + cache_filepath_test, delimiter=",", usecols=[617], encoding="UTF-8" + ) + + x = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit_transform( + np.concatenate((x_train, x_test)) + ) + x_train = x[: len(y_train)] + x_test = x[len(y_train) :] + + print("Data loaded...") + print("Data shapes:") + print(x_train.shape, y_train.shape) + print(x_test.shape, y_test.shape) + + is_classification = True + num_classes = 26 + + x_train = 
pd.DataFrame(x_train) + x_test = pd.DataFrame(x_test) + y_train = pd.DataFrame(y_train - 1, dtype=np.int32).iloc[:, 0] + y_test = pd.DataFrame(y_test - 1, dtype=np.int32).iloc[:, 0] + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def load_activity(): + """Loads the Activity dataset, adapted from: https://github.com/lasso-net/lassonet/blob/master/experiments/data_utils.py.""" + + cache_filepath_train_x = "final_X_train.txt" + cache_filepath_train_y = "final_y_train.txt" + cache_filepath_test_x = "final_X_test.txt" + cache_filepath_test_y = "final_y_test.txt" + if not os.path.isfile(cache_filepath_train_x): + tf.io.gfile.copy( + os.path.join(DATA_DIR + "dataset_uci/", cache_filepath_train_x), + os.path.join(LOCAL_CACHE_DIR, cache_filepath_train_x), + overwrite=True, + ) + if not os.path.isfile(cache_filepath_train_y): + tf.io.gfile.copy( + os.path.join(DATA_DIR + "dataset_uci/", cache_filepath_train_y), + os.path.join(LOCAL_CACHE_DIR, cache_filepath_train_y), + overwrite=True, + ) + if not os.path.isfile(cache_filepath_test_x): + tf.io.gfile.copy( + os.path.join(DATA_DIR + "dataset_uci/", cache_filepath_test_x), + os.path.join(LOCAL_CACHE_DIR, cache_filepath_test_x), + overwrite=True, + ) + if not os.path.isfile(cache_filepath_test_y): + tf.io.gfile.copy( + os.path.join(DATA_DIR + "dataset_uci/", cache_filepath_test_y), + os.path.join(LOCAL_CACHE_DIR, cache_filepath_test_y), + overwrite=True, + ) + x_train = np.genfromtxt(cache_filepath_train_x, encoding="UTF-8") + x_test = np.genfromtxt(cache_filepath_test_x, encoding="UTF-8") + y_train = np.genfromtxt(cache_filepath_train_y, encoding="UTF-8") + y_test = np.genfromtxt(cache_filepath_test_y, encoding="UTF-8") + + x = preprocessing.MinMaxScaler(feature_range=(0, 1)).fit_transform( + np.concatenate((x_train, x_test)) + ) + x_train = x[: len(y_train)] + x_test = x[len(y_train) :] + + print("Data loaded...") + print("Data shapes:") + 
print(x_train.shape, y_train.shape) + print(x_test.shape, y_test.shape) + + is_classification = True + num_classes = 6 + + x_train = pd.DataFrame(x_train) + x_test = pd.DataFrame(x_test) + y_train = pd.DataFrame(y_train - 1, dtype=np.int32).iloc[:, 0] + y_test = pd.DataFrame(y_test - 1, dtype=np.int32).iloc[:, 0] + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def load_coil(): + """Loads the Coil dataset, adapted from: https://github.com/lasso-net/lassonet/blob/master/experiments/data_utils.py.""" + + processed_data_filename = "coil_data.npz" + # Instead of processing the image data again, save the processed data for + # efficiency of subsequent runs. + try: + loaded_data = np.load(LOCAL_CACHE_DIR + processed_data_filename) + data = np.float32(loaded_data["data"]) + targets = np.float32(loaded_data["targets"]) + except IOError: + samples = [] + os.makedirs(LOCAL_CACHE_DIR + "coil-20-proc/", exist_ok=True) + for i in range(1, 21): + for image_index in range(72): + image_filename = "obj%d__%d.png" % (i, image_index) + tf.io.gfile.copy( + os.path.join(DATA_DIR + "coil-20-proc/", image_filename), + os.path.join(LOCAL_CACHE_DIR + "coil-20-proc/", image_filename), + overwrite=True, + ) + obj_img = Image.open( + os.path.join(LOCAL_CACHE_DIR + "coil-20-proc/", image_filename) + ) + rescaled = obj_img.resize((20, 20)) + pixels_values = [float(x) for x in list(rescaled.getdata())] + sample = np.array(pixels_values + [i]) + samples.append(sample) + samples = np.array(samples) + np.random.shuffle(samples) + data = samples[:, :-1] + targets = (samples[:, -1] + 0.5).astype(np.int64) + data = (data - data.min()) / (data.max() - data.min()) + np.savez( + LOCAL_CACHE_DIR + processed_data_filename, data=data, targets=targets + ) + + l = data.shape[0] * 4 // 5 + x_train = data[:l] + y_train = targets[:l] - 1 + x_test = data[l:] + y_test = targets[l:] - 1 + + is_classification = True + num_classes = 20 + + x_train 
= pd.DataFrame(x_train)
+  x_test = pd.DataFrame(x_test)
+  y_train = pd.DataFrame(y_train, dtype=np.int32).iloc[:, 0]
+  y_test = pd.DataFrame(y_test, dtype=np.int32).iloc[:, 0]
+
+  return (
+      x_train,
+      x_test,
+      y_train,
+      y_test,
+      is_classification,
+      num_classes,
+      _EMPTY_NP_ARRAY,
+  )
+
+
+def load_data(
+    fashion = False, digit = None, normalize = False
+):
+  """Loads the data for image datasets."""
+
+  if fashion:
+    (x_train, y_train), (x_test, y_test) = (
+        tf.keras.datasets.fashion_mnist.load_data()
+    )
+  else:
+    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
+
+  if digit is not None and 0 <= digit and digit <= 9:
+    # Use two distinct dicts: `train = test = {...}` would alias a single
+    # dict and merge test samples into the train split (data leakage).
+    train, test = {y: [] for y in range(10)}, {y: [] for y in range(10)}
+    for x, y in zip(x_train, y_train):
+      train[y].append(x)
+    for x, y in zip(x_test, y_test):
+      test[y].append(x)
+
+    for y in range(10):
+
+      train[y] = np.asarray(train[y])
+      test[y] = np.asarray(test[y])
+
+    x_train = np.asarray(train[digit])
+    x_test = np.asarray(test[digit])
+
+  x_train = x_train.reshape((-1, x_train.shape[1] * x_train.shape[2])).astype(
+      np.float32
+  )
+  x_test = x_test.reshape((-1, x_test.shape[1] * x_test.shape[2])).astype(
+      np.float32
+  )
+
+  if normalize:
+    x = np.concatenate((x_train, x_test))
+    x = (x - x.min()) / (x.max() - x.min())
+    x_train = x[: len(y_train)]
+    x_test = x[len(y_train) :]
+
+  return (x_train, y_train), (x_test, y_test)
+
+
+def load_mnist():
+  """Loads the MNIST dataset, adapted from: https://github.com/lasso-net/lassonet/blob/master/experiments/data_utils.py."""
+
+  train, test = load_data(fashion=False, normalize=True)
+
+  is_classification = True
+  num_classes = 10
+
+  x_train = pd.DataFrame(train[0])
+  x_test = pd.DataFrame(test[0])
+  y_train = pd.DataFrame(train[1], dtype=np.int32).iloc[:, 0]
+  y_test = pd.DataFrame(test[1], dtype=np.int32).iloc[:, 0]
+
+  print("Data loaded...")
+  print("Data shapes:")
+  print(x_train.shape, y_train.shape)
+  print(x_test.shape, y_test.shape)
+
+  return (
+      x_train,
+      x_test,
+      y_train,
y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def load_fashion(): + """Loads the Fashion dataset, adapted from: https://github.com/lasso-net/lassonet/blob/master/experiments/data_utils.py.""" + + train, test = load_data(fashion=True, normalize=True) + + is_classification = True + num_classes = 10 + + x_train = pd.DataFrame(train[0]) + x_test = pd.DataFrame(test[0]) + y_train = pd.DataFrame(train[1], dtype=np.int32).iloc[:, 0] + y_test = pd.DataFrame(test[1], dtype=np.int32).iloc[:, 0] + + print("Data loaded...") + print("Data shapes:") + print(x_train.shape, y_train.shape) + print(x_test.shape, y_test.shape) + + return ( + x_train, + x_test, + y_train, + y_test, + is_classification, + num_classes, + _EMPTY_NP_ARRAY, + ) + + +def get_mtime(path): + """Gets file modification time for the path. + + Args: + path: whose mtime is needed + + Returns: + mtime in nanseconds (type int) + """ + stats = tf.io.gfile.stat(path) + return int(stats.mtime_nsec) + + +def is_more_recent(path_of_interest, path_to_compare_with): + """Determines if the path of interest is more recent than path to compare. + + Args: + path_of_interest: path to test + path_to_compare_with: path to compare with + + Returns: + True if path of interest is more recent + """ + return get_mtime(path_of_interest) > get_mtime(path_to_compare_with) + + +def ensure_source_file_exists(path): + """Ensures path exists. + + Args: + path: path to test + + Raises: + FileNotFoundError: + """ + if not os.path.isfile(path) and not tf.io.gfile.exists(path): + raise FileNotFoundError(path + " does not exist") + if tf.io.gfile.isdir(path): + raise FileNotFoundError(path + " is not a file") + + +def create_parent_dirs(path, mode = 0o777): + """Creates parent directories for a given path. 
+
+  Args:
+    path: path whose parents need to be created if not existent
+    mode: mode with which the parents should be created
+
+  Raises:
+    FileExistsError: if parent exists and is not a directory
+  """
+  d = os.path.dirname(path)
+  if os.path.isfile(d):
+    raise FileExistsError(d + " exists and is not a dir")
+  if os.path.isdir(d):
+    return
+  os.makedirs(d, mode=mode, exist_ok=True)
+
+
+def update_cached_file(data_file, cache_file):
+  """Updates cache file if does not exist or source is more recent.
+
+  Args:
+    data_file: original file that needs to be copied into cache
+    cache_file: cache file that needs to be as recent as data file
+  """
+  ensure_source_file_exists(data_file)
+  create_parent_dirs(cache_file, 0o777)
+  if not os.path.isfile(cache_file) or not is_more_recent(
+      cache_file, data_file
+  ):
+    if tf.io.gfile.exists(cache_file):
+      tf.io.gfile.remove(cache_file)
+    tf.io.gfile.copy(data_file, cache_file)
+
+
+def update_cache(data_dir, cache_dir, files):
+  """Updates multiple cache files that are listed in arguments.
+
+  Args:
+    data_dir: source data dir from where the files need to be copied
+    cache_dir: directory into which the files need to be copied
+    files: list of files that need to be copied
+  """
+  if isinstance(files, str):  # bare string means one file, not per-char paths
+    update_cached_file(
+        os.path.join(data_dir, files), os.path.join(cache_dir, files)
+    )
+    return
+  for file in files:
+    update_cached_file(
+        os.path.join(data_dir, file), os.path.join(cache_dir, file)
+    )
+
+
+def separate_data_and_labels(
+    df, label
+):
+  """Splits the dataframe into data and label dataframes. 
+ + Args: + df: to split + label: name of label column + + Returns: + data and label dataframes + """ + label_df = df[label] + data_df = df.drop(label, axis=1) + return (data_df, label_df) + + +def date_to_year_month_day( + df, + date_col, + year_col = "Year", + month_col = "Month", + day_col = "Day", + drop_date_col = True, +): + """Extracts year, month and day from date column and add to frame. + + Args: + df: data frame to be modified + date_col: name of the column that is date column + year_col: name of the new year column + month_col: name of the new month column + day_col: name of hte new day column + drop_date_col: delete date column or not + + Returns: + data frame with columns for year, month and day + """ + year = df[date_col].dt.year + month = df[date_col].dt.month + day = df[date_col].dt.day + df[year_col] = year + df[month_col] = month + df[day_col] = day + if drop_date_col: + df = df.drop(date_col, axis=1) + return df + + +def create_mask_columns( + df, + source_col, + target_col_names, + based_on_values, + drop_source_col = True, +): + """spread a column into number of columns based on contents. 
+ + Args: + df: dataframe containing the source column + source_col: of the source column + target_col_names: list of new target columns to create + based_on_values: list of matching values for each of target columns + drop_source_col: whether to drop source column or not + + Returns: + dataframe with mask columns + + Raises: + ValueError: if months and based on values are not of same length + """ + if len(target_col_names) != len(based_on_values): + raise ValueError( + "target_col_names and based_on_values must have the same length" + ) + for c, v in zip(target_col_names, based_on_values): + + def has(x): + if isinstance(x, float): + return 0 + # pylint: disable=cell-var-from-loop + return 1 if v in x else 0 + + df[c] = df[source_col].apply(has) + if drop_source_col: + df = df.drop(source_col, axis=1) + return df + + +def create_month_list(use_full_names = False): + """Returns: list of months.""" + if not use_full_names: + return [ + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec", + ] + return [ + "January", + "February", + "March", + "April", + "May", + "June", + "July", + "August", + "September", + "October", + "November", + "December", + ] + + +def load_diabetes( + seed = 44, + test_size_ratio = 0.2, + nrows = None, + data_dir = os.path.join(DATA_DIR, "diabetes"), + cache_dir = os.path.join(LOCAL_CACHE_DIR, "diabetes"), + discard_cols = None, +): + """Loads kaggle diabetes data. + + Args: + seed: random state for train test split. use same value across multiple runs + to consistently generate same shuffle order while splitting data + test_size_ratio: ratio of test data set to total data set lengths + nrows: number of rows to use, if not provided, all rows are used data_dir : + source directory containing diabetes data + data_dir: source directory for data + cache_dir: cache directory to copy data into. 
Default value is curre nt dir + discard_cols: list of column names to discard from data + + Returns: + train, test data and labels + whether data is for classification problems or not + number of classes if this is for classification + an empty numpy array + + Raises: + KeyError: if label column is not found + """ + label = "Outcome" + file = "diabetes.csv" + dest_file = os.path.join(cache_dir, file) + update_cache(data_dir, cache_dir, [file]) + df = pd.read_csv(dest_file, nrows=nrows) + if discard_cols: + df = df.drop(columns=discard_cols) + if label not in df.columns: + raise KeyError(label + " not found") + data_df, label_df = separate_data_and_labels(df, label) + x_train, x_test, y_train, y_test = model_selection.train_test_split( + data_df, label_df, random_state=seed, test_size=test_size_ratio + ) + return (x_train, x_test, y_train, y_test, True, 2, _EMPTY_NP_ARRAY) + + +def build_criteo_display_ads_columns( + num_numeric_cols = 13, num_categorical_cols = 26 +): + """Builds columns for criteo data as it does not have labels. + + Args: + num_numeric_cols: number of integer columns, default is 13 + num_categorical_cols: number of categorical columns, default is 26 + + Returns: + a list of column names including a label column named as Label + """ + cols = ["Label"] + cols.extend([f"numeric_{i}" for i in range(num_numeric_cols)]) + cols.extend([f"categorical_{i}" for i in range(num_categorical_cols)]) + return cols + + +def load_criteo_ads_display( + seed = 44, + test_size_ratio = 0.2, + nrows = None, + data_dir = os.path.join(DATA_DIR, "criteo_display_ads"), + cache_dir = os.path.join(LOCAL_CACHE_DIR, "criteo_display_ads"), + discard_cols = None, +): + """Loads criteo display ads data. + + Criteo data is tab separate and there are no headers in the datafile. + + Args: + seed: random state for train test split. 
use same value across multiple runs + to consistently generate same shuffle order while splitting data + test_size_ratio: ratio of test data set to total data set lengths + nrows: number of rows to use, if not provided, all rows are used + data_dir: source of test data. default value is gs://data-imputation + cache_dir: cache directory to copy data into. Default value is current dir + discard_cols: list of column indices to discard from data + + Returns: + train, test data and labels + whether data is for classification problems or not + number of classes if this is for classification + an empty numpy array + + Raises: + KeyError: if label column is not found + """ + file = "train.txt" + dest_file = os.path.join(cache_dir, file) + update_cache(data_dir, cache_dir, [file]) + columns = build_criteo_display_ads_columns(13, 26) + df = pd.read_csv(dest_file, nrows=nrows, sep="\t", names=columns) + label = df.columns[0] + for i in range(1, 14): + df[columns[i]] = df[columns[i]].astype("Int64") + if discard_cols: + df = df.drop(df.columns[discard_cols], axis=1) + if label not in df.columns: + raise KeyError(label + " not found") + df = df.dropna() + data_df, label_df = separate_data_and_labels(df, label) + x_train, x_test, y_train, y_test = model_selection.train_test_split( + data_df, label_df, random_state=seed, test_size=test_size_ratio + ) + return (x_train, x_test, y_train, y_test, True, 2, _EMPTY_NP_ARRAY) + + +def load_rossmann_sales_data( + seed = 44, + test_size_ratio = 0.2, + nrows = None, + data_dir = os.path.join(DATA_DIR, "rossmann"), + cache_dir = os.path.join(LOCAL_CACHE_DIR, "rossmann"), + discard_cols = None, + discard_store_cols = None, + label = "Sales", +): + """Loads Rossmann's sales data. + + Rossman's data is made of 2 files, train.csv and store.csv. This + loader joins the 2 files as store.csv provides information per store + and train.csv provides temporal store data. + + Args: + seed: random state for train test split. 
use same value across multiple runs + to consistently generate same shuffle order while splitting data + test_size_ratio: ratio of test data set to total data set lengths + nrows: number of rows to use, if not provided, all rows are used + data_dir: source of test data. default value is gs://data-imputation + cache_dir: cache directory to copy data into. Default value is current dir + discard_cols: list of column names to discard from data + discard_store_cols: list of store file column names to discard from data + label: name of the label column + + Returns: + train, test data and labels + whether data is for classification problems or not + number of classes if this is for classification + an empty numpy array + + Raises: + KeyError: if label column is not found + """ + files = ["train.csv", "store.csv"] + dest_file = os.path.join(cache_dir, files[0]) + update_cache(data_dir, cache_dir, files) + df = pd.read_csv(dest_file, nrows=nrows) + if discard_cols: + df = df.drop(columns=discard_cols) + if label not in df.columns: + raise KeyError(label + " not found") + store_df = pd.read_csv(os.path.join(cache_dir, files[1])) + if discard_store_cols: + store_df = store_df.drop(columns=discard_store_cols) + df = df.merge(store_df, on="Store", how="left") + df["StateHoliday"] = df["StateHoliday"].astype("str") + df["SchoolHoliday"] = df["SchoolHoliday"].astype("str") + df["Date"] = pd.to_datetime(df["Date"]) + df = create_mask_columns( + df, "PromoInterval", create_month_list(), create_month_list(), True + ) + df = date_to_year_month_day(df, "Date") + df = df.dropna() + data_df, label_df = separate_data_and_labels(df, label) + x_train, x_test, y_train, y_test = model_selection.train_test_split( + data_df, label_df, random_state=seed, test_size=test_size_ratio + ) + return (x_train, x_test, y_train, y_test, False, None, _EMPTY_NP_ARRAY) + + +def load_kaggle_heart_disease_prediction( + seed = 44, + test_size_ratio = 0.2, + nrows = None, + data_dir = os.path.join(DATA_DIR, 
"kaggle_heart_disease_prediction"), + cache_dir = os.path.join( + LOCAL_CACHE_DIR, "kaggle_heart_disease_prediction" + ), + discard_cols = None, +): + """Load kaggle heart disease data. + + Args: + seed: random state for train test split. use same value across multiple runs + to consistently generate same shuffle order while splitting data + test_size_ratio: ratio of test data set to total data set lengths + nrows: number of rows to use, if not provided, all rows are used + data_dir: source of test data. default value is gs://data-imputation + cache_dir: cache directory to copy data into. Default value is current dir + discard_cols: list of column names to discard from data + + Returns: + train, test data and labels + whether data is for classification problems or not + number of classes if this is for classification + an empty numpy array + + Raises: + KeyError: if label column is not found + """ + label = "Heart Disease" + file = "Heart_Disease_Prediction.csv" + dest_file = os.path.join(cache_dir, file) + update_cache(data_dir, cache_dir, [file]) + df = pd.read_csv(dest_file, nrows=nrows) + df["Heart Disease"].replace({"Presence": 1, "Absence": 0}, inplace=True) + if discard_cols: + df = df.drop(columns=discard_cols) + if label not in df.columns: + raise KeyError(label + " not found") + data_df, label_df = separate_data_and_labels(df, label) + x_train, x_test, y_train, y_test = model_selection.train_test_split( + data_df, label_df, random_state=seed, test_size=test_size_ratio + ) + return (x_train, x_test, y_train, y_test, True, 2, _EMPTY_NP_ARRAY) diff --git a/automated_feature_engineering/featureImage.png b/automated_feature_engineering/featureImage.png new file mode 100644 index 00000000000..c07ef981aa3 Binary files /dev/null and b/automated_feature_engineering/featureImage.png differ diff --git a/automated_feature_engineering/feature_engineering.py b/automated_feature_engineering/feature_engineering.py new file mode 100644 index 00000000000..aa921e2ab75 --- 
/dev/null +++ b/automated_feature_engineering/feature_engineering.py @@ -0,0 +1,925 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Model definitions for automatic feature engineering.""" + +import collections +import functools +from typing import Any, Callable, Dict, List, Optional, Tuple + +from absl import logging +import feature_selection +import tensorflow as tf +import tensorflow_probability as tfp + + +_IDENTITY_FN = lambda x: x +_PLACEHOLDER_TENSOR = tf.zeros([]) +_N_ARY_NUM_FEATURES = 5 +_N_ARY_TO_UNARY_RATIO = 2 +_NUM_QUANTILES = 3 +_NUMERICAL_FEATURE_TYPE = 'numerical' +_CATEGORICAL_FEATURE_TYPE = 'categorical' +_SUM_FUNCTION_NAME = 'Sum' +_PROD_FUNCTION_NAME = 'Product' + + +def log_transform(inputs): + """Creates the entrywise log transform function.""" + inputs_min = tf.math.reduce_min(inputs, axis=-1, keepdims=True) + inputs = inputs - inputs_min + return tf.math.log1p(inputs) + + +def idf(inputs): + """Creates the identity transform function.""" + return inputs + + +class PolyTransform(tf.keras.layers.Layer): + """Layer to learn polynomial transformations.""" + + def __init__(self): + super().__init__() + # TODO(yihed): make exponent learnable. The current bottleneck is that 1)the + # exponent needs to be integral, but tf.math.round is not differentiable, + # and 2) exponentiation of negative floats by non-integral exponents leads + # to nans. 
+ self.exp = 2.0 + + def call(self, inputs): + return tf.math.pow(inputs, self.exp) + + +class ZScaling(tf.keras.layers.Layer): + """Layer to learn and apply z-scaling.""" + + def __init__(self): + super().__init__() + self.gamma = tf.Variable( + tf.ones( + 1, + ) + ) + self.beta = tf.Variable( + tf.zeros( + 1, + ) + ) + + def call(self, inputs): + # TODO(yihed): save train-time statistics to apply at test time. + mean = tf.math.reduce_mean(inputs, axis=0, keepdims=True) + std = tf.math.reduce_std(inputs, axis=0, keepdims=True) + return self.gamma * tf.math.divide_no_nan(inputs - mean, std) + self.beta + + +class Aggregate(tf.keras.layers.Layer): + """Layer to aggregate by categorical features.""" + + def __init__( + self, + num_cat_features, + num_attribute_features, + n_ary_num_features, + feature_dim, + activation_fn = 'gelu', + aggregate_average = False, + ): + super().__init__() + self.num_cat_features = num_cat_features + self.num_attribute_features = num_attribute_features + self.n_ary_num_features = n_ary_num_features + self.cat_mask = tf.Variable( + tf.random.uniform((self.num_cat_features,), maxval=1.0), + dtype=tf.float32, + ) + self.num_numerics = self.num_attribute_features - self.num_cat_features + self.numeric_mask = tf.Variable( + tf.random.uniform((self.num_numerics,), maxval=1.0), dtype=tf.float32 + ) + self.aggregate_average = aggregate_average + + def call(self, inputs, idx_inputs): + """Forward pass for learnable aggregation layer. + + Args: + inputs: attribute input features. + idx_inputs: inputs containing index embeddings for categorical features. + + Returns: + Aggregated features after learning which features to GroupBy by and which + to aggregate. + """ + cat_mask = tf.nn.softmax(self.cat_mask) + top_cat_mask_val, top_idx = tf.math.top_k(cat_mask, k=1) + # Categorical indices. This assumes categorical features precede numerical + # features, as done during data processing. 
+ cat_idx = tf.expand_dims(idx_inputs[:, int(tf.squeeze(top_idx))], -1) + cat_max = tf.squeeze(tf.math.reduce_max(cat_idx)) + cat_aggregate = tf.zeros((cat_max + 1, self.num_numerics)) + updates = inputs[:, self.num_cat_features :] + cat_idx = tf.cast(cat_idx, tf.int32) + # 2D tensor that collects the numeric aggregations grouped by a categorical + # column. + cat_aggregate = tf.tensor_scatter_nd_add(cat_aggregate, cat_idx, updates) + if self.aggregate_average: + count_ones = tf.ones_like(inputs[:, 0]) + count_aggregate = tf.zeros((cat_max + 1,)) + cat_counts = tf.tensor_scatter_nd_add( + count_aggregate, cat_idx, count_ones + ) + cat_aggregate = tf.math.divide_no_nan( + cat_aggregate, tf.expand_dims(cat_counts, -1) + ) + + cat_aggregate = tf.gather(cat_aggregate, tf.squeeze(cat_idx), axis=0) + + # Select the aggregate numerical features. + numeric_mask = tf.nn.softmax(self.numeric_mask) + top_numeric_mask_val, top_idx = tf.math.top_k( + numeric_mask, k=min(self.n_ary_num_features, self.num_numerics) + ) + mask_confidence = (top_numeric_mask_val + top_cat_mask_val) / 2.0 + cat_aggregate = tf.gather(cat_aggregate, top_idx, axis=-1) * tf.expand_dims( + mask_confidence, 0 + ) + return cat_aggregate + + +class Quantilize(tf.keras.layers.Layer): + """Layer to apply quantilization into three quantiles.""" + + def __init__(self): + super().__init__() + self.num_quantiles = _NUM_QUANTILES + self.quantile_intervals = [] + interval_min = -1.0 + interval_max = 1.0 + self.quantile_len = (interval_max - interval_min) / self.num_quantiles + for i in range(self.num_quantiles): + self.quantile_intervals.append(interval_min + self.quantile_len * i) + + def call(self, inputs): + # TODO(yihed): save training thresholds for inference. + quantiles = tfp.stats.quantiles(inputs, self.num_quantiles, axis=0) + # TODO(yihed): make more adaptable to different numbers of quantiles. 
+ input_thirds = [] + input_thirds.append(tf.where(inputs < quantiles[1, :], inputs, 0.0)) + + input_thirds.append( + tf.where( + (inputs > quantiles[1, :]) & (inputs < quantiles[2, :]), inputs, 0.0 + ) + ) + input_thirds.append(tf.where(inputs > quantiles[2, :], inputs, 0.0)) + + # Map each quantile evenly between -1 and 1. + for i, third in enumerate(input_thirds): + interval = quantiles[i + 1, :] - quantiles[i, :] + input_thirds[i] = third * ( + tf.math.divide_no_nan(self.quantile_len, interval) + ) + + input_thirds[0] = tf.where( + inputs < quantiles[1, :], + self.quantile_intervals[0] + input_thirds[0], + 0.0, + ) + input_thirds[1] = tf.where( + (inputs > quantiles[1, :]) & (inputs < quantiles[2, :]), + self.quantile_intervals[1] + input_thirds[1], + 0.0, + ) + input_thirds[2] = tf.where( + inputs > quantiles[2, :], + self.quantile_intervals[2] + input_thirds[2], + 0.0, + ) + + return input_thirds[0] + input_thirds[1] + input_thirds[2] + + +class FeatureDiscoveryLayer(tf.keras.layers.Layer): + """Feature selection and discovery layer.""" + + def __init__( + self, + num_features, + num_selected_features, + transform_function = _IDENTITY_FN, + keep_input_dims = False, + use_softmax_mask = True, + final_feature_dim = None, + feature_type = _NUMERICAL_FEATURE_TYPE, + ): + """Initializes feature discovery layer. + + Applies a given transform function to the inputs. + + Args: + num_features: number of total features. + num_selected_features: target number of selected features. + transform_function: transform function to apply to inputs. + keep_input_dims: whether to keep input dimensions after executing + transformation function. + use_softmax_mask: whether to use softmax to learn mask. + final_feature_dim: the feature dimension of the final output. + feature_type: the type of feature this transform acts on. 
+ """ + super().__init__() + self.num_features = num_features + self.num_selected_features = min(num_features, num_selected_features) + self.feature_selector = feature_selection.FeatureSelectionSparseMasks( + num_features, + self.num_selected_features, + output_mask=True, + use_softmax_mask=use_softmax_mask, + ) + self.transform_function = transform_function + self.keep_input_dims = keep_input_dims + self.final_feature_dim = min(self.num_selected_features, final_feature_dim) + self.feature_type = feature_type + + def set_sparse_mask(self, mask): + """Sets the learned sparse mask for this layer.""" + self.sparse_mask = mask + + def call( + self, inputs, training = True + ): + """Feature discovery layer forward pass. + + This call uses the input to learn the feature and transform masks, and zeros + out non-selected features. Further details:go/automatic-feature-engineering. + + Args: + inputs: the input features. + training: whether currently training. + + Returns: + Sparsified and transformed features, learned sparse mask, and the selected + features to be used for the given transform. + """ + if self.keep_input_dims: + # Apply transform before masking for elementwise operations. + inputs = self.transform_function(inputs) + + inputs_sparse, inputs_mask = self.feature_selector(inputs) + + inputs_mask = tf.reshape(inputs_mask, [-1]) + # TODO(yihed): consider using raw inputs rather than masked-out inputs. + # non-selected features are zeroed-out + _, top_idx = tf.math.top_k(inputs_mask, k=self.num_selected_features) + + # Unary operators outputs have same dimensionality as inputs, n-ary + # operators output tensors with fewer dimensions than inputs. 
+ top_features = tf.gather(inputs_sparse, top_idx, axis=-1) + if self.keep_input_dims: + inputs_transformed = inputs_sparse + else: + inputs_transformed = self.transform_function(top_features) + top_features = inputs_transformed + self.set_sparse_mask(inputs_mask) + return inputs_transformed, inputs_mask, top_features + + def visualize_transform( + self, str_presentation, num_cat_features + ): + """Create an interpretable string representation using current layer. + + Modifies input list in-place. + + Args: + str_presentation: List of string representations per index from previous + layers. + num_cat_features: number of categorical features. + """ + _, top_idx = tf.math.top_k(self.sparse_mask, k=self.num_selected_features) + transform_str = transform_to_str(self.transform_function) + + if self.feature_type == _NUMERICAL_FEATURE_TYPE: + top_idx += num_cat_features + + if self.keep_input_dims: + # e.g. for unary transforms like log. + for i in top_idx: + # This accounts for skip connections. + if str_presentation[i]: + str_presentation[i] = f'{transform_str}({str_presentation[i]})' + # Append here to match up with cross_mask selection coefficients. + str_presentation.append(str_presentation[i]) + else: + # e.g. for n-ary transforms like reduce_sum. 
+ features = [] + for i in top_idx: + if str_presentation[i]: + features.append(str_presentation[i]) + if self.final_feature_dim > 1: + str_transformed = f'{transform_str}({str_presentation[i]})' + str_presentation.append(str_transformed) + + if self.final_feature_dim == 1: + features = ', '.join(features) + transform_str = f'{transform_str}({features})' + str_presentation.append(transform_str) + + +class TemporalFeatureDiscoveryLayer(tf.keras.layers.Layer): + """Discovery module for temporal features.""" + + def __init__( + self, + feature_dim, + n_temporal_features, + context_len = 10, + ): + """Initializes temporal feature discovery module.""" + super().__init__() + self.context_len = context_len + # TODO(yihed): consider case when cat_embed_dim > 1. + self.temporal_mask = tf.Variable( + tf.random.uniform((context_len,), maxval=1.0), dtype=tf.float32 + ) + self.lag_mask = tf.Variable( + tf.random.uniform((context_len,), maxval=1.0), dtype=tf.float32 + ) + + self.n_temporal_features = n_temporal_features + # TODO(yihed): explore making this threshold learnable. + self.feature_selection_prob_thresh = 1.0 / context_len + + @classmethod + def output_dim_per_temporal_feature(cls): + """Output feature dimension per temporal feature. + + Each temporal feature produces a cumulative sum feature, a cumulative + difference feature, and a lag feature. + Returns: + Processed putput feature dimension per temporal feature. + """ + # TODO(yihed): update this return value to avoid the constant 3. + return 7 + + def call( + self, + temporal_inputs, + training = True, + separate_prob_coeff = True, + ): + """Temporal feature discovery module forward pass. + + Args: + temporal_inputs: input tensor containing temporal features. + training: whether currently training. + separate_prob_coeff: whether to separate feature selection probability + from the aggregation coefficients. + + Returns: + Transformed temporal features. 
+ """ + # This mask simultaneously learns the sign of aggregation and the selection + # coefficients. + all_features_prob = tf.nn.softmax(tf.math.abs(self.temporal_mask)) + + selected_feature_coeff = tf.where( + all_features_prob > self.feature_selection_prob_thresh, + self.temporal_mask, + 0, + ) + # This incentivizes the mask to select features that are temporally adjacent + temporal_regularizer = tf.reduce_sum( + tf.math.abs(selected_feature_coeff[:-1] - selected_feature_coeff[1:]) + ) + + if separate_prob_coeff: + # TODO(yihed): introduce other aggregation schemes such as differencing. + # Aggregating selected features with coefficients 1. + selection_coeff = tf.where( + all_features_prob > self.feature_selection_prob_thresh, 1.0, 0.0 + ) + else: + selection_coeff = selected_feature_coeff + + selection_coeff = tf.tile( + selection_coeff, + [ + self.n_temporal_features, + ], + ) + selected_features = temporal_inputs * tf.expand_dims(selection_coeff, 0) + selected_features = tf.reshape( + selected_features, [-1, self.n_temporal_features, self.context_len] + ) + # TODO(yihed): consider using percentages rather than raw values. + selected_features_mean_diff = ( + selected_features[:, :, :-1] - selected_features[:, :, 1:] + ) + selected_features_mean_diff = tf.reduce_mean( + tf.math.abs(selected_features_mean_diff), -1 + ) + selected_features_sum = tf.reduce_sum(selected_features, -1) + # Learn temporal lag probabilistically. + lag_prob = tf.nn.softmax(self.lag_mask) + lag_prob_max = tf.math.reduce_max(lag_prob, axis=-1) + lag_prob = tf.where(lag_prob < lag_prob_max, 0.0, lag_prob) + + lag_features = lag_prob * selected_features + lag_features = tf.math.reduce_sum(lag_features, axis=-1) + + # Compute features based on relative rather than absolute changes. 
+ temporal_inputs = tf.reshape( + temporal_inputs, [-1, self.n_temporal_features, self.context_len] + ) + temporal_mean, temporal_std, relative_mean, relative_std = ( + self.temporal_relative_stats(temporal_inputs) + ) + return ( + tf.concat( + [ + selected_features_sum, + selected_features_mean_diff, + lag_features, + temporal_mean, + temporal_std, + relative_mean, + relative_std, + ], + -1, + ), + temporal_regularizer, + ) + + def temporal_relative_stats( + self, temporal_inputs + ): + temporal_mean = tf.math.reduce_mean(temporal_inputs, axis=-1) + temporal_std = tf.math.reduce_std(temporal_inputs, axis=-1) + + temporal_max = tf.math.reduce_max(temporal_inputs, axis=-1, keepdims=True) + temporal_min = tf.math.reduce_max(temporal_inputs, axis=-1, keepdims=True) + # percentage changes w.r.t. amount of total variation per feature. + temporal_relative = tf.math.divide_no_nan( + temporal_inputs - temporal_inputs[:, :, :1], temporal_max - temporal_min + ) + relative_mean = tf.math.reduce_mean(temporal_relative, axis=-1) + relative_std = tf.math.reduce_std(temporal_relative, axis=-1) + return temporal_mean, temporal_std, relative_mean, relative_std + + +class FeatureDiscoveryModel(tf.keras.layers.Layer): + """Feature selection and discovery module.""" + + def __init__( + self, + num_attribute_features, + num_selected_features, + feature_dim, + num_mlp_layers, + n_temporal_features, + context_len = 10, + infer_features = False, + activation_fn = 'gelu', + prod_num_args = 2, + use_softmax_mask = True, + num_cat_features = 0, + n_reduce_ops = 3, + n_aggregation_layers = 1, + ): + """Initializes feature selection module. + + Args: + num_attribute_features: total number of attribute features. + num_selected_features: target number of selected features. + feature_dim: feature dimension for latent vectors. + num_mlp_layers: number of MLP layers. + n_temporal_features: number of temporal features. + context_len: length of context window for temporal features. 
+ infer_features: whether to produce and save learned features. + activation_fn: activation function for MLP layers. + prod_num_args: number of arguments going into reduce_prod. + use_softmax_mask: whether to use softmax to learn mask. + num_cat_features: number of categorical features. + n_reduce_ops: number of reduction operations such as reduce_prod. + n_aggregation_layers: number of aggregation layers. + """ + super().__init__() + self.num_attribute_features = num_attribute_features + num_selected_features = min(num_selected_features, num_attribute_features) + self.num_selected_features = num_selected_features + z_scaling = ZScaling() + self.unary_transforms = [ + # This is an initial unary transform candidate, more transforms + # will be added. + # z_scaling, + ] + + poly_transform = PolyTransform() + quantilization = Quantilize() + + NAryTransform = collections.namedtuple( + 'NAryTransform', + [ + 'transform_func', + 'num_selected_features', + 'final_feature_dim', + 'feature_type', + ], + ) + n_ary_num_features = max( + _N_ARY_NUM_FEATURES, + (self.num_selected_features // _N_ARY_TO_UNARY_RATIO), + ) + n_ary_num_features = min(n_ary_num_features, self.num_selected_features) + # Learned masks select which subset of features to use as arguments to these + # operators. + reduce_sum_transforms = [ + NAryTransform( + functools.partial( + tf.math.reduce_sum, + axis=-1, + keepdims=True, + ), + prod_num_args, + 1, + _NUMERICAL_FEATURE_TYPE, + ) + ] * n_reduce_ops + reduce_prod_transforms = [ + NAryTransform( + functools.partial( + tf.math.reduce_prod, + axis=-1, + keepdims=True, + ), + prod_num_args, + 1, + _NUMERICAL_FEATURE_TYPE, + ) + ] * n_reduce_ops + self.n_ary_transforms = [ + *reduce_sum_transforms, + # Taking the product across many entries can create vanishing gradients. 
+ *reduce_prod_transforms, + NAryTransform( + log_transform, + n_ary_num_features, + n_ary_num_features, + _NUMERICAL_FEATURE_TYPE, + ), + NAryTransform( + z_scaling, + n_ary_num_features, + n_ary_num_features, + _NUMERICAL_FEATURE_TYPE, + ), + NAryTransform( + poly_transform, + n_ary_num_features, + n_ary_num_features, + _NUMERICAL_FEATURE_TYPE, + ), + NAryTransform( + quantilization, + n_ary_num_features, + n_ary_num_features, + _NUMERICAL_FEATURE_TYPE, + ), + ] + self.unary_layers = [] + self.n_ary_layers = [] + self.unary_layernorms = [] + for transform in self.unary_transforms: + # keep_input_dims=True so addition across layers makes sense. + feature_layer = FeatureDiscoveryLayer( + num_attribute_features, + self.num_selected_features, + transform, + keep_input_dims=True, + use_softmax_mask=use_softmax_mask, + ) + self.unary_layers.append(feature_layer) + self.unary_layernorms.append(tf.keras.layers.LayerNormalization()) + + for transform in self.n_ary_transforms: + if transform.feature_type == _NUMERICAL_FEATURE_TYPE: + layer_n_features = num_attribute_features - num_cat_features + elif transform.feature_type == _CATEGORICAL_FEATURE_TYPE: + layer_n_features = num_cat_features + else: + layer_n_features = num_attribute_features + + feature_layer = FeatureDiscoveryLayer( + layer_n_features, + transform.num_selected_features, + transform.transform_func, + keep_input_dims=False, + use_softmax_mask=use_softmax_mask, + final_feature_dim=transform.final_feature_dim, + feature_type=transform.feature_type, + ) + self.n_ary_layers.append(feature_layer) + + raw_features_mlp_sequence = [ + tf.keras.layers.Dense(feature_dim, activation=activation_fn) + for _ in range(num_mlp_layers) + ] + self.raw_features_mlp = tf.keras.Sequential(raw_features_mlp_sequence) + + self.n_temporal_features = n_temporal_features + # mask for comparing importance across learned transforms. 
+ n_ary_output_dim = sum( + [layer.final_feature_dim for layer in self.n_ary_layers] + ) + self.cross_mask_dim = ( + self.num_selected_features * len(self.unary_transforms) + + n_ary_output_dim + ) + + cross_mlp_sequence = [ + tf.keras.layers.Dense(feature_dim, activation=activation_fn) + for _ in range(num_mlp_layers) + ] + self.cross_mlp = tf.keras.Sequential(cross_mlp_sequence) + if n_temporal_features > 0: + self.temporal_discovery_layer = TemporalFeatureDiscoveryLayer( + feature_dim, + n_temporal_features, + context_len, + ) + self.cross_mask_dim += ( + n_temporal_features + * TemporalFeatureDiscoveryLayer.output_dim_per_temporal_feature() + ) + + self.num_cat_features = num_cat_features + self.aggregation_layers = [] + if num_cat_features > 0: + for _ in range(n_aggregation_layers): + for aggregate_average in [True, False]: + self.aggregation_layers.append( + Aggregate( + num_cat_features, + self.num_attribute_features, + n_ary_num_features, + feature_dim, + aggregate_average=aggregate_average, + ) + ) + self.cross_mask_dim += min( + n_ary_num_features, self.num_attribute_features - num_cat_features + ) + + self.cross_mask = tf.Variable( + tf.random.uniform((self.cross_mask_dim,), maxval=1.0), dtype=tf.float32 + ) + self.infer_features = infer_features + + def call( + self, + attribute_inputs, + temporal_inputs = _PLACEHOLDER_TENSOR, + idx_inputs = None, + ): + """Feature discovery module forward pass. + + This call uses the input to learn the feature and transform masks, and zeros + out non-selected features. + + Args: + attribute_inputs: SLM-processed input attribute features tensor. + temporal_inputs: input temporal features tensor. + idx_inputs: inputs containing the indices for categorical features. + + Returns: + Transformed features, including both unary and n-ary transforms. + """ + # Since inputs are SLM selected features, feeding this into the downstream + # MLP means the identity function is used as a unary operator. 
Hence this + # model includes SLM as a special case. + raw_features_sparse = attribute_inputs + latents = attribute_inputs + selected_unary_features = [] + + # save for visualizing transforms. + # TODO(yihed): consider passing in SLM mask rather than inferring + # non-zero indices here. + raw_features_idx = tf.squeeze(tf.where(attribute_inputs[0])) + self.save_raw_features_idx(raw_features_idx) + + for i, layer in enumerate(self.unary_layers): + layer_latents, _, top_features = layer(latents) + latents = self.unary_layernorms[i](latents + layer_latents) + selected_unary_features.append(top_features) + + # Note unary_output is expected to keep the dimension of attribute_inputs. + unary_output = latents + # parallelize the n_ary layers, to allow each n_ary operation on + # the unary outputs directly. + selected_n_ary_features = [] + + numerical_latents = unary_output[:, self.num_cat_features :] + categorical_latents = unary_output[:, : self.num_cat_features] + + for layer in self.n_ary_layers: + if layer.feature_type == _NUMERICAL_FEATURE_TYPE: + layer_input = numerical_latents + elif layer.feature_type == _CATEGORICAL_FEATURE_TYPE: + layer_input = categorical_latents + else: + layer_input = unary_output + latents, _, _ = layer(layer_input) + # n_ary transforms output 1-D top output + selected_n_ary_features.append(latents) + + raw_feature_latents = self.raw_features_mlp(raw_features_sparse) + + selected_unary_features.extend(selected_n_ary_features) + if self.n_temporal_features > 0: + temporal_features, temporal_regularizer = self.temporal_discovery_layer( + temporal_inputs + ) + + selected_unary_features.append(temporal_features) + else: + temporal_regularizer = 0 + + for aggregation_layer in self.aggregation_layers: + aggregation_features = aggregation_layer(raw_features_sparse, idx_inputs) + selected_unary_features.append(aggregation_features) + + cross_latents = tf.concat(selected_unary_features, axis=-1) + if self.infer_features: + 
self.save_learned_features(cross_latents) + + top_val, top_idx = tf.math.top_k( + self.cross_mask, + k=self.num_selected_features + self.n_temporal_features, + ) + top_idx = tf.expand_dims(top_idx, -1) + top_val = tf.nn.softmax(top_val) + transform_prob = tf.scatter_nd( + top_idx, top_val, tf.constant([self.cross_mask_dim]) + ) + learned_latents = cross_latents * tf.expand_dims(transform_prob, 0) + + cross_latents = self.cross_mlp(learned_latents) + all_latents = tf.concat([raw_feature_latents, cross_latents], axis=-1) + + return ( + all_latents, + temporal_regularizer, + ) + + def save_learned_features(self, learned_features): + """Record newly discovered features.""" + # TODO(yihed): save config file with feature dimensions. + self.learned_features = learned_features + + def save_raw_features_idx(self, raw_features_idx): + """Save selected indices for raw input features.""" + self.raw_features_idx = raw_features_idx + + +def transform_to_str(transform): + """Converts a transform function to a readable string.""" + if isinstance(transform, functools.partial): + if transform.func == tf.math.reduce_prod: + return _PROD_FUNCTION_NAME + elif transform.func == tf.math.reduce_sum: + return _SUM_FUNCTION_NAME + else: + return str(transform.func.__name__) + elif isinstance(transform, tf.keras.layers.Layer): + return transform.name + elif callable(transform): + # e.g. transform of function type. + return transform.__name__ + else: + return str(transform) + + +def recover_transforms( + model, + cat_features = None, + numerical_features = None, +): + """Retrieves transformation functions from model. + + Args: + model: the discovery model to extract transforms from. + cat_features: list of categorical feature names. + numerical_features: list of numerical feature names. + + Returns: + Dictionary of feature names along their scores. + Feature names of discovered features, ranked by importance. + Ranked ordering of the discovered features, ranked by by importance. 
+ """ + + raw_features = [''] * model.num_attribute_features + all_feature_names = cat_features + numerical_features + assert tf.math.reduce_max(model.raw_features_idx) < ( + len(cat_features) + len(numerical_features) + ), 'Selected feature indices must be within total feature range.' + for idx in model.raw_features_idx: + raw_features[idx] = all_feature_names[int(idx)] + + # Note the iteration order here must be the same as in forward pass, due to + # construction of the cross mask. + features = raw_features + for transform_layers in [model.unary_layers, model.n_ary_layers]: + for layer in transform_layers: + layer.visualize_transform(features, len(cat_features)) + + if hasattr(model, 'temporal_discovery_layer'): + temporal_layer = model.temporal_discovery_layer + all_features_prob = tf.nn.softmax(tf.math.abs(temporal_layer.temporal_mask)) + + selected_feature_index = tf.where( + all_features_prob > temporal_layer.feature_selection_prob_thresh + ) + temporal_features = [] + for idx in selected_feature_index: + temporal_features.append('t' + str(int(idx))) + + temporal_features_str = ', '.join(temporal_features) + '; ' + temporal_sum_strs = [ + 'TemporalSum(' + temporal_features_str + str(i) + ')' + for i in range(model.n_temporal_features) + ] + features.extend(temporal_sum_strs) + + temporal_diff_strs = [ + 'TemporalDiff(' + temporal_features_str + str(i) + ')' + for i in range(model.n_temporal_features) + ] + features.extend(temporal_diff_strs) + lag_idx = tf.math.argmax(temporal_layer.lag_mask, axis=-1) + temporal_lag_strs = [ + 'TemporalLag(t' + str(int(lag_idx)) + ', ' + str(i) + ')' + for i in range(model.n_temporal_features) + ] + features.extend(temporal_lag_strs) + transform_names = [ + 'TemporalMean', + 'TemporalStd', + 'RelTemporalMean', + 'RelTemporalStd', + ] + for transform_name in transform_names: + for idx in range(model.n_temporal_features): + features.append(transform_name + '(series' + str(idx) + ')') + + for aggregation_layer in 
model.aggregation_layers: + cat_idx = tf.math.argmax(aggregation_layer.cat_mask) + + _, numeric_idx = tf.math.top_k( + aggregation_layer.numeric_mask, + k=min( + aggregation_layer.num_numerics, aggregation_layer.n_ary_num_features + ), + ) + aggregate_str = ( + 'GroupByThenAverage(' + if aggregation_layer.aggregate_average + else 'GroupByThenSum(' + ) + for idx in numeric_idx: + features.append( + aggregate_str + + cat_features[int(cat_idx)] + + '; ' + + numerical_features[int(idx)] + + ')' + ) + + n_top_features = model.cross_mask_dim + cross_mask_scores, cross_mask_idx = tf.math.top_k( + model.cross_mask, n_top_features + ) + + if model.num_attribute_features + model.cross_mask_dim != len(features): + logging.error( + 'Attributes and cross mask dimensions do not add up to total #features.' + ) + + # The cross features start at index len(features) - model.cross_mask_dim. + ranked_features = tf.gather(features[-n_top_features:], cross_mask_idx) + return ( + dict(zip(ranked_features.numpy(), cross_mask_scores.numpy())), + ranked_features.numpy().tolist(), + cross_mask_idx, + ) diff --git a/automated_feature_engineering/feature_metadata.py b/automated_feature_engineering/feature_metadata.py new file mode 100644 index 00000000000..e109091766d --- /dev/null +++ b/automated_feature_engineering/feature_metadata.py @@ -0,0 +1,1035 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Creates feature metadata for use in normalization and feature creation.""" + +import collections +import dataclasses +import enum +import logging +import textwrap +# TODO(b/249435057): Convert imports to use collections.abc once the container +# is updated to have python 3.9. +from typing import Any, DefaultDict, Iterable, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Set, Union + +import bq_utils +from google.cloud import bigquery +import numpy as np +import tensorflow as tf + + +# https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types +_BIGQUERY_SUPPORTED_NUMERIC_DATA_TYPES = frozenset(( + 'INT64', + 'NUMERIC', + 'BIGNUMERIC', + 'FLOAT64', + 'FLOAT', + 'INTEGER', +)) +# Decide if we should handle dates and times as numerical as well. +# Research if there are other data types we should try to handle. +_BIGQUERY_SUPPORTED_DISCRETE_DATA_TYPES = frozenset(( + 'STRING', + 'BYTES', + 'BOOL', + 'BOOLEAN', + 'INT64', + 'INTEGER', + 'DATE', + 'DATETIME', + 'TIMESTAMP', + 'TIME', +)) +_BIGQUERY_SUPPORTED_DATA_TYPES = frozenset(( + _BIGQUERY_SUPPORTED_NUMERIC_DATA_TYPES + | _BIGQUERY_SUPPORTED_DISCRETE_DATA_TYPES +)) + +_USE_LEGACY_SQL = False + + +@enum.unique +class Normalization(str, enum.Enum): + """GAIN normalization methods.""" + + MINMAX = 'minmax' + STANDARD = 'standard' + + +@dataclasses.dataclass +class MetadataRetrievalOptions: + """Describes what metadata should be collected.""" + + # For numeric features + get_mean: bool = True + get_variance: bool = True + get_min: bool = True + get_max: bool = True + get_median: bool = False + get_log_mean: bool = True + get_log_variance: bool = True + number_of_quantiles: Optional[int] = None + + # Transform related parameters. + min_log_value: float = 1.0e-4 + + # For discrete features + get_mode: bool = True + max_vocab_size: int = 5000 + + # BigQuery specific options. 
+ where_clauses: Sequence[str] = dataclasses.field(default_factory=tuple) + + def no_options(self, ignored_fields = None): + """True if metadata statistics do not need to be retrieved. + + Args: + ignored_fields: Any fields to not check for a boolean true value. If None + is passed in the default of {'min_log_value'} will be used. + + Returns: + True if none of the important attributes are true. + """ + if ignored_fields is None: + ignored_fields = {'min_log_value', 'where_clauses'} + + for field in dataclasses.fields(self): + if field.name in ignored_fields: + continue + + if getattr(self, field.name): + return False + + return True + + @classmethod + def get_all( + cls, + number_of_quantiles = 101, + max_vocab_size = 10000, + where_clauses = tuple(), + ): + """Creates a MetadataRetrievalOptions object with all values True. + + Args: + number_of_quantiles: The number of quantiles to use. + max_vocab_size: The maximum vocab size to use. + where_clauses: A list of clauses that can be combined with and statements + to get the correct values in the BigQuery table. + + Returns: + A MetadataRetrievalOptions that gets all metadata classes. + """ + return cls( + get_mean=True, + get_variance=True, + get_min=True, + get_max=True, + get_median=True, + get_log_mean=True, + get_log_variance=True, + number_of_quantiles=number_of_quantiles, + get_mode=True, + max_vocab_size=max_vocab_size, + where_clauses=where_clauses, + ) + + @classmethod + def get_none( + cls, + where_clauses = tuple(), + ): + """Creates a MetadataRetrievalOptions object that doesn't get any metadata. + + Args: + where_clauses: A list of clauses that can be combined with and statements + to get the correct values in the BigQuery table. + + Returns: + A MetadataRetrievalOptions that gets no metadata. 
+ """ + return cls( + get_mean=False, + get_variance=False, + get_min=False, + get_max=False, + get_median=False, + get_log_mean=False, + get_log_variance=False, + min_log_value=0.0, + number_of_quantiles=None, + get_mode=False, + max_vocab_size=0, + where_clauses=where_clauses, + ) + + @classmethod + def for_normalization( + cls, + normalization, + where_clauses = tuple(), + ): + """Returns the metadata retrieval options needed for a normalization. + + Args: + normalization: The normalization method to be used. Currently, supports + minmax and standard. + where_clauses: A list of clauses that can be combined with and statements + to get the correct values in the BigQuery table. + + Returns: + A MetadataRetrievalOptions with the necessary options being True. + + Raises: + ValueError: If an invalid normalization is passed in. + """ + if normalization == Normalization.MINMAX: + return cls( + get_mean=False, + get_variance=False, + get_min=True, + get_max=True, + get_median=False, + get_log_mean=False, + get_log_variance=False, + get_mode=False, + where_clauses=where_clauses, + ) + elif normalization == Normalization.STANDARD: + return cls( + get_mean=True, + get_variance=True, + get_min=False, + get_max=False, + get_median=False, + get_log_mean=False, + get_log_variance=False, + get_mode=False, + where_clauses=where_clauses, + ) + else: + raise ValueError(f'The normalization {normalization} is not valid.') + + +# noinspection PyUnresolvedReferences +@dataclasses.dataclass +class FeatureMetadata: + """The metadata for a single data column. + + Attributes: + name: The name of the column. + index: The index of the column (e.g. 0-N-1). + input_data_type: The data_type of the column in BigQuery + tf_data_type_str: The data type that should be used for the feature in + TensorFlow as a string. + mean: The mean of the feature. For numeric data only. + variance: The variance of the feature. For numeric data only. + min: The minimum value of the feature. For numeric data only. 
+ max: The maximum value of the feature. For numeric data only. + median: The mean of the feature. For numeric data only. + log_shift: The value to shift the data by before applying a log transform. + This is effectively MIN(0, eps - min(X)). Therefore the log transform + should be applied as log(X + log_shift). Will be present if log_mean or + log_variance are present. For numeric data only. + log_mean: The mean of the feature after the log transform. For numeric data + only. + log_variance: The variance of the feature after the log transform. For + numeric data only. + quantiles: A list of quantiles evenly spaced between 0% (the min) and 100% + (the max). As an example if there are three values they are the min, the + median, and the max. + cardinality: The number of different values the feature takes on in the + table. For discrete data only. + mode: The most common value of the feature. For discrete data only. + vocabulary: A mapping between the feature values and the number of times + that they occur in the data. Note that this may not be present for some + discrete data whose cardinality is too large. For discrete data only. + is_numeric: True if the BigQuery datatype is numeric. + is_discrete: True it fhe BigQuery datatype is discrete. + """ + + name: str + index: int + input_data_type: Optional[str] = None + tf_data_type_str: Optional[str] = None + # These are related to numeric features. + mean: Optional[float] = None + variance: Optional[float] = None + min: Optional[float] = None + max: Optional[float] = None + median: Optional[float] = None + log_shift: Optional[float] = None + log_mean: Optional[float] = None + log_variance: Optional[float] = None + quantiles: Optional[Sequence[float]] = None + # These are related to discrete features. + # Consider if we should calculate cardinality for all features + # as sometimes float fields can only take on a set of discrete values. + # This case was commonly seen in some of the Uber data. 
+ cardinality: Optional[int] = None + mode: Optional[Any] = None + vocabulary: Optional[Mapping[Any, int]] = None + + @property + def tf_data_type(self): + """The TensorFlor data type of the feature.""" + return ( + tf.dtypes.as_dtype(self.tf_data_type_str) + if self.tf_data_type_str + else None + ) + + @tf_data_type.setter + def tf_data_type(self, tf_data_type): + tf_dtype = tf.dtypes.as_dtype(tf_data_type) + self.tf_data_type_str = tf_dtype.name + + @property + def range(self): + """The range (max-min) of the data.""" + if self.max is None or self.min is None: + return None + + return self.max - self.min + + @property + def is_numeric(self): + """If the feature is numeric.""" + if self.tf_data_type_str: + return self.tf_data_type.is_floating or self.tf_data_type.is_integer # pytype: disable=attribute-error # always-use-return-annotations + # Handle other input sources more generically. + else: + return self.input_data_type in _BIGQUERY_SUPPORTED_NUMERIC_DATA_TYPES + + @property + def is_discrete(self): + """If the feature is discrete.""" + tf_data_type = self.tf_data_type + if tf_data_type: + return ( + tf_data_type.is_integer + or tf_data_type.is_bool + or tf_data_type == tf.dtypes.string + ) + # Handle other input sources more generically. + else: + return self.input_data_type in _BIGQUERY_SUPPORTED_DISCRETE_DATA_TYPES + + def get_config(self): + """Creates a config that can be converted to json.""" + config = dataclasses.asdict(self) + # Because JSON only takes strings as keys we need to track and convert + # types. + if self.vocabulary: + # Consider whether we should limit the vocabulary to one + # type. 
+ config['vocabulary_types'] = { + v: type(v).__name__ for v in self.vocabulary + } + return config + + @classmethod + def from_config(cls, config): + """Creates a FeatureMetadata from a config.""" + + def _update_vocabulary_key(new_key, old_key): + config['vocabulary'][new_key] = config['vocabulary'][old_key] + del config['vocabulary'][old_key] + + vocabulary_types = config.pop('vocabulary_types', {}) + for k, k_type in vocabulary_types.items(): + if k_type == 'bool': + bool_key = k.lower() == 'true' + _update_vocabulary_key(bool_key, k) + elif k_type == 'NoneType': + _update_vocabulary_key(None, k) + elif k_type == 'int': + _update_vocabulary_key(int(k), k) + elif k_type == 'float': + _update_vocabulary_key(float(k), k) + + return cls(**config) + + +class FeatureMetadataContainer: + """Allows feature metadata to be easily retrieved. + + Example:: + + # Pass retrieved metadata into the container. + builder = BigQueryMetadataBuilder('project', 'dataset', 'table') + all_metadata = builder.get_feature_names_and_types() + metadata_container = FeatureMetadataContainer(all_metadata) + + # Get a feature by name. + feature1_metadata = metadata_container.get_metadata_by_name('feature1') + assert feature1_metadata.name == 'feature1' + + # Get features by dtype. + float_features = metadata_container.get_metadata_by_name('FLOAT64') + assert all([ff.input_data_type == 'FLOAT64' for ff in float_features]) + + # Get a feature by index and check it matches. + assert metadata_container[0] == all_metadata[0] + + # Iterate through the metadata and check it matches. + for idx, c_metadata in enumerate(metadata_container): + assert c_metadata == all_metadata[idx] + """ + + def __init__( + self, + all_feature_metadata, + ): + """Container that holds feature metadata for easy retrieval. + + Args: + all_feature_metadata: A sequence of the metadata for the features in the + table. + """ + # Internal storage of the input metadata as a sequence. 
+ self._metadata_sequence = ( + all_feature_metadata._metadata_sequence + if isinstance(all_feature_metadata, FeatureMetadataContainer) + else all_feature_metadata + ) + + # Lazily created cache for mapping output features to data types. + self._metadata_for_dtype: Optional[ + DefaultDict[str, MutableSequence[FeatureMetadata]] + ] = None + # Lazily created cache for quickly getting feature metadata by name. + self._metadata_for_name: Optional[Mapping[str, FeatureMetadata]] = None + + @property + def feature_metadata_by_names(self): + """A mapping between the name and metadata of each feature. + + Raises: + ValueError: If two features have the same name. + """ + if not self._metadata_for_name: + self._metadata_for_name = {m.name: m for m in self._metadata_sequence} + # Make sure all names are unique + if len(self._metadata_for_name) != len(self._metadata_sequence): + raise ValueError('Two features may not have the same name.') + + return self._metadata_for_name + + def get_metadata_by_name(self, feature_name): + """Returns the metadata for the feature with the given name. + + Args: + feature_name: The name of the feature (i.e. metadata.name). + + Returns: + The metadata object whose name matches the input name. + """ + return self.feature_metadata_by_names[feature_name] + + def __getitem__(self, idx): + """Allow user to use access the metadata sequence directly. + + Args: + idx: The index (based on input order) of the feature to retrieve. + + Returns: + The metadata for the specified feature. 
+ """ + return self._metadata_sequence[idx] + + @property + def feature_metadata_by_dtypes( + self, + ): + """A mapping between data type and metadata for all features.""" + if not self._metadata_for_dtype: + self._metadata_for_dtype = collections.defaultdict(list) + for m in self._metadata_sequence: + self._metadata_for_dtype[m.input_data_type].append(m) + + return self._metadata_for_dtype + + def get_metadata_by_dtype( + self, input_data_type + ): + """Returns a set of metadata for features of the given type.""" + return self.feature_metadata_by_dtypes[input_data_type] + + @property + def names(self): + return tuple(self.feature_metadata_by_names.keys()) + + def __iter__(self): + """Iterates through the metadata in the order they were provided.""" + for metadata in self._metadata_sequence: + yield metadata + + def __len__(self): + """Returns the number of elements in the metadata.""" + return len(self._metadata_sequence) + + def __repr__(self): + metadata_str = ','.join([f'{f!r}' for f in self]) + return f'{type(self)!r}(all_feature_metadata=[{metadata_str}])' + + def to_bigquery_schema(self): + """Creates a BigQuery schema from the feature metadata. + + Returns: + A list of the SchemaFields for each of the features. + """ + # Add in the nullable attribute. + return [bigquery.SchemaField(m.name, m.input_data_type) for m in self] + + def equal_names_and_types( + self, + other, + difference_method = None, + ): + """Returns where the name and datatypes of the two containers is equal. + + Args: + other: Another metadata container to compare to. + difference_method: How to handle differences. Valid values are 'raise', + logger levels or None. For logging levels see: + https://docs.python.org/3/library/logging.html#logging-levels. + + Returns: + True if all the names and types of the two schema match. + + Raises: + ValueError: If the difference method is raise and the two values do not + match or an invalid difference_method is specified. 
+ """ + fields_to_check = ('name', 'input_data_type', 'tf_data_type') + + if difference_method == 'raise': + + def difference_fn(message): + raise ValueError(message) + + elif difference_method: + numeric_level = getattr(logging, difference_method.upper(), None) + if numeric_level is None: + raise ValueError( + f'The difference_method {difference_method} is not valid.' + ) + + def difference_fn(message): + logging.log(numeric_level, message) + + else: + + def difference_fn(_): + pass + + if len(self) != len(other): + difference_fn( + f'The length of the two containers is different: {self} vs {other}' + ) + return False + + different_features = [] + for feature, other_feature in zip(self, other): + for field_name in fields_to_check: + if getattr(feature, field_name) != getattr(other_feature, field_name): + different_features.append((feature, other_feature)) + continue + + if different_features: + features_str = [ + f'{mine} vs {other}' for mine, other in different_features + ] + difference_fn(f'The following features differ: {";".join(features_str)}') + return False + + return True + + def get_config(self): + """Creates a config that can be converted to json.""" + return { + 'all_feature_metadata': [ + fm.get_config() for fm in self._metadata_sequence + ], + } + + @classmethod + def from_config( + cls, + config, + ): + """Create a FeatureMetadataContainer from a config.""" + if 'all_feature_metadata' not in config: + raise ValueError('The key all_feature_metadata must be in the config.') + + config['all_feature_metadata'] = [ + FeatureMetadata.from_config(fm) for fm in config['all_feature_metadata'] + ] + return cls(**config) + + +class BigQueryTableMetadata(FeatureMetadataContainer): + """Contains the metadata for features in a BigQuery table. + + This extends the normal FeatureMetadataContainer by including information + about the datasource (i.e. the BigQuery Table). + + Attributes: + project_id: The name of the GCP project that the BigQuery table is in. 
+ bq_dataset_name: The name of the dataset that the BigQuery table is in. + bq_table_name: The name of the table from which the metadata was retrieved. + """ + + def __init__( + self, + all_feature_metadata, + project_id, + bq_dataset_name, + bq_table_name, + ): + """Initializer for a BigQueryTableMetadata object. + + Args: + all_feature_metadata: Sequence of FeatureMetadata for the columns. + project_id: The name of the GCP project that the BigQuery table is in. + bq_dataset_name: The name of the dataset that the BigQuery table is in. + bq_table_name: The name of the table from which the metadata was + retrieved. + """ + super().__init__(all_feature_metadata) + + self.project_id = project_id + self.bq_dataset_name = bq_dataset_name + self.bq_table_name = bq_table_name + + @property + def full_table_id(self): + """The full path 'project.dataset.table' of the BigQuery table.""" + return f'{self.project_id}.{self.bq_dataset_name}.{self.bq_table_name}' + + @property + def escaped_table_id(self): + """The full path of the BigQuery table escaped with `.""" + return f'`{self.full_table_id}`' + + @property + def bigquery_table(self): + """The bigquery.Table object for this table.""" + return bigquery.Table.from_string(self.full_table_id) + + def update_bq_path_parts(self, bq_file_parts): + self.project_id = bq_file_parts.project_id + self.bq_dataset_name = bq_file_parts.bq_dataset_name + self.bq_table_name = bq_file_parts.bq_table_name + + def __repr__(self): + metadata_str = ','.join([f'{f!r}' for f in self]) + return ( + f'{type(self)!r}(project_id={self.project_id}, ' + f'bq_dataset_name={self.bq_dataset_name}, ' + f'bq_table_name={self.bq_table_name}, ' + f'all_feature_metadata=[{metadata_str}])' + ) + + def get_config(self): + """Creates a config that can be converted to json.""" + config = super().get_config() + config.update({ + 'project_id': self.project_id, + 'bq_dataset_name': self.bq_dataset_name, + 'bq_table_name': self.bq_table_name, + }) + return config + + 
+class BigQueryMetadataBuilder: + """Gets metadata about the column in a BQ table.""" + + def __init__( + self, + project_id, + bq_dataset_name, + bq_table_name, + ignore_columns = (), + bq_client = None, + ): + """Creates an object that can retrieve metadata about the input BQ table. + + Args: + project_id: The BigQuery project that the table is in. + bq_dataset_name: The BigQuery dataset that has the table. + bq_table_name: The name of the table itself. + ignore_columns: Any columns that should not be included in the output. + bq_client: A BigQuery client to be used to interact with BigQuery. If this + is not provided (default) a new client will be created for the input + project. + """ + self._project_id = project_id + self._bq_dataset_name = bq_dataset_name + self._bq_table_name = bq_table_name + self._ignore_columns = ignore_columns + self._bq_client = bq_client or bigquery.Client(project=project_id) + self._query_config = bigquery.QueryJobConfig(use_legacy_sql=_USE_LEGACY_SQL) + + # Set up initial values for private variables that will be lazily updated + # and cached. + self._rows = None + + @property + def full_table_id(self): + """The full path 'project.dataset.table' of the BigQuery table.""" + return f'{self._project_id}.{self._bq_dataset_name}.{self._bq_table_name}' + + @property + def escaped_table_id(self): + """The full path of the BigQuery table escaped with `.""" + return f'`{self.full_table_id}`' + + def _query_bq( + self, + query, + job_config = None, + **kwargs, + ): + job_config = job_config or self._query_config + return self._bq_client.query(query, job_config=job_config, **kwargs) + + @property + def rows(self): + if not self._rows: + row_query = textwrap.dedent(f"""\ + SELECT COUNT(*) + FROM {self.escaped_table_id}""") + self._rows = next(self._query_bq(row_query).result())[0] + + return self._rows + + def get_feature_names_and_types(self): + """Returns a metadata collection of all the columns in the table. 
+ + Returns: + A BigQueryTableMetadata with FeatureMetadata for each column in the + object's BQ table. + """ + table = self._bq_client.get_table(self.full_table_id) + + if self._ignore_columns: + ignored_set = set(self._ignore_columns) + else: + ignored_set = set() + + columns = [] + for idx, column in enumerate(table.schema): + if column.name not in ignored_set: + if column.field_type not in _BIGQUERY_SUPPORTED_DATA_TYPES: + raise NotImplementedError( + f'The datatype {column.field_type} for {column.name} ' + 'is not currently supported.' + ) + columns.append(FeatureMetadata(column.name, idx, column.field_type)) + + return BigQueryTableMetadata( + columns, + project_id=self._project_id, + bq_dataset_name=self._bq_dataset_name, + bq_table_name=self._bq_table_name, + ) + + def _construct_numeric_metadata_query( + self, feature, options + ): + """Constructs a query to get the numeric metadata for the input feature. + + Note that the suffix of each of the query results must match the attribute + that it will be assigned to. + + In my tests doing this as a single query for each feature appeared to give + the best performance as it avoided multiple queries but also avoided the + anti-pattern of throwing a bunch of queries together: + https://cloud.google.com/bigquery/docs/best-practices-performance-compute#split_complex_queries_into_multiple_smaller_ones + + + Args: + feature: The current metadata for the feature. Must include the name. + options: Specifications for what types of metadata should be retrieved. + + Returns: + The query to run in bigquery to get the metadata. + """ + if not feature.is_numeric: + raise ValueError(f'This function only works for numeric data: {feature}') + + # Cache for brevity, performance and ease of use. + name = feature.name + # The CTE queries list can contain multiple CTE expression which will be + # proceeded by the WITH statement and joined with commas. 
+ cte_queries = [] + + # The where clauses list will be proceeded by the WHERE statement and + # joined with ands. + where_clauses = list(options.where_clauses) + + # The select components will be combined with commas to create the main + # query. The output of this query is what will be used to create the + # results. + main_query_components = [] + if options.get_mean: + main_query_components.append(f'AVG({name}) as {name}_mean') + + if options.get_variance: + main_query_components.append(f'VARIANCE({name}) as {name}_variance') + + if options.get_min: + main_query_components.append(f'MIN({name}) as {name}_min') + + if options.get_max: + main_query_components.append(f'MAX({name}) as {name}_max') + + if options.get_median: + # Look at other ways of doing this. + main_query_components.append( + f'APPROX_QUANTILES({name}, 3)[OFFSET(1)] as {name}_median' + ) + + if options.number_of_quantiles: + main_query_components.append( + f'APPROX_QUANTILES({name}, {options.number_of_quantiles}) ' + f'as {name}_quantiles' + ) + + is_float = feature.input_data_type.startswith('FLOAT') + + if options.get_log_mean or options.get_log_variance: + cte_where = ( + where_clauses + [f'NOT IS_NAN({name})'] if is_float else where_clauses + ) + cte_shift = 'log_shift' + where_statement = bq_utils.where_statement_from_clauses(cte_where) + cte_queries.append( + f'{cte_shift} AS (' + f'SELECT {options.min_log_value} ' + f'- LEAST(0, MIN({name})) as value ' + f'FROM {self.escaped_table_id}{where_statement})' + ) + log_shift_subquery = f'(SELECT value from {cte_shift})' + + cte_transformed = 'log_transformed' + cte_queries.append( + f'{cte_transformed} AS (' + f'SELECT LOG({name} + {log_shift_subquery}) as value ' + f'FROM {self.escaped_table_id}{where_statement})' + ) + + main_query_components.append(f'{log_shift_subquery} as {name}_log_shift') + if options.get_log_mean: + main_query_components.append( + f'(SELECT AVG(value) FROM {cte_transformed}) as {name}_log_mean' + ) + if 
options.get_log_variance: + main_query_components.append( + '(SELECT VARIANCE(value) ' + f'FROM {cte_transformed}) as {name}_log_variance' + ) + + query_parts = [] + if cte_queries: + combined_with = ',\n'.join(cte_queries) + query_parts.append(f'WITH {combined_with}') + + combined_select = ',\n '.join(main_query_components) + if is_float: + where_clauses.append(f'NOT IS_NAN({name})') + + where_statement = bq_utils.where_statement_from_clauses(where_clauses) + query_parts.append( + f'SELECT\n{combined_select}\n' + f'FROM {self.escaped_table_id}{where_statement}' + ) + + return '\n'.join(query_parts) + + def update_numeric_feature_metadata( + self, feature, options + ): + """Updates a numeric feature's metadata based on BigQuery results. + + Args: + feature: The feature to update. + options: The options for getting the metadata. + """ + query_string = self._construct_numeric_metadata_query(feature, options) + # We only expect one row of results. + query_result = next(self._query_bq(query_string).result()) + if not query_result: + raise ValueError(f'Expected a result from query: {query_string}') + + # Do this more efficiently and safely by only iterating over + # numeric attributes. + query_columns = set(query_result.keys()) + for metadata_field in dataclasses.fields(feature): + field_name = metadata_field.name + # This means we overwrite any already existing data. + # Figure out a cleaner way to do this. + query_result_name = f'{feature.name}_{field_name}' + if query_result_name in query_columns: + if query_result[query_result_name] is None: + logging.warning('Got a None value for %s on %s', field_name, feature) + setattr(feature, field_name, query_result[query_result_name]) + + def update_discrete_feature_metadata( + self, feature, options + ): + """Updates a discrete feature's metadata based on BigQuery results. + + Args: + feature: The feature to update. + options: The options for getting the metadata. 
+ """ + if not feature.is_discrete: + raise ValueError(f'This function only works for discrete data: {feature}') + + # Cache for brevity, performance and ease of use. + name = feature.name + + where_clauses = [f'{name} IS NOT NULL'] + where_clauses.extend(options.where_clauses) + + # We will get the mode from the first APPROX_TOP_COUNT output + num_top_count = max(options.max_vocab_size, int(options.get_mode)) + + select_parts = [ + # Should be close enough to use APPROX_COUNT_DISTINCT instead. + # https://cloud.google.com/bigquery/docs/reference/standard-sql/approximate_aggregate_functions#approx_count_distinct + f'APPROX_COUNT_DISTINCT({name}) as {name}_cardinality', + ] + if num_top_count <= 10000: + # While it is not documented APPROX_TOP_COUNT does not appear to work + # with numbers >10000. When this occurs we have to use 2 queries + # instead. + select_parts.append( + f'APPROX_TOP_COUNT({name}, {num_top_count}) as {name}_top_count' + ) + + combined_select = ',\n '.join(select_parts) + # The where clauses list will be proceeded by the WHERE statement and + # joined with ands. + where_statement = bq_utils.where_statement_from_clauses(where_clauses) + query_string = ( + f'SELECT\n {combined_select}\n' + f'FROM {self.escaped_table_id}{where_statement}' + ) + + # We only expect one row of results. + query_result = next(self._query_bq(query_string).result()) + if not query_result: + raise ValueError(f'Expected a result from query: {query_string}') + + # We need to handle mode independently since it comes from the vocab. + feature.cardinality = int(query_result[f'{feature.name}_cardinality']) + + if num_top_count <= 10000: + feature.vocabulary = { + item['value']: int(item['count']) + for item in query_result[f'{feature.name}_top_count'] + } + else: + # Since we are asking for a vocabulary of over 10k elements we need to use + # a second query to get those results and the convert it to a dictionary. 
+ dataframe = ( + self._query_bq( + f'SELECT {name} as value, count(*) as count ' + f'FROM {self.escaped_table_id}{where_statement} GROUP BY {name} ' + f'ORDER BY 2 DESC LIMIT {num_top_count}' + ) + .result() + .to_dataframe() + ) + feature.vocabulary = { + r['value']: int(r['count']) for r in dataframe.to_dict('records') + } + + # The vocabulary is already ordered by the top count. + if options.get_mode: + feature.mode = next(iter(feature.vocabulary)) + + def get_metadata_for_all_features( + self, options + ): + """Gets the metadata_collection for all the features in the specified table. + + Args: + options: The options to use when retrieving the metadata. + + Returns: + A collection of metadata for each feature in the table with the specified + values calculated. + """ + feature_metadata = self.get_feature_names_and_types() + + if options.no_options(): + return feature_metadata + + for feature in feature_metadata: + if feature.is_numeric: + self.update_numeric_feature_metadata(feature, options) + + if feature.is_discrete: + self.update_discrete_feature_metadata(feature, options) + + return feature_metadata + + # Add the ability to export the metadata_collection. + + @classmethod + def from_table_parts( + cls, table_parts, *args, **kwargs + ): + """Constructs a BigQueryMetadataBuilder directly from table_parts. + + Args: + table_parts: The BigQuery path parts for the table for the metadata. + *args: Positional arguments to be passed to __init__. + **kwargs: Keyword arguments to be passed to __init__. + + Returns: + The BigQueryMetadataBuilder for the specified table. + """ + return cls( + project_id=table_parts.project_id, + bq_dataset_name=table_parts.bq_dataset_name, + bq_table_name=table_parts.bq_table_name, + *args, + **kwargs, + ) + + @classmethod + def from_table_path( + cls, table_path, *args, **kwargs + ): + """Constructs a BigQueryMetadataBuilder directly from a full table path. 
+ + Args: + table_path: The full path of the BigQuery table in the format + project.dataset.table. + *args: Positional arguments to be passed to __init__. + **kwargs: Keyword arguments to be passed to __init__. + + Returns: + The BigQueryMetadataBuilder for the specified table. + """ + table_parts = bq_utils.BQTablePathParts.from_full_path(table_path) + return cls.from_table_parts(table_parts, *args, **kwargs) diff --git a/automated_feature_engineering/feature_selection.py b/automated_feature_engineering/feature_selection.py new file mode 100644 index 00000000000..8cd502efd7d --- /dev/null +++ b/automated_feature_engineering/feature_selection.py @@ -0,0 +1,408 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Feature selection layers to be integrated into the architecture.""" + +import sys +from typing import Any, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf +# This import format necessary: b/164280272. 
+import tensorflow_addons.activations as tfa_activations + + +class FeatureSelectionSparseMasks(tf.keras.layers.Layer): + """Feature selection module using sparse learnable masks.""" + + def __init__( + self, + num_features: int, + num_selected_features: Optional[int] = None, + mask_temperature: float = 0.9, + mask_perturbation_amplitude: float = 0.0, + scale_before_sparsemax: bool = True, + num_feature_scaler: Any = None, + do_mixed_precision: bool = False, + gt_salient_feature_indices: Optional[np.ndarray] = None, + reduce_feature_dim: bool = False, + output_mask: bool = False, + use_softmax_mask: bool = False, + epsilon: float = 0.00001, + seed: Optional[int] = None, + ): + """Initializes feature selection module. + + Args: + num_features: number of total features. + num_selected_features: number of selected features. + mask_temperature: amount to scale the mask to encourage higher or lower + sparsity; encourages higher sparsity if > 1, and lower if < 1. + mask_perturbation_amplitude: amplitude of noise to add to the mask to + introduce stochasticity. + scale_before_sparsemax: whether to scale mask before sparsemax, so + sparsemax achieves target sparsity level. + num_feature_scaler: object used for number of features tempering. + do_mixed_precision: whether to use mixed precision. + gt_salient_feature_indices: ground truth indices for salient features, + applies only to synthetic data. + reduce_feature_dim: whether to reduce feature dimension to speed up + inference. + output_mask: whether to return the learned sparse mask. + use_softmax_mask: whether to use softmax mask for better latency. + epsilon: small floating point value to bound away from 0. + seed: seed for random number generation. 
+ """ + super().__init__() + self.mask = tf.Variable( + tf.random.uniform((1, num_features), maxval=1.0, seed=seed), + dtype=tf.float32, + ) + + if reduce_feature_dim: + initializer = tf.keras.initializers.GlorotNormal() + self.feature_weight = tf.Variable( + initializer(shape=(num_features, num_selected_features)), + dtype=tf.float32, + ) + self.feature_bias = tf.Variable( + np.zeros(num_selected_features), dtype=tf.float32 + ) + + self.num_features = num_features + self.num_selected_features = min(num_selected_features, num_features) + self.mask_temperature = mask_temperature + # TODO(yihed): remove mask_perturbation_amplitude parameter and function. + self.mask_perturbation_amplitude = mask_perturbation_amplitude + self.scale_before_sparsemax = scale_before_sparsemax + self.num_feature_scaler = num_feature_scaler + self.do_mixed_precision = do_mixed_precision + self.gt_salient_feature_indices = gt_salient_feature_indices + self.reduce_feature_dim = reduce_feature_dim + self._output_mask = output_mask + # Mask cached after training to accelerate inference time. + self.trained_mask = None + self.trained_feature_weight = None + self.trained_top_idx = None + self.use_softmax_mask = use_softmax_mask + self.epsilon = epsilon + + def softmax_mask(self, inputs: tf.Tensor) -> Tuple[tf.Tensor, ...]: + """Use softmax to learn the sparse selection mask. + + This achieves up to 40% latency improvements with insignificant degradation + in accuracy + Args: + inputs: data to learn the selection mask on. + + Returns: + Tensor where non-selected portions of the inputs are zeroed-out, as well + as the learned mask if specified. 
+ """ + top_val, top_idx = tf.math.top_k(self.mask, k=self.num_selected_features) + top_val = tf.reshape(top_val, [-1]) + top_val = tf.nn.softmax(top_val) + top_idx = tf.reshape(top_idx, [-1, 1]) + + transform_prob = tf.scatter_nd( + top_idx, top_val, tf.constant([self.num_features]) + ) + if self._output_mask: + return inputs * tf.expand_dims(transform_prob, 0), transform_prob + else: + return inputs * tf.expand_dims(transform_prob, 0) + + def fast_infer(self, inputs: tf.Tensor) -> tf.Tensor: + masked_inputs = tf.gather(inputs, self.trained_top_idx, axis=-1) + # Only apply weights and bias to the selected features. + masked_inputs = ( + tf.expand_dims(masked_inputs, -1) * self.trained_feature_weight + ) + return tf.math.reduce_sum(masked_inputs, axis=-2) + self.feature_bias + + def call( + self, inputs: tf.Tensor, training: bool = True + ) -> Union[tf.Tensor, Tuple[tf.Tensor, tf.Tensor]]: + """Feature selection module forward pass. + + This call uses the input to learn the feature mask, and zeros out + non-selected features. + + Args: + inputs: Tensor of dataset samples. + training: whether is in training mode. + + Returns: + Processed input tensor with non-selected features zeroed-out. If mutual_ + information, HSIC, or compute_entropy is enabled, also returns the + sparse mask. + """ + if self.use_softmax_mask: + return self.softmax_mask(inputs) + + # TF requires all conditions to have same type, hence explicit "not None". 
+ if ( + not training + and self.reduce_feature_dim + and self.trained_mask is not None + ): + return self.fast_infer(inputs), self.trained_mask + + # Subtract the min for numerical stability (not often needed) + # norm_mask = self.mask - tf.math.reduce_min(self.mask, keepdims=True) + if self.num_feature_scaler is not None: + num_selected_features = self.num_feature_scaler.get_num_features() + else: + num_selected_features = self.num_selected_features + + inputs_dtype = inputs.dtype + scalar = tf.constant(1, dtype=inputs_dtype) + if num_selected_features and self.scale_before_sparsemax: + scalar, top_idx = self.compute_sparsifier( + self.mask, num_selected_features, inputs_dtype + ) + else: + top_idx = None + + # Nonlinear mapping with sparsemax + if self.do_mixed_precision: + scalar = tf.cast(scalar, self.mask.dtype) + + # Noise to encourage feature selection exploration + if training: + mask_perturbation = tf.random.normal( + shape=tf.shape(self.mask), + mean=0.0, + stddev=tf.math.reduce_mean(self.mask) + * self.mask_perturbation_amplitude, + dtype=tf.float32, + ) + else: + mask_perturbation = 0.0 + mask_argument = self.mask + mask_perturbation + sparse_mask = ( + mask_argument + if num_selected_features == self.num_features + else tfa_activations.sparsemax(mask_argument * scalar) + ) + + # Note that sparsemax throws error if input is float16. + if self.do_mixed_precision: + sparse_mask = tf.cast(sparse_mask, inputs.dtype) + + # Subtract the minimum value for numerical stability. Note that sparsemax is + # invariant to a constant bias. + self.mask.assign(self.mask - tf.math.reduce_min(self.mask)) + + if num_selected_features: + non_zero_count = tf.cast( + tf.math.count_nonzero(sparse_mask), dtype=inputs_dtype + ) + + # If the number of selected features is lower than the target, apply + # temperature scaling to the mask to reduce the sparsity in selection. 
+ + if ( + non_zero_count < tf.constant(num_selected_features, inputs_dtype) + and training + ): + tempered_mask = self.mask * self.mask_temperature + if self.do_mixed_precision: + tempered_mask = tf.cast(tempered_mask, self.mask.dtype) + self.mask.assign(tempered_mask) + + # Trim the number of selected features to the target, by masking out the + # elements with the lowest coefficient values. + if not self.scale_before_sparsemax and non_zero_count > tf.constant( + num_selected_features, inputs_dtype + ): + top_k_mask = tf.cast( + sparse_mask[0, :] + >= tf.math.top_k(sparse_mask[0, :], num_selected_features)[0][-1], + inputs_dtype, + ) + # Filter out the top num_selected_features elements. + sparse_mask = sparse_mask * tf.expand_dims(top_k_mask, 0) + # Rescale the mask to add up to 1. + sparse_mask = sparse_mask / tf.math.reduce_sum(sparse_mask) + + if not training: + self.sparse_mask = sparse_mask + masked_inputs = inputs * sparse_mask + if self.reduce_feature_dim: + if top_idx is None: + top_idx = tf.math.top_k(sparse_mask, num_selected_features)[1] + # Only gather the selected features. + cur_weight = tf.gather(self.feature_weight, top_idx[0], axis=0) + masked_inputs = tf.gather(masked_inputs, top_idx[0], axis=-1) + # Only apply weights and bias to the selected features. 
+ masked_inputs = tf.expand_dims(masked_inputs, -1) * cur_weight + masked_inputs = ( + tf.math.reduce_sum(masked_inputs, axis=-2) + self.feature_bias + ) + + if self._output_mask: + return (masked_inputs, sparse_mask) + else: + return masked_inputs + + def save_mask( + self, + ): + """Cache mask after training to accelerate inference.""" + scalar = 1.0 + if self.num_selected_features and self.scale_before_sparsemax: + scalar, top_idx = self.compute_sparsifier( + self.mask, self.num_selected_features, tf.float32 + ) + + sparse_mask = tfa_activations.sparsemax(self.mask * scalar) + self.trained_mask = sparse_mask + + if top_idx is None: + top_idx = tf.math.top_k(sparse_mask, self.num_selected_features)[1] + # Only gather the selected features. + self.trained_feature_weight = tf.gather( + self.feature_weight, top_idx[0], axis=0 + ) + self.trained_top_idx = top_idx[0] + + def print_mask(self, sparse_mask: tf.Tensor): + """Logs the learned sparse mask.""" + print("Mask pattern:") + tf.print(sparse_mask, summarize=self.num_features, output_stream=sys.stdout) + sys.stdout.flush() + print("Number of non-zero elements:") + tf.print(tf.math.count_nonzero(sparse_mask), output_stream=sys.stdout) + sys.stdout.flush() + + # Print correct feature selection rate if ground truth salient features + # are known + if ( + self.gt_salient_feature_indices is not None + and self.gt_salient_feature_indices + ): + selected_feature_indices = tf.sparse.from_dense(sparse_mask).indices[:, 1] + correct_feature_indices = tf.sets.intersection( + self.gt_salient_feature_indices[None, :], + selected_feature_indices[None, :], + ) + correct_feature_rate = tf.cast( + tf.size(correct_feature_indices), dtype=tf.float32 + ) / tf.cast(tf.size(selected_feature_indices), dtype=tf.float32) + + print("Correct feature selection rate:") + tf.print(correct_feature_rate, output_stream=sys.stdout) + sys.stdout.flush() + + def compute_sparsifier( + self, + feature_mask: tf.Variable, + num_selected_features: int, + 
inputs_dtype: tf.dtypes.DType, + ) -> Tuple[tf.Tensor, tf.Tensor]: + """Compute scalar multiplier to obtain desired sparsity level with sparsemax. + + Args: + feature_mask: feature mask used for scaling. + num_selected_features: number of features to select. + inputs_dtype: dtype of the inputs. + + Returns: + Scalar multiplier before sparsemax, and the indices of + the selected features. + """ + if num_selected_features == self.num_features: + # All features are used + return ( + tf.constant(1, dtype=inputs_dtype), + tf.expand_dims(tf.range(num_selected_features), 0), + ) + num_selected_features += 1 + + if self.do_mixed_precision: + feature_mask = tf.cast(feature_mask, inputs_dtype) + + top_elements, top_idx = tf.math.top_k( + feature_mask, k=min(self.num_features, num_selected_features + 1) + ) + top_sum = tf.reduce_sum(top_elements) + top_k_sum = top_sum - top_elements[0][-1] + top_k_idx = top_idx[:, :-1] + scalar = tf.constant(1, dtype=inputs_dtype) + + if ((num_selected_features + 1) * top_elements[0][-1] + 1 - top_sum) > 0: + # Need to increase sparsity + scalar = tf.math.reciprocal_no_nan( + top_sum - (num_selected_features + 1) * top_elements[0][-1] + ) + + elif top_k_sum - num_selected_features * top_elements[0][-2] - 1 > 0: + scalar = tf.math.reciprocal_no_nan( + top_k_sum - num_selected_features * top_elements[0][-2] + ) + + scalar += self.epsilon + return scalar, top_k_idx + + +class NumFeatureScaler: + """Class for scaling the number of features.""" + + def __init__( + self, + x_train: tf.data.Dataset, + n_epochs: int, + batch_size: int, + n_total_features: int, + target_num_features: int, + n_feature_updates: int = 5, + effective_n_step_ratio: float = 0.5, + ): + """Allows tempering of the number of features selected. + + Args: + x_train: training dataset. + n_epochs: number of epochs. + batch_size: batch size. + n_total_features: number of total features. + target_num_features: final desired number of features. 
+ n_feature_updates: number of num_selected_features changes. + effective_n_step_ratio: 0.5 means only temper during the first half of all + steps. + """ + total_steps = int(len(x_train) / batch_size * n_epochs) + + self.feature_decrement = np.ceil( + (n_total_features - target_num_features) / n_feature_updates + ) + self.n_steps_per_change = np.floor( + total_steps * effective_n_step_ratio / n_feature_updates + ) + self.step_counter = 0 + self.target_num_features = target_num_features + self.n_total_features = n_total_features + + def add_step(self): + self.step_counter += 1 + + def get_num_features(self) -> int: + cur_num_features = int( + self.n_total_features + - self.feature_decrement * self.step_counter // self.n_steps_per_change + ) + cur_num_features = max(cur_num_features, self.target_num_features) + return cur_num_features diff --git a/automated_feature_engineering/requirements.txt b/automated_feature_engineering/requirements.txt new file mode 100644 index 00000000000..895830ce750 --- /dev/null +++ b/automated_feature_engineering/requirements.txt @@ -0,0 +1,18 @@ +absl-py==1.4.0 +db-dtypes +google-auth +google-cloud-bigquery +google-cloud-bigquery-storage +google-cloud-storage +pandas>=2.2.1 +Pillow +scikit-learn==1.2.1 +tensorflow==2.11 +tensorflow_addons==0.19.0 +tensorflow_probability==0.19.0 +# The baselines that can be used if desired +# selective==1.1.1 +# xgboost==1.6.2 +# autofeat==2.1.2 +# pyHSICLasso==1.4.2 +# lassonet==0.0.11 diff --git a/automated_feature_engineering/run.sh b/automated_feature_engineering/run.sh new file mode 100755 index 00000000000..112a626d539 --- /dev/null +++ b/automated_feature_engineering/run.sh @@ -0,0 +1,31 @@ +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#!/bin/bash +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +python3 trainer.py --project_id="gcp_project_id" --dataset_name=housingPrice --train_table_name=airbnb2023_float --target=price --task_type=regression --num_steps=10 --model_type=discovery --upload_features_to_bq=False diff --git a/automated_feature_engineering/trainer.py b/automated_feature_engineering/trainer.py new file mode 100644 index 00000000000..d009c17bcd8 --- /dev/null +++ b/automated_feature_engineering/trainer.py @@ -0,0 +1,565 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Automated feature selection and engineering.""" + +import json +import os +import tempfile +from typing import Any, Dict, List, Optional, Tuple, Union + +from absl import app +from absl import flags +from absl import logging +import bq_data +import data_loader +import feature_engineering +import feature_selection +from google import auth +from google.cloud import bigquery +import tensorflow as tf +import utils + +# Needed to make GPU training deterministic +# (per https://github.com/NVIDIA/framework-determinism/blob/ +# master/doc/tensorflow.md) +# os.environ["TF_DETERMINISTIC_OPS"] = "1" + + +FLAGS = flags.FLAGS + +# Experiment parameters +_SEED = flags.DEFINE_integer("seed", 21, "Random seed") +_DATA_NAME = flags.DEFINE_string("data_name", "isolet", "Data name") + +# Number of features amplification for discovery model. +_N_FEATURES_AMPLIFIER = 3 +_USE_SOFTMAX_MASK = True +_LOGGING_STEPS = 300 +_MIN_NUM_SELECTED_FEATURES = 30 +_REQUIRED_FLAGS = ["target", "project_id", "train_table_name", "dataset_name"] + +_NUM_SELECTED_FEATURES = flags.DEFINE_integer( + "num_selected_features", None, "Number of features for feature selection" +) + +_MODEL_TYPE = flags.DEFINE_string( + "model_type", + "discovery", + "Model type can be selection or discovery.", +) +_TASK_TYPE = flags.DEFINE_string( + "task_type", + "classification", + "Task type can be classification or regression.", +) +_LOGGING_FILENAME = flags.DEFINE_string( + "logging_filename", + "features.json", + "Name of the file used for logging discovered or selected features.", +) + +_TARGET = flags.DEFINE_string( + "target", None, "Name for the training target feature.", required=False +) + +_NUM_MLP_LAYERS = flags.DEFINE_integer( + "num_mlp_layers", + 2, + "Number of MLP layers in MLP model", +) + +_BATCH_SIZE = flags.DEFINE_integer("batch_size", 2048, "Batch size") +_FEATURE_DIM = 
flags.DEFINE_integer("feature_dim", None, "Feature dimension") + +_LEARNING_RATE = flags.DEFINE_float("learning_rate", 0.01, "Learning rate.") +_NUM_STEPS = flags.DEFINE_integer("num_steps", 50, "Number of training steps") +_DECAY_STEPS = flags.DEFINE_integer("decay_steps", 500, "Decay steps") +_DECAY_RATE = flags.DEFINE_float("decay_rate", 0.5, "Decay rate") +_DATA_BUFFER_SIZE = flags.DEFINE_integer( + "data_buffer_size", 4096, "Dataset buffer size." +) +_BATCH_BUFFER_SIZE = flags.DEFINE_integer( + "batch_buffer_size", 32, "Number of batches held in shuffling buffer." +) +_PROJECT_ID = flags.DEFINE_string( + "project_id", None, "The BigQuery project ID.", required=False +) +_DATASET_NAME = flags.DEFINE_string( + "dataset_name", + None, + "BigQuery dataset name for train and test.", + required=False, +) +_TRAIN_TABLE_NAME = flags.DEFINE_string( + "train_table_name", + None, + "Table name of the training dataset.", + required=False, +) +_TEST_TABLE_NAME = flags.DEFINE_string( + "test_table_name", None, "Table name of the test dataset." +) +_CONFIG = flags.DEFINE_string( + "config", None, "Configuration string for running pipeline from container." +) +_UPLOAD_FEATURES_TO_BQ = flags.DEFINE_bool( + "upload_features_to_bq", True, "Whether to upload features to BQ table." +) +_GCS_OUTPUT_PATH = flags.DEFINE_string( + "gcs_output_path", None, "GCS output path." 
def _parse_config_string():
  """Applies the JSON --config string (container mode) onto FLAGS."""
  if _CONFIG.value is None:
    return
  container_config = json.loads(_CONFIG.value)
  # Fail fast if any mandatory flag is absent from the config payload.
  for required in _REQUIRED_FLAGS:
    if required not in container_config:
      raise ValueError(f"Required flag {required} not found under --config.")
  for name, value in container_config.items():
    setattr(FLAGS, name, value)


def check_and_set_flags(num_features, n_feature_threshold = 20):
  """Sets certain optional flag values if not specified by the user.

  Args:
    num_features: total number of raw input features.
    n_feature_threshold: feature count below which the smaller feature
      dimension is used.
  """
  # Past experiments showed performance is not very sensitive to these
  # defaults within a reasonable range — training adjusts for the difference.
  if _FEATURE_DIM.value is None:
    is_small = num_features <= n_feature_threshold
    FLAGS.feature_dim = 64 if is_small else 128

  if _NUM_SELECTED_FEATURES.value is None:
    FLAGS.num_selected_features = max(
        _MIN_NUM_SELECTED_FEATURES, num_features // 10
    )
def load_data_bq(
    bq_client,
):
  """Loads and returns training and test datasets from BQ.

  Args:
    bq_client: BigQuery client used to read both tables.

  Returns:
    Tuple of (train dataset, test dataset, number of features, categorical
    feature names, numerical feature names, number of classes).
  """
  train_bq_info = bq_data.BQInfo(
      _PROJECT_ID.value, _DATASET_NAME.value, _TRAIN_TABLE_NAME.value
  )
  if _TEST_TABLE_NAME.value:
    test_bq_info = bq_data.BQInfo(
        _PROJECT_ID.value, _DATASET_NAME.value, _TEST_TABLE_NAME.value
    )
  else:
    # No dedicated test table: evaluate on the training table.
    test_bq_info = train_bq_info
    logging.info("Using training data for evaluation.")

  (train_dataset, num_features, cat_features, numerical_features,
   num_classes) = process_dataset(bq_client, train_bq_info, training=True)
  # Only the dataset itself is needed from the test-side processing.
  test_dataset, *_ = process_dataset(bq_client, test_bq_info, training=False)
  # TODO(yihed): obtain num_features and num_classes.

  return (
      train_dataset,
      test_dataset,
      num_features,
      cat_features,
      numerical_features,
      num_classes,
  )


def load_data():
  """Loads datasets for training.

  Returns:
    Datasets for training.

  Raises:
    ValueError: if specified dataset is not supported.
  """
  if _DATA_NAME.value != "isolet":
    raise ValueError(f"Dataset {_DATA_NAME.value} not supported.")

  x_train, x_test, y_train, y_test, _, num_classes, _ = (
      data_loader.load_isolet()
  )
  x_train = x_train.values
  x_test = x_test.values
  # Labels become column vectors so they batch alongside the features.
  y_train = y_train.values.reshape((-1, 1))
  y_test = y_test.values.reshape((-1, 1))

  ds_train = (
      tf.data.Dataset.from_tensor_slices((x_train, y_train))
      .shuffle(
          buffer_size=_DATA_BUFFER_SIZE.value,
          seed=_SEED.value,
          reshuffle_each_iteration=True,
      )
      .repeat(_NUM_STEPS.value)
      .batch(_BATCH_SIZE.value, drop_remainder=True)
      .prefetch(tf.data.AUTOTUNE)
  )
  ds_test = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(
      _BATCH_SIZE.value, drop_remainder=False
  )
  return (ds_train, ds_test, x_train.shape[-1], num_classes)


def get_predictor():
  """Generates model predictor head."""
  # A stack of identically-sized ReLU layers forming the MLP head.
  return tf.keras.Sequential([
      tf.keras.layers.Dense(_FEATURE_DIM.value, activation="relu")
      for _ in range(_NUM_MLP_LAYERS.value)
  ])
+ """ + discovery_model = feature_engineering.FeatureDiscoveryModel( + num_features, + _NUM_SELECTED_FEATURES.value, + _FEATURE_DIM.value, + _NUM_MLP_LAYERS.value, + n_temporal_features=0, + num_cat_features=num_cat_features, + ) + return discovery_model + + +def define_feature_selector( + num_features, + num_selected_features, + num_feature_scaler = None, +): + """Define module for feature selection.""" + feature_selector = feature_selection.FeatureSelectionSparseMasks( + num_features=num_features, + num_selected_features=( + num_selected_features * _N_FEATURES_AMPLIFIER + if _MODEL_TYPE.value == "discovery" + else num_selected_features + ), + num_feature_scaler=num_feature_scaler, + use_softmax_mask=_USE_SOFTMAX_MASK, + ) + return feature_selector + + +def eval_on_prediction( + data, prediction_model +): + """Evaluates the trained model.""" + predictions = [] + labels = [] + for batch in data: + batch_features = [] + batch_features.extend(batch[:-1]) + labels.append(batch[-1]) + + pred = prediction_model(batch_features, training=False)[0] + predictions.append(pred) + + predictions = tf.concat(predictions, axis=0) + return predictions, labels + + +def train( + ds_train, + ds_test, + is_classification, + num_features, + cat_features, + numerical_features, + num_classes, + bq_client, +): + """Trains model for the user-supplied task type.""" + + # TODO(yihed): account for embedding dimension in this shape. + input_features = tf.keras.Input(shape=(num_features,)) + embed_idx_input = tf.keras.Input(shape=(len(cat_features),)) + # TODO(yihed): update this definition. 
+ num_feature_scaler = None + + num_selected_features = min(_NUM_SELECTED_FEATURES.value, num_features) + + feature_selector = define_feature_selector( + num_features, num_selected_features, num_feature_scaler + ) + selected_features = feature_selector(input_features) + if _MODEL_TYPE.value == "discovery": + discovery_model = define_discovery_model(num_features, len(cat_features)) + representation, _ = discovery_model( + selected_features, idx_inputs=embed_idx_input + ) + else: + discovery_model = None + representation = selected_features + + dense_model = get_predictor() + latents = dense_model(representation) + + if is_classification: + predictor = tf.keras.layers.Dense(num_classes) + else: + predictor = tf.keras.layers.Dense(1) + + prediction = predictor(latents) + prediction_model = tf.keras.Model( + inputs=[input_features, embed_idx_input], + outputs=prediction, + name="prediction_model", + ) + lr = tf.keras.optimizers.schedules.ExponentialDecay( + _LEARNING_RATE.value, + decay_steps=_DECAY_STEPS.value, + decay_rate=_DECAY_RATE.value, + staircase=False, + ) + optimizer = tf.keras.optimizers.Adam(lr) + if is_classification: + loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) + else: + loss_fn = tf.keras.losses.MeanAbsoluteError() + + do_training( + ds_train, + prediction_model=prediction_model, + discovery_model=discovery_model, + loss_fn=loss_fn, + optimizer=optimizer, + cat_features=cat_features, + numerical_features=numerical_features, + num_feature_scaler=num_feature_scaler, + ) + + # TODO(yihed): return string of selected features when model_type = selection. 
def _upload_transforms_to_gcs(transforms):
  """Uploads discovered transforms or selected features to GCS.

  Args:
    transforms: transform description (dict or other JSON-serializable value)
      to be written as JSON under the configured GCS output path.
  """
  if not _GCS_OUTPUT_PATH.value:
    logging.info("Not logging features, as no GCS output path is specified.")
    return

  gcs_feature_path = os.path.join(
      _GCS_OUTPUT_PATH.value, _LOGGING_FILENAME.value
  )
  with tf.io.gfile.GFile(gcs_feature_path, "w") as f:
    payload = transforms
    if isinstance(payload, dict):
      # Keys arrive as bytes; decode and stringify values for JSON.
      payload = {
          key.decode("utf-8"): str(value) for key, value in payload.items()
      }
    f.write(json.dumps(payload))
+ """ + + for step, step_data in enumerate(ds_train): + if step >= _NUM_STEPS.value: + break + x = step_data[bq_data.X_KEY] + cat_embed_idx = step_data[bq_data.EMBED_IDX_KEY] + labels = step_data[bq_data.TARGET_KEY] + + with tf.GradientTape() as tape: + + logits = prediction_model([x, cat_embed_idx], training=True) + loss = loss_fn(labels, logits) + if step % _LOGGING_STEPS == 0: + logging.info("step %d loss %f", step, loss) + if discovery_model: + transforms, _, _ = feature_engineering.recover_transforms( + discovery_model, + cat_features=cat_features, + numerical_features=numerical_features, + ) + _upload_transforms_to_gcs(transforms) + + grads = tape.gradient(loss, prediction_model.trainable_variables) + + optimizer.apply_gradients(zip(grads, prediction_model.trainable_variables)) + if num_feature_scaler: + num_feature_scaler.add_step() + + +def infer_and_upload_features( + model, + discovery_model, + dataset, + bq_client, + cat_features, + numerical_features, +): + """Infers discovered features on the given dataset.""" + transforms, ranked_feature_names, feature_ranking_idx = ( + feature_engineering.recover_transforms( + discovery_model, + cat_features=cat_features, + numerical_features=numerical_features, + ) + ) + _upload_transforms_to_gcs(transforms) + logging.info( + "Discovered feature transforms ordered by importance: %s", transforms + ) + feature_table_name = utils.infer_and_upload_discovered_features( + dataset, + model, + bq_client, + _PROJECT_ID.value, + _DATASET_NAME.value, + _TRAIN_TABLE_NAME.value, + feature_names=ranked_feature_names, + feature_ranking=feature_ranking_idx, + cat_features=cat_features, + numerical_features=numerical_features, + upload_to_bq=_UPLOAD_FEATURES_TO_BQ.value, + ) + logging.info("Feature table name: %s", feature_table_name) + + return feature_table_name, transforms + + +def main(args): + del args + _parse_config_string() + logging.info("Flags: %s", FLAGS.flag_values_dict()) + credentials, _ = auth.default() + bq_client = 
bigquery.Client( + project=_PROJECT_ID.value, credentials=credentials + ) + ( + ds_train, + ds_test, + num_features, + cat_features, + numerical_features, + num_classes, + ) = load_data_bq(bq_client) + check_and_set_flags(num_features) + is_classification = _TASK_TYPE.value == "classification" + train( + ds_train, + ds_test, + is_classification, + num_features, + cat_features, + numerical_features, + num_classes, + bq_client, + ) + + +if __name__ == "__main__": + app.run(main) diff --git a/automated_feature_engineering/utils.py b/automated_feature_engineering/utils.py new file mode 100644 index 00000000000..420b0719c58 --- /dev/null +++ b/automated_feature_engineering/utils.py @@ -0,0 +1,1001 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Compute entropy, to be used as diversity regularizer.""" + +import json +import os +# TODO(yihed): replace pickle with protobuf. 
def read_json_config(config_path):
  """Reads config file from gcs or local files.

  Args:
    config_path: path for configuration file that indicates feature types. It
      can be gcs file or local file.

  Returns:
    The dict of config from json file.
  """
  if config_path.startswith('gs://'):
    with gfile.GFile(config_path) as f:
      return json.load(f)
  # Explicit encoding: without it, decoding depends on the platform locale,
  # which can corrupt non-ASCII feature names on non-UTF-8 systems.
  with open(config_path, 'r', encoding='utf-8') as f:
    return json.load(f)
+ """ + # TODO(yihed): make context_len configurable as input argument. + # TODO(yihed): consider how cat_embed_dim > 1 impacts temporal + # features shape. + data_types = read_json_config(config_path) + + def data_gen(data, attribute_feature_names, temporal_feature_names): + column_names = data_types.keys() + for _, row in data.iterrows(): + attribute_features = [] + temporal_features = [] + for col_name in column_names: + # Each col can be singleton attribute or a temporal feature. + data_str = row.at[col_name] + data_str = re.sub(r'\[|\]|"', '', data_str) + data_str = re.sub(r'__MISSING__', _DATA_IMPUTE_VALUE, data_str) + col = np.array(re.split(_TEMPORAL_FEATURE_SPLIT_PATTERN, data_str)) + if col_name in embedders: + # categorical feature + col = tf.squeeze(embedders[col_name](col)[: context_len + 1], -1) + else: + # numerical feature + if len(col) > context_len: + # Case for numerical feature separated by commas. + entire_col = [] + for j in range(context_len + 1): + entire_col.append(float(col[j])) + col = np.array(entire_col, dtype=np.float32) + else: + col = np.array([col[0]], dtype=np.float32) + + if col_name == target_col_name: + target_col = create_temporal_dataset_target_label( + col, col_name, context_len, embedders + ) + + col = col[:context_len] + if col_name in attribute_feature_names: + attribute_features.append(col) + elif col_name in temporal_feature_names: + temporal_features.append(col) + + # TODO(yihed): handle case when either attribute or temporal features is + # empty. + yield ( + (tf.concat(attribute_features, 0), tf.concat(temporal_features, 0)), + target_col, + ) + + attribute_feature_names = set() + temporal_feature_names = set() + attribute_dim = 0 + temporal_dim = 0 + sample_row = data.iloc[0] + for col_name, col_type in data_types.items(): + col = sample_row.loc[data.columns == col_name].to_numpy() + if len(re.split(_TEMPORAL_FEATURE_SPLIT_PATTERN, col[0])) > context_len: + # Column contains temporal feature. 
def create_temporal_dataset_target_label(
    col,
    col_name,
    context_len,
    embedders,
):
  """Creates the target label for the temporal dataset.

  Args:
    col: the target column to extract label from.
    col_name: feature name for column.
    context_len: length of the prediction context window.
    embedders: embedding layers for categorical features.

  Returns:
    The prediction target label.
  """
  # Temporal target: predict the value at the next timestamp; attribute
  # target: its single (first) value.
  raw_target = col[context_len] if len(col) > context_len else col[0]

  if col_name in embedders:
    return tf.squeeze(embedders[col_name](raw_target), -1)
  return tf.constant(raw_target, dtype=tf.float32)
+ dataset_name: name of dataset to infer discovered features for. + cat_feature_embeds: embedding modules for categorical variables. + temporal_embedders: embeddings for temporal features. + + Returns: + Discovered features inferred from the trained model. + """ + inferred_features = [] + feature_indices = None + for submodule in model.submodules: + if isinstance(submodule, feature_engineering.FeatureDiscoveryModel): + discovery_module = submodule + discovery_module.infer_features = True + break + else: + raise ValueError('No FeatureDiscoveryModel found in model.') + + for batch in dataset: + y = batch[-1] + if cat_feature_embeds: + cat_features = perform_feature_embedding(batch, cat_feature_embeds) + batch_features = (cat_features,) + idx_features = batch[0][:, : len(cat_feature_embeds)] + model_features = (cat_features, _TEMPORAL_PLACEHOLDER, idx_features) + else: + # batch consists of two components: features and labels. + batch_features = batch[0] + if temporal_embedders: + model_features = batch[0] + else: + model_features = (batch[0], _TEMPORAL_PLACEHOLDER) + + model(model_features, training=False) + learned_features = discovery_module.learned_features + + if feature_indices is None: + feature_indices = tf.squeeze(tf.where(learned_features[0] != 0)) + + learned_features = tf.gather(learned_features, feature_indices, axis=-1) + if is_classification: + n_classes = y.shape[-1] + y = tf.cast(tf.argmax(y, axis=-1), tf.float32) + y = tf.expand_dims(y, -1) + + if not isinstance(batch_features, (tuple, list)): + batch_features = (batch_features,) + + learned_features = tf.concat( + [*batch_features] + [learned_features, y], axis=-1 + ) + inferred_features.append(learned_features) + + inferred_features = tf.concat(inferred_features, axis=0) + features_filepath = get_inferred_features_filepath(dataset_name) + features_config_filepath = get_inferred_features_config_filepath(dataset_name) + features_shape = tuple(inferred_features.shape) + print('Inferred features shape: 
def _concat_feature_names(
    feature_names,
    cat_features,
    numerical_features,
):
  """Combines all feature names, appending the label column last."""
  combined = list(feature_names)
  combined.extend(cat_features)
  combined.extend(numerical_features)
  combined.append(_LABEL_COL)
  return combined
+ """ + # feature_indices = None + new_table_name = '' + new_table = None + for submodule in model.submodules: + if isinstance(submodule, feature_engineering.FeatureDiscoveryModel): + discovery_module = submodule + discovery_module.infer_features = True + break + else: + raise ValueError('No FeatureDiscoveryModel found in model.') + + if upload_to_bq: + all_feature_names = _concat_feature_names( + feature_names, cat_features, numerical_features + ) + new_table_name, table_feature_names = create_bq_table( + bq_client, + project_id, + dataset_name, + table_name, + all_feature_names, + ) + if not new_table_name: + # Table creation erred out, do not proceed. + logging.warn('Failed to create feature table.') + return new_table_name + new_table = bq_client.get_table(new_table_name) + else: + table_feature_names = [] + + for batch in dataset: + x = batch[bq_data.X_KEY] + target = batch[bq_data.TARGET_KEY] + embed_idx = batch[bq_data.EMBED_IDX_KEY] + + model([x, embed_idx], training=False) + learned_features = discovery_module.learned_features + + # if feature_indices is None: + # # feature_indices = tf.squeeze(tf.where(learned_features[0] != 0)) + # # logging.info('Inferred features dim: %d', len(feature_indices)) + + # learned_features = tf.gather(learned_features, feature_indices, axis=-1) + + if upload_to_bq: + learned_features = tf.gather(learned_features, feature_ranking, axis=-1) + all_features = tf.concat([learned_features, x, target], axis=-1) + upload_features_to_bq( + all_features, new_table, bq_client, table_feature_names + ) + + return new_table_name + + +def upload_features_to_bq( + learned_features, + table, + bq_client, + feature_names, +): + """Uploads learned features to BigQuery.""" + + rows_to_insert = pd.DataFrame(learned_features, columns=feature_names) + + bq_client.insert_rows_from_dataframe(table, rows_to_insert) + + +def create_bq_table( + bq_client, + project_id, + dataset_name, + current_table_name, + feature_names, +): + """Creates a new table 
def get_inferred_features_filepath(dataset_name):
  """Retrieves filepath for cached discovered features inferred from model.

  Args:
    dataset_name: dataset name used to build the cache file name.

  Returns:
    Path under the local data directory for the memmapped feature cache.
  """
  # exist_ok avoids the race between the existence check and creation when
  # multiple workers initialize concurrently (the old check-then-makedirs
  # pattern could raise FileExistsError).
  os.makedirs(_DATA_DIR, exist_ok=True)
  return os.path.join(_DATA_DIR, f'{dataset_name}_inferred_features.npy')


def get_inferred_features_config_filepath(dataset_name):
  """Retrieves config filepath for cached discovered features inferred from model.

  Args:
    dataset_name: dataset name used to build the cache file name.

  Returns:
    Path under the local data directory for the pickled feature config.
  """
  os.makedirs(_DATA_DIR, exist_ok=True)
  return os.path.join(
      _DATA_DIR, f'{dataset_name}_inferred_features_config.npy'
  )
def compute_hsic_loss():
  """Constructs function that computes the HSIC objective between features & labels.

  References: https://arxiv.org/pdf/1908.01580.pdf.
  https://pure.mpg.de/rest/items/item_1791468/component/file_3009860/content.

  Returns:
    Function that computes the HSIC (Hilbert-Schmidt independence criterion)
    objective between training features & labels.
  """

  def hsic_loss_fn(x_train, labels, sparse_mask):
    batch_sz = x_train.shape[0]

    # Standard-normalize only the features: different features live on
    # different scales, while the label space has a single scale.
    feat_mean = tf.math.reduce_mean(x_train, axis=0, keepdims=True)
    feat_std = tf.math.reduce_std(x_train, axis=0, keepdims=True)
    normed = tf.math.divide_no_nan(x_train - feat_mean, feat_std)
    # Condition the loss on the learned selection mask.
    normed = normed * sparse_mask

    def _rbf_gram(values):
      # Pairwise squared-l2 distances, then a Gaussian kernel whose
      # bandwidth is the std of the distances (2 is the normalizer).
      dist = tf.reduce_sum(
          (tf.expand_dims(values, 1) - tf.expand_dims(values, 0)) ** 2,
          axis=-1,
      )
      return tf.math.exp(
          -tf.math.divide_no_nan(dist, 2 * tf.math.reduce_std(dist) ** 2)
      )

    # Labels are 2d vectors (categorical labels are one-hot), so the
    # pairwise distance is meaningful for them as well.
    gram_x = _rbf_gram(normed)
    gram_y = _rbf_gram(labels)

    # Center each Gram matrix before taking the cross trace.
    centered_x = gram_x - tf.reduce_mean(gram_x, -1, keepdims=True)
    centered_y = gram_y - tf.reduce_mean(gram_y, -1, keepdims=True)
    return (
        -tf.linalg.trace(tf.matmul(centered_x, centered_y))
        / (batch_sz - 1) ** 2
    )

  return hsic_loss_fn
+ chunk_sz = 128 + i = 0 + + while i < batch_sz: + upper = min(batch_sz, i + chunk_sz) + features_diff = features1[i:upper] - features2 + + features_diff = tf.transpose(features_diff, [2, 0, 1]) + cat_features_diff = tf.boolean_mask(features_diff, data_types) + cat_features_diff = tf.transpose(cat_features_diff, [1, 2, 0]) + cat_mask = tf.boolean_mask(sparse_mask, data_types) + + cat_prob = tf.math.reduce_prod( + tf.where(cat_features_diff > 0, 1 - cat_mask, 1.0), axis=-1 + ) + + cont_data_types = True ^ data_types + cont_features_diff = tf.boolean_mask(features_diff, cont_data_types) + cont_features_diff = tf.transpose(cont_features_diff, [1, 2, 0]) + cont_mask = tf.boolean_mask(sparse_mask, cont_data_types) + cont_prob = tf.math.reduce_prod( + 1 + - ( + 1 + - tf.math.exp( + -tf.math.abs(cont_features_diff * cont_features_diff_weight) + ) + ) + * cont_mask, + axis=-1, + ) + + prob = cat_prob * cont_prob + logits_diff = ( + tf.expand_dims(logits[i:upper], 1) - tf.expand_dims(logits, 0) + ) ** 2 + + if not is_classification: + prob_loss = tf.reduce_sum(tf.squeeze(logits_diff) * prob) + else: + prob_loss = tf.reduce_sum(tf.reduce_sum(logits_diff, axis=-1) * prob) + + all_prob_loss.append(prob_loss) + i += chunk_sz + + prob_loss = tf.reduce_sum(all_prob_loss) + else: + prob_loss = 0 + + # MI loss component not based on probability mask: + if not is_classification: + square_loss = tf.reduce_sum((logits - labels) ** 2) + else: + pos_loss = tf.reduce_sum( + (1 - tf.ragged.boolean_mask(logits, labels > 0)) ** 2, -1 + ) ## + neg_loss = tf.reduce_sum( + tf.ragged.boolean_mask(logits, labels == 0) ** 2, -1 + ) ## + square_loss = tf.reduce_sum(pos_loss + neg_loss) + + # add alpha hyperparam + if verbose and step % 100 == 0: + print( + 'prob_loss {} square_loss {} %%logits{} sparse_mask non-zero ' + 'indices {}'.format( + prob_loss, + square_loss, + logits[:100], + tf.squeeze(tf.where(sparse_mask > 0)), + ) + ) + + return (prob_loss * alpha + square_loss) / batch_sz + + return 
mi_loss_fn + + +def lassonet_select( + x_train, + y_train, + num_features, + batch_size, + is_classification, + chunk_sz = 3000, +): + """Uses LassonNet for feature extraction. + + Args: + x_train: Training features. As a dataframe. + y_train: Training labels. As a dataframe. + num_features: Number of selected features. + batch_size: Training batch size for lassonet. + is_classification: Whether is classification task. + chunk_sz: Chunk size for splitting the processing. + + Returns: + Indices of selected features. + """ + try: + # lassonet is imported here, as it is not required to run the main SLM + # functions. + import lassonet # pylint: disable=g-import-not-at-top, import-error + except ImportError as e: + logging.warning('Lassonet installation is required.') + raise e + hidden_dims = (random.randrange(64, 128),) + dropout = random.uniform(0.0, 0.5) + # l2 penalization on skip connection, defaults to 0. + gamma = random.uniform(0.0, 0.4) + # hiearchy param, defaults to 10 + m_param = random.uniform(8.0, 15.0) + if is_classification: + model = lassonet.LassoNetClassifier( + batch_size=batch_size, + M=m_param, + dropout=dropout, + hidden_dims=hidden_dims, + gamma=gamma, + ) + else: + model = lassonet.LassoNetRegressor() + + start_idx = 0 + all_scores = [] + while True: + if start_idx > len(y_train): + break + end_idx = start_idx + chunk_sz + model_params = model.path( + x_train.iloc[start_idx:end_idx, :].values, + y_train[start_idx:end_idx].values, + ) + for mod in model_params: + model.load(mod.state_dict) + scores = model.feature_importances_.numpy() + all_scores.append(scores) + start_idx += chunk_sz + + all_scores = np.mean(all_scores, axis=0) + return np.argsort(all_scores)[:num_features] + + +def pfa_select(x_train, num_features): + """Uses PFA (principal feature analysis) for feature extraction. + + Args: + x_train: training features. + num_features: number of features to be selected. + + Returns: + Indices of selected features. 
+ """ + cov = covariance.EmpiricalCovariance().fit(x_train) + x_cov = cov.covariance_ + pca = decomposition.PCA(svd_solver='randomized').fit(x_cov) + cov_svecs = pca.components_ + kmeans = cluster.KMeans(num_features).fit(cov_svecs) + centers = kmeans.cluster_centers_ + dist = metrics.pairwise_distances(centers, cov_svecs) + rank_idx = np.argmin(dist, axis=-1) + return rank_idx + + +def compute_entropy(feature_mx, eps = 1e-5): + """Computes the entropy of the selected features. + + Args: + feature_mx: Tensor of selected features. + eps: Epsilon factor to achieve positive definiteness. + + Returns: + Von Neumann entropy of the covariance of the features. + """ + mx_max = tf.reduce_max(feature_mx, axis=0) + mx_min = tf.reduce_min(feature_mx, axis=0) + histograms = [] + + for i, (cur_max, cur_min) in enumerate(zip(mx_max, mx_min)): + interval_len = (cur_max - cur_min) / (N_BUCKETS - 1) + edges = tf.concat([cur_min + j * interval_len for j in range(N_BUCKETS)], 0) + histogram = tfp.stats.histogram(feature_mx[:, i], edges) + histogram /= tf.reduce_sum(histogram) + histograms.append(histogram) + + prob_mx = tf.stack(histograms, 0) + # prob_mx has shape (n_features, N_BUCKETS-1) + cov_mx = tfp.stats.covariance(prob_mx, sample_axis=-1, event_axis=0) + # logm only takes complex entries + + cov_mx = tf.cast( + cov_mx + tf.eye(cov_mx.shape[0], dtype=cov_mx.dtype) * eps, tf.complex64 + ) + + # TODO(yihed): this entropy covers cross-feature entropy, also compute + # intra-feature entropy (easier to compute.) + entropy = -tf.linalg.trace(tf.matmul(cov_mx, tf.linalg.logm(cov_mx))) + + # Since the cov_mx is positive semidefinite, the complex part is 0. + return tf.math.real(entropy) + + +def compare_entropy( + x_train, mask, n_random_sampling = 5 +): + """Compares the entropy of the selected features w/ randomly sampled features. + + Args: + x_train: Training data. + mask: Feature selection mask. + n_random_sampling: Number of random samplings to take to compare entropy. 
+ """ + selected_idx = tf.squeeze(tf.where(mask > 0), -1) + num_selected_features = len(selected_idx) + num_features = x_train.shape[-1] + + mask_features = tf.gather(x_train, selected_idx, axis=-1) + mask_feature_entropy = compute_entropy(mask_features) + avg_entropies = [] + + # randomly sample subsets of features + for _ in range(n_random_sampling): + selected_idx = tf.random.uniform( + (num_selected_features,), minval=0, maxval=num_features, dtype=tf.int64 + ) + cur_features = tf.gather(x_train, selected_idx, axis=-1) + + mask_feature_entropy = compute_entropy(cur_features) + avg_entropies.append(mask_feature_entropy) + + avg_entropy = np.mean(avg_entropies) + print(f'mask entropy {mask_feature_entropy}. Mean entropy {avg_entropy}') + + +def def_weighted_ce(y_train, max_pos_weight = 10.0): + """Defines weighted cross entropy loss.""" + + pos_weight = (len(y_train) - y_train.sum()) / y_train.sum() + pos_weight = min(pos_weight, max_pos_weight) + + def weighted_ce(labels, pred, pos_weight=pos_weight): + return tf.reduce_mean( + tf.nn.weighted_cross_entropy_with_logits( + labels, pred, pos_weight=pos_weight + ) + ) + + return weighted_ce + + +def get_feature_types(x_train): + """Decides feature column types.""" + feature_types = [] + for i, col in enumerate(x_train.iloc[0]): + if ( + isinstance(col, str) + or col is None + or x_train.dtypes[i] == np.dtype('O') + ): + feature_types.append(CAT_DATA_TYPE) + else: + feature_types.append(CONT_DATA_TYPE) + + return tf.constant(feature_types, dtype=tf.bool) + + +def create_temporal_embedders( + x_train, + config_path, + context_len = _TEMPORAL_CONTEXT_LEN, + cat_embed_dim = 1, + n_rows_for_vocab = 1000, +): + """Create embedders for categorical features, including temporal features. + + Args: + x_train: training data that created embedders are based on. + config_path: path for configuration file that indicates feature types. + context_len: historical context length for temporal features. 
+ cat_embed_dim: dimension used for categorical embeddings. + n_rows_for_vocab: number of samples from which to extract vocabulary. + + Returns: + Categorical embedding layers that have been adapted to feature vocabularies. + """ + data_types = read_json_config(config_path) + + embedders = {} + n_temporal_features = 0 + for col_name, data_type in data_types.items(): + feature_col = x_train.loc[:, x_train.columns == col_name].to_numpy() + if ( + len(re.split(_TEMPORAL_FEATURE_SPLIT_PATTERN, feature_col[0][0])) + > context_len + ): + split_col = True + n_temporal_features += 1 + else: + split_col = False + + if data_type != 'str': + continue + + vocab_set = set() + for i, row in enumerate(feature_col): + if split_col: + row = re.split(_TEMPORAL_FEATURE_SPLIT_PATTERN, row[0]) + vocab_set.update(row) + if i > n_rows_for_vocab: + break + + lookup_layer = tf.keras.layers.StringLookup() + + lookup_layer.adapt(list(vocab_set)) + embed_layer = tf.keras.layers.Embedding( + len(lookup_layer.get_vocabulary()), cat_embed_dim + ) + + def embed_func(x, embed_layer=embed_layer, lookup_layer=lookup_layer): + return embed_layer(lookup_layer(x)) + + embedders[col_name] = embed_func + + n_attribute_features = len(data_types) - n_temporal_features + return embedders, n_attribute_features, n_temporal_features + + +def category_to_idx( + x_train, x_test, normalize_range = False +): + """Reconstruct dataframe to convert categories into indices. + + Args: + x_train: Training set dataframe. + x_test: Test set dataframe. + normalize_range: Whether to range-normalize (max_min) the features. + + Returns: + Updated dataframe where categorical features are indices; + as well as supporting categorical data information. + """ + cat_feat_train = [] + cont_feat_train = [] + cat_feat_test = [] + cont_feat_test = [] + vocab_lengths = [] + train_index = x_train.index + test_index = x_test.index + + # TODO(yihed): check if affect MI datatypes computation. 
+ for i, col in enumerate(x_train.iloc[0]): + if ( + isinstance(col, str) + or col is None + or x_train.dtypes[i] == np.dtype('O') + ): + + col_train = x_train.iloc[:, i] + lookup_layer = tf.keras.layers.StringLookup() + # Replace missing values with a negative number that does not appear. + missing_value_rep = '-1' + col_train = col_train.fillna(missing_value_rep) + col_test = x_test.iloc[:, i].fillna(missing_value_rep) + lookup_layer.adapt(col_train) + + vocab_lengths.append(len(lookup_layer.get_vocabulary())) + cat_feat_train.append(lookup_layer(col_train).numpy().astype(np.float32)) + cat_feat_test.append(lookup_layer(col_test).numpy().astype(np.float32)) + else: + col_train = x_train.iloc[:, i] + col_test = x_test.iloc[:, i] + + if normalize_range: + col_min = col_train.dropna().min() + col_range = max(col_train.dropna().max() - col_min, 0.001) + col_train = (col_train - col_min) / col_range + col_test = (col_test - col_min) / col_range + + missing_value_rep = col_train.dropna().median() + cont_feat_train.append( + col_train.fillna(missing_value_rep).values.astype(np.float32) + ) + cont_feat_test.append( + col_test.fillna(missing_value_rep).values.astype(np.float32) + ) + + if not cat_feat_train: + raise ValueError('Input data do not contain any categorical features.') + # Construct new data with categorical features at the beginning + cat_feat_train.extend(cont_feat_train) + cat_feat_test.extend(cont_feat_test) + + feat_train = np.stack(cat_feat_train, axis=-1) + feat_test = np.stack(cat_feat_test, axis=-1) + + feat_train = pd.DataFrame(feat_train) + feat_train.index = train_index + feat_test = pd.DataFrame(feat_test) + feat_test.index = test_index + + return (feat_train, feat_test, vocab_lengths) + + +def perform_feature_embedding( + batch, cat_feature_embeds +): + """Embeds categorical features. + + Args: + batch: Dataset batch. + cat_feature_embeds: Categorical feature embeddings + + Returns: + Embedded features. 
+ """ + cat_features = [] + + for i, feature_embed in enumerate(cat_feature_embeds): + # Category features at beginning of x + cat_features.append(feature_embed(batch[0][:, i])) + + cat_features = tf.concat(cat_features, axis=-1) + return tf.concat( + [cat_features, batch[0][:, len(cat_feature_embeds) :]], axis=-1 + ) diff --git a/fm4tlp/experiment_config.py b/fm4tlp/experiment_config.py index a1d2d2e2982..f79a6041400 100644 --- a/fm4tlp/experiment_config.py +++ b/fm4tlp/experiment_config.py @@ -13,19 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Helpers to define experiments and models for xmanager runs. +"""Helpers to define experiments for xmanager runs. """ import dataclasses -import sys -import tensorflow.compat.v1 as tf - -from google.protobuf import text_format -from models import model_config_pb2 - - -if "gfile" not in sys.modules: - gfile = tf.io.gfile @dataclasses.dataclass(frozen=True) @@ -68,28 +59,3 @@ class ExperimentConfig: reset_nbd_loader=True, ) ] - - -_MODEL_CONFIG_PATHS = [ - "models/configs/tgn.pbtxt", - "models/configs/tgn_structmap.pbtxt", - "models/configs/tgn_structmap_alpha10.pbtxt", - "models/configs/tgn_structmap_alpha100.pbtxt", - "models/configs/edgebank.pbtxt", -] - - -def get_model_config(model_name): - """Returns a model config from the specified model name.""" - model_configs = {} - for model_config_path in _MODEL_CONFIG_PATHS: - model_config = model_config_pb2.TlpModelConfig() - filepath = str(model_config_path) - with gfile.GFile(filepath, "r") as f: - text_format.Parse(f.read(), model_config) - if model_config.model_name in model_configs: - raise ValueError( - f"Duplicate model name: {model_config.model_name}" - ) - model_configs[model_config.model_name] = model_config - return model_configs[model_name] diff --git a/fm4tlp/get_embedding_correlation.py b/fm4tlp/get_embedding_correlation.py index ca4dc489eec..4f8a96a5a58 100644 --- 
a/fm4tlp/get_embedding_correlation.py +++ b/fm4tlp/get_embedding_correlation.py @@ -45,7 +45,7 @@ import torch import tqdm -import experiment_config +import model_config as model_config_lib from models import all_models from models import model_template from modules import early_stopping @@ -169,7 +169,7 @@ def main(_): G.add_edges_from(zip(train_data.src.numpy(), train_data.dst.numpy())) # define model - model_config = experiment_config.get_model_config(_MODEL_NAME.value) + model_config = model_config_lib.get_model_config(_MODEL_NAME.value) model: model_template.TlpModel = getattr( all_models, model_config.model_class )( diff --git a/fm4tlp/model_config.py b/fm4tlp/model_config.py new file mode 100644 index 00000000000..82d22938722 --- /dev/null +++ b/fm4tlp/model_config.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# Copyright 2024 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Helpers to define models for xmanager runs. 
+""" + +import sys +import tensorflow.compat.v1 as tf + +from google.protobuf import text_format +from models import model_config_pb2 + + +if "gfile" not in sys.modules: + gfile = tf.io.gfile + + +_MODEL_CONFIG_PATHS = [ + "google_research/fm4tlp/models/configs/tgn.pbtxt", + "google_research/fm4tlp/models/configs/tgn_structmap.pbtxt", + "google_research/fm4tlp/models/configs/tgn_structmap_alpha10.pbtxt", + "google_research/fm4tlp/models/configs/tgn_structmap_alpha100.pbtxt", + "google_research/fm4tlp/models/configs/edgebank.pbtxt", +] + + +def get_model_config(model_name): + """Returns a model config from the specified model name.""" + model_configs = {} + for model_config_path in _MODEL_CONFIG_PATHS: + model_config = model_config_pb2.TlpModelConfig() + filepath = str(model_config_path) + with gfile.GFile(filepath, "r") as f: + text_format.Parse(f.read(), model_config) + if model_config.model_name in model_configs: + raise ValueError( + f"Duplicate model name: {model_config.model_name}" + ) + model_configs[model_config.model_name] = model_config + return model_configs[model_name] diff --git a/fm4tlp/test.py b/fm4tlp/test.py index f746fe5642d..964d59f194a 100644 --- a/fm4tlp/test.py +++ b/fm4tlp/test.py @@ -51,7 +51,7 @@ import torch from torch_geometric import loader as torch_geo_data_loader -import experiment_config +import model_config as model_config_lib from models import all_models from models import model_template from modules import early_stopping @@ -255,7 +255,7 @@ def main(_): total_nodes, size=_NUM_NEIGHBORS.value, device=device ) - model_config = experiment_config.get_model_config(_MODEL_NAME.value) + model_config = model_config_lib.get_model_config(_MODEL_NAME.value) test_feature_dim = 0 test_structural_features = {} structural_feats_list = [ diff --git a/fm4tlp/train.py b/fm4tlp/train.py index 9ccda23a58f..3ec360a5606 100644 --- a/fm4tlp/train.py +++ b/fm4tlp/train.py @@ -53,7 +53,7 @@ from torch_geometric import data as torch_geo_data from 
torch_geometric import loader as torch_geo_data_loader -import experiment_config +import model_config as model_config_lib from models import all_models from models import model_template from modules import early_stopping @@ -286,7 +286,7 @@ def main(_): total_nodes, size=_NUM_NEIGHBORS.value, device=device ) - model_config = experiment_config.get_model_config(_MODEL_NAME.value) + model_config = model_config_lib.get_model_config(_MODEL_NAME.value) train_feature_dim = 0 train_structural_features = {} val_structural_features = {}