From e5b5af4c78b1c7996ba8f71431171af683409c37 Mon Sep 17 00:00:00 2001 From: hlums Date: Mon, 17 Jun 2019 22:40:24 +0000 Subject: [PATCH 001/108] Added warmup and support for two-sequence classification. --- utils_nlp/bert/common.py | 57 +++++++++++++++++++++-- utils_nlp/bert/sequence_classification.py | 41 +++++++++++++--- 2 files changed, 87 insertions(+), 11 deletions(-) diff --git a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index fb15d35f7..1b0e9cf9d 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -5,6 +5,7 @@ from enum import Enum import warnings import torch +from tqdm import tqdm from torch.utils.data import ( DataLoader, @@ -54,8 +55,10 @@ def tokenize(self, text): Returns: [list]: [description] """ - tokens = [self.tokenizer.tokenize(x) for x in text] - return tokens + if isinstance(text[0], str): + return [self.tokenizer.tokenize(x) for x in tqdm(text)] + else: + return [[self.tokenizer.tokenize(x) for x in sentences] for sentences in tqdm(text)] def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): """Preprocessing of input tokens: @@ -80,15 +83,59 @@ def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): ) max_len = BERT_MAX_LEN - # truncate and add BERT sentence markers - tokens = [["[CLS]"] + x[0 : max_len - 2] + ["[SEP]"] for x in tokens] + if isinstance(tokens[0], str): + tokens = [x[0 : max_len - 2] + ["[SEP]"] for x in tokens] + token_type_ids = None + else: + + def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + # This is a simple heuristic which will always truncate the longer sequence + # one token at a time. This makes more sense than truncating an equal percent + # of tokens from each, since if one sequence is very short then each token + # that's truncated likely contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + tokens_a.append("[SEP]") + tokens_b.append("[SEP]") + + return [tokens_a, tokens_b] + + # print(tokens[:2]) + # get tokens for each sentence [[t00, t01, ...] [t10, t11,... ]] + tokens = [_truncate_seq_pair(sentence[0], sentence[1], max_len - 3) # [CLS] + 2x [SEP] + for sentence in tokens] + + # construct token_type_ids [[0, 0, 0, 0, ... 0, 1, 1, 1, ... 
1], [0, 0, 0, ..., 1, 1, ] + token_type_ids = [ + [[i] * len(sentence) for i, sentence in enumerate(example)] + for example in tokens + ] + # merge sentences + tokens = [[token for sentence in example for token in sentence] + for example in tokens] + # prefix with [0] for [CLS] + token_type_ids = [[0] + [i for sentence in example for i in sentence] + for example in token_type_ids] + # pad sequence + token_type_ids = [x + [0] * (max_len - len(x)) + for x in token_type_ids] + + tokens = [["[CLS]"] + x for x in tokens] # convert tokens to indices tokens = [self.tokenizer.convert_tokens_to_ids(x) for x in tokens] # pad sequence tokens = [x + [0] * (max_len - len(x)) for x in tokens] # create input mask input_mask = [[min(1, x) for x in y] for y in tokens] - return tokens, input_mask + return tokens, input_mask, token_type_ids def preprocess_ner_tokens( self, diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index 60b3ce02c..2086e2f02 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -42,10 +42,12 @@ def fit( token_ids, input_mask, labels, + token_type_ids=None, num_gpus=None, num_epochs=1, batch_size=32, lr=2e-5, + warmup_proportion=None, verbose=True, ): """Fine-tunes the BERT classifier using the given training data. @@ -62,6 +64,9 @@ def fit( Defaults to 1. batch_size (int, optional): Training batch size. Defaults to 32. lr (float): Learning rate of the Adam optimizer. Defaults to 2e-5. + warmup_proportion (float, optional): Proportion of training to + perform linear learning rate warmup for. E.g., 0.1 = 10% of + training. Defaults to None. verbose (bool, optional): If True, shows the training progress and loss values. Defaults to True. """ @@ -90,7 +95,19 @@ def fit( }, ] - opt = BertAdam(optimizer_grouped_parameters, lr=lr) + num_train_optimization_steps = ( + int(len(token_ids) / batch_size) * num_epochs + ) + + if warmup_proportion is None: + opt = BertAdam(optimizer_grouped_parameters, lr=lr) + else: + opt = BertAdam( + optimizer_grouped_parameters, + lr=lr, + t_total=num_train_optimization_steps, + warmup=warmup_proportion, + ) # define loss function loss_func = nn.CrossEntropyLoss().to(device) @@ -99,7 +116,8 @@ def fit( self.model.train() # training mode num_examples = len(token_ids) num_batches = int(num_examples / batch_size) - + + token_type_ids_batch = None for epoch in range(num_epochs): for i in range(num_batches): @@ -115,12 +133,17 @@ def fit( mask_batch = torch.tensor( input_mask[start:end], dtype=torch.long, device=device ) + + if token_type_ids is not None: + token_type_ids_batch = torch.tensor( + token_type_ids[start:end], dtype=torch.long, device=device + ) opt.zero_grad() y_h = self.model( input_ids=x_batch, - token_type_ids=None, + token_type_ids=token_type_ids_batch, attention_mask=mask_batch, labels=None, ) @@ -141,10 +164,10 @@ def fit( ) ) # empty cache - del [x_batch, y_batch, mask_batch] + del [x_batch, y_batch, mask_batch, token_type_ids_batch] torch.cuda.empty_cache() - def predict(self, token_ids, input_mask, num_gpus=None, batch_size=32): + def predict(self, token_ids, input_mask, token_type_ids=None, num_gpus=None, batch_size=32): """Scores the given dataset and returns the predicted classes. Args: token_ids (list): List of training token lists. 
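The sentence-pair preprocessing added to `common.py` above, together with the `token_type_ids` argument that `fit()` now accepts, follows the standard BERT packing scheme: `[CLS] sentence_a [SEP] sentence_b [SEP]`, with segment id 0 up to and including the first `[SEP]` and 1 for the rest. A minimal, self-contained illustration of the layout these changes produce (the tokens below are made up and are not part of this patch):

```python
# Illustration only: the flattened inputs that preprocess_classification_tokens
# builds for one sentence pair, before conversion to vocabulary ids and padding.
tokens_a = ["a", "plane", "is", "taking", "off", ".", "[SEP]"]
tokens_b = ["an", "air", "plane", "is", "taking", "off", ".", "[SEP]"]

tokens = ["[CLS]"] + tokens_a + tokens_b
token_type_ids = [0] + [0] * len(tokens_a) + [1] * len(tokens_b)
input_mask = [1] * len(tokens)  # padded positions receive 0 later

assert len(tokens) == len(token_type_ids) == len(input_mask)
```

These three parallel lists, padded with zeros up to `max_len`, are what the modified `fit()` and `predict()` consume.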
@@ -173,10 +196,16 @@ def predict(self, token_ids, input_mask, num_gpus=None, batch_size=32): mask_batch = torch.tensor( mask_batch, dtype=torch.long, device=device ) + token_type_ids_batch = None + if token_type_ids is not None: + token_type_ids_batch = torch.tensor( + token_type_ids[i : i + + batch_size], dtype=torch.long, device=device + ) with torch.no_grad(): p_batch = self.model( input_ids=x_batch, - token_type_ids=None, + token_type_ids=token_type_ids_batch, attention_mask=mask_batch, labels=None, ) From 53cdba04721e18ca931b3f0bd44e8b241526e87c Mon Sep 17 00:00:00 2001 From: hlums Date: Mon, 17 Jun 2019 23:07:44 +0000 Subject: [PATCH 002/108] Added entailment notebook on XNLI --- .../entailment_xnli_multilingual.ipynb | 434 ++++++++++++++++++ 1 file changed, 434 insertions(+) create mode 100644 scenarios/entailment/entailment_xnli_multilingual.ipynb diff --git a/scenarios/entailment/entailment_xnli_multilingual.ipynb b/scenarios/entailment/entailment_xnli_multilingual.ipynb new file mode 100644 index 000000000..81dd40993 --- /dev/null +++ b/scenarios/entailment/entailment_xnli_multilingual.ipynb @@ -0,0 +1,434 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multi-lingual Entailment on XNLI Dataset using BERT" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Required packages\n", + "* pytorch-pretrained-bert\n", + "* pandas\n", + "* seqeval\n", + "* unicode" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "import sys\n", + "import os\n", + "import random\n", + "import numpy as np\n", + "import csv\n", + "import six\n", + "\n", + "import torch\n", + "\n", + "nlp_path = os.path.abspath('../../')\n", + "if nlp_path not in sys.path:\n", + " sys.path.insert(0, nlp_path)\n", + "\n", + "from utils_nlp.bert.sequence_classification import BERTSequenceClassifier\n", + "from utils_nlp.bert.common import Language, Tokenizer" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# set random seeds\n", + "random_seed = 42\n", + "random.seed(random_seed)\n", + "np.random.seed(random_seed)\n", + "torch.manual_seed(random_seed)\n", + "num_cuda_devices = torch.cuda.device_count()\n", + "if num_cuda_devices > 1:\n", + " torch.cuda.manual_seed_all(random_seed)\n", + "\n", + "# model configurations\n", + "language = Language.CHINESE\n", + "do_lower_case = True\n", + "max_seq_length = 128\n", + "\n", + "# training configurations\n", + "device=\"gpu\"\n", + "batch_size = 32\n", + "num_train_epochs = 2\n", + "\n", + "# optimizer configurations\n", + "learning_rate = 5e-5\n", + "config_file = \"config_multilingual.yaml\"\n", + "train_data_dir = \"./data/XNLI-MT-1.0/XNLI-MT-1.0/\"\n", + "dev_data_dir = \"./data/XNLI-MT-1.0/XNLI-MT-1.0/\"\n", + "cache_dir=\".\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Preprocess Data" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "def convert_to_unicode(text):\n", + " \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n", + " if six.PY3:\n", + " if isinstance(text, str):\n", + " return text\n", + " elif isinstance(text, bytes):\n", + " return text.decode(\"utf-8\", \"ignore\")\n", + " else:\n", + " raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n", + " elif six.PY2:\n", + " if isinstance(text, 
str):\n", + " return text.decode(\"utf-8\", \"ignore\")\n", + " elif isinstance(text, unicode):\n", + " return text\n", + " else:\n", + " raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n", + " else:\n", + " raise ValueError(\"Not running on Python2 or Python 3?\")\n", + " \n", + "class DataProcessor(object):\n", + " \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n", + "\n", + " def get_train_examples(self, data_dir):\n", + " \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n", + " raise NotImplementedError()\n", + "\n", + " def get_dev_examples(self, data_dir):\n", + " \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n", + " raise NotImplementedError()\n", + "\n", + " def get_labels(self):\n", + " \"\"\"Gets the list of labels for this data set.\"\"\"\n", + " raise NotImplementedError()\n", + "\n", + " @classmethod\n", + " def _read_tsv(cls, input_file, quotechar=None):\n", + " \"\"\"Reads a tab separated value file.\"\"\"\n", + " with open(input_file, \"r\", encoding=\"utf-8\") as f:\n", + " reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n", + " lines = []\n", + " for line in reader:\n", + " if sys.version_info[0] == 2:\n", + " line = list(unicode(cell, 'utf-8') for cell in line)\n", + " lines.append(line)\n", + " return lines\n", + "\n", + " \n", + "class XnliProcessor(DataProcessor):\n", + " \"\"\"Processor for the XNLI data set.\"\"\"\n", + "\n", + " def __init__(self):\n", + " self.language = \"zh\"\n", + "\n", + " def get_train_examples(self, data_dir):\n", + " \"\"\"See base class.\"\"\"\n", + " lines = self._read_tsv(\n", + " os.path.join(data_dir, \"multinli\",\n", + " \"multinli.train.%s.tsv\" % self.language))\n", + " text_list = []\n", + " label_list = []\n", + " for (i, line) in enumerate(lines):\n", + " if i == 0:\n", + " continue\n", + " text_a = convert_to_unicode(line[0])\n", + " text_b = convert_to_unicode(line[1])\n", + " label = convert_to_unicode(line[2])\n", + " if label == convert_to_unicode(\"contradictory\"):\n", + " label = convert_to_unicode(\"contradiction\")\n", + " text_list.append((text_a, text_b))\n", + " label_list.append(label)\n", + " return text_list, label_list\n", + "\n", + " def get_dev_examples(self, data_dir):\n", + " \"\"\"See base class.\"\"\"\n", + " lines = self._read_tsv(os.path.join(data_dir, \"xnli\", \"xnli.dev.tsv\"))\n", + " text_list = []\n", + " label_list = []\n", + " for (i, line) in enumerate(lines):\n", + " if i == 0:\n", + " continue\n", + " language = convert_to_unicode(line[0])\n", + " if language != convert_to_unicode(self.language):\n", + " continue\n", + " text_a = convert_to_unicode(line[6])\n", + " text_b = convert_to_unicode(line[7])\n", + " label = convert_to_unicode(line[1])\n", + " \n", + " text_list.append((text_a, text_b))\n", + " label_list.append(label)\n", + " return text_list, label_list\n", + "\n", + " def get_labels(self):\n", + " \"\"\"See base class.\"\"\"\n", + " return [\"contradiction\", \"entailment\", \"neutral\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "xnli_processor = XnliProcessor()\n", + "train_text, train_labels = xnli_processor.get_train_examples(data_dir=train_data_dir)\n", + "dev_text, dev_labels= xnli_processor.get_dev_examples(data_dir=dev_data_dir)\n", + "label_list = xnli_processor.get_labels()" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + 
"source": [ + "train_text = train_text[:1000]\n", + "train_labels = train_labels[:1000]\n", + "dev_text = dev_text[:1000]\n", + "dev_labels = dev_labels[:1000]" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['contradiction', 'entailment', 'neutral']\n" + ] + } + ], + "source": [ + "print(label_list)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Convert examples to features\n", + "The function `convert_examples_to_token_features` converts raw string data to numerical features, involving the following steps:\n", + "1. Tokenization\n", + "2. Convert tokens and labels to numerical values\n", + "3. Sequence padding or truncation" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "tokenizer = Tokenizer(language=language, \n", + " to_lower=do_lower_case, \n", + " cache_dir=cache_dir)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 1000/1000 [00:00<00:00, 2916.60it/s]\n", + "100%|██████████| 1000/1000 [00:00<00:00, 3776.67it/s]\n" + ] + } + ], + "source": [ + "train_tokens = tokenizer.tokenize(train_text)\n", + "dev_tokens = tokenizer.tokenize(dev_text)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "train_token_ids, train_input_mask, train_token_type_ids = \\\n", + " tokenizer.preprocess_classification_tokens(train_tokens, max_len=max_seq_length)\n", + "dev_token_ids, dev_input_mask, dev_token_type_ids = \\\n", + " tokenizer.preprocess_classification_tokens(dev_tokens, max_len=max_seq_length)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "label_map = {label: i for i, label in enumerate(label_list)}\n", + "train_label_ids = [label_map[l] for l in train_labels]\n", + "dev_label_ids = [label_map[l] for l in dev_labels]" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 382072689/382072689 [00:07<00:00, 48295901.53B/s]\n" + ] + } + ], + "source": [ + "classifier = BERTSequenceClassifier(language=language,\n", + " num_labels=len(label_list),\n", + " cache_dir=cache_dir)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Warning: Only 1 CUDA device is available. 
Data parallelism is not possible.\n", + "epoch:1/2; batch:1->4/31; loss:1.304341\n", + "epoch:1/2; batch:5->8/31; loss:1.074271\n", + "epoch:1/2; batch:9->12/31; loss:1.063943\n", + "epoch:1/2; batch:13->16/31; loss:1.124226\n", + "epoch:1/2; batch:17->20/31; loss:1.100629\n", + "epoch:1/2; batch:21->24/31; loss:1.128772\n", + "epoch:1/2; batch:25->28/31; loss:1.216407\n", + "epoch:1/2; batch:29->32/31; loss:1.041168\n", + "epoch:2/2; batch:1->4/31; loss:1.054121\n", + "epoch:2/2; batch:5->8/31; loss:1.015953\n", + "epoch:2/2; batch:9->12/31; loss:1.072215\n", + "epoch:2/2; batch:13->16/31; loss:1.143950\n", + "epoch:2/2; batch:17->20/31; loss:0.927066\n", + "epoch:2/2; batch:21->24/31; loss:1.054619\n", + "epoch:2/2; batch:25->28/31; loss:0.914871\n", + "epoch:2/2; batch:29->32/31; loss:0.977393\n" + ] + } + ], + "source": [ + "classifier.fit(token_ids=train_token_ids,\n", + " input_mask=train_input_mask,\n", + " token_type_ids=train_token_type_ids,\n", + " labels=train_label_ids,\n", + " num_gpus=2,\n", + " num_epochs=num_train_epochs,\n", + " batch_size=batch_size,\n", + " lr=learning_rate,\n", + " warmup_proportion=0.1)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + " 1%| | 8/1000 [00:00<00:20, 49.08it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Warning: Only 1 CUDA device is available. Data parallelism is not possible.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 1000/1000 [00:15<00:00, 65.77it/s]\n" + ] + } + ], + "source": [ + "predictions = classifier.predict(token_ids=dev_token_ids,\n", + " input_mask=dev_input_mask,\n", + " token_type_ids=dev_token_type_ids,\n", + " batch_size=8)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "480\n", + "520\n", + "0\n" + ] + } + ], + "source": [ + "print(len([l for l in predictions if l==0]))\n", + "print(len([l for l in predictions if l==1]))\n", + "print(len([l for l in predictions if l==2]))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pytorch", + "language": "python", + "name": "pytorch" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 7cba858a42d7e24b8b663551fb2f1f0ec3730268 Mon Sep 17 00:00:00 2001 From: hlums Date: Tue, 18 Jun 2019 14:57:49 +0000 Subject: [PATCH 003/108] Updated docstring. --- utils_nlp/bert/common.py | 22 +++++++++++++++------- utils_nlp/bert/sequence_classification.py | 10 ++++++++-- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index 1b0e9cf9d..20162929a 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -1,6 +1,11 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
+# This script reuses some code from +# https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples +# /run_classifier.py + + from pytorch_pretrained_bert.tokenization import BertTokenizer from enum import Enum import warnings @@ -50,10 +55,10 @@ def tokenize(self, text): """Uses a BERT tokenizer Args: - text (list): [description] + text (list): List of strings (one sequence) or tuples (two sequences). Returns: - [list]: [description] + [list]: List of lists. Each sublist contains WordPiece tokens of the input sequence(s). """ if isinstance(text[0], str): return [self.tokenizer.tokenize(x) for x in tqdm(text)] @@ -63,17 +68,20 @@ def tokenize(self, text): def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): """Preprocessing of input tokens: - add BERT sentence markers ([CLS] and [SEP]) - - map tokens to indices + - map tokens to token indices in the BERT vocabulary - pad and truncate sequences - create an input_mask + - create token type ids, aka. segment ids Args: - tokens (list): List of tokens to preprocess. + tokens (list): List of token lists to preprocess. max_len (int, optional): Maximum number of tokens (documents will be truncated or padded). Defaults to 512. - Returns: - list of preprocesssed token lists - list of input mask lists + Returns: + tuple: A tuple containing the following three lists + list of preprocesssed token lists + list of input mask lists + list of token type id lists """ if max_len > BERT_MAX_LEN: print( diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index 2086e2f02..ec13c2d96 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -55,8 +55,10 @@ def fit( token_ids (list): List of training token id lists. input_mask (list): List of input mask lists. labels (list): List of training labels. - device (str, optional): Device used for training ("cpu" or "gpu"). - Defaults to "gpu". + token_type_ids (list, optional): List of lists. Each sublist + contains segment ids indicating if the token belongs to + the first sentence(0) or second sentence(1). Only needed + for two-sentence tasks. num_gpus (int, optional): The number of gpus to use. If None is specified, all available GPUs will be used. Defaults to None. @@ -172,6 +174,10 @@ def predict(self, token_ids, input_mask, token_type_ids=None, num_gpus=None, bat Args: token_ids (list): List of training token lists. input_mask (list): List of input mask lists. + token_type_ids (list, optional): List of lists. Each sublist + contains segment ids indicating if the token belongs to + the first sentence(0) or second sentence(1). Only needed + for two-sentence tasks. num_gpus (int, optional): The number of gpus to use. If None is specified, all available GPUs will be used. Defaults to None. 
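Taken together, the docstrings above describe a three-list interface (token ids, input mask, token type ids) shared by `fit()` and `predict()`. A condensed sketch of that workflow, following the XNLI notebook earlier in this series but with placeholder English sentence pairs; it assumes a `Language.ENGLISH` member is available alongside the `Language.CHINESE` used in the notebook:

```python
from utils_nlp.bert.common import Language, Tokenizer
from utils_nlp.bert.sequence_classification import BERTSequenceClassifier

# Placeholder sentence pairs and labels; a real run would use a full dataset.
text = [
    ("A plane is taking off.", "An air plane is taking off."),
    ("A man is playing a flute.", "A man is smoking."),
]
labels = [0, 1]  # e.g. 0 = entailment, 1 = contradiction

tokenizer = Tokenizer(language=Language.ENGLISH, to_lower=True, cache_dir=".")
tokens = tokenizer.tokenize(text)  # one [tokens_a, tokens_b] pair per example
token_ids, input_mask, token_type_ids = tokenizer.preprocess_classification_tokens(
    tokens, max_len=128
)

classifier = BERTSequenceClassifier(
    language=Language.ENGLISH, num_labels=2, cache_dir="."
)
classifier.fit(
    token_ids=token_ids,
    input_mask=input_mask,
    token_type_ids=token_type_ids,
    labels=labels,
    num_epochs=1,
    batch_size=2,
    warmup_proportion=0.1,  # linear learning-rate warmup over 10% of steps
)
predictions = classifier.predict(
    token_ids=token_ids,
    input_mask=input_mask,
    token_type_ids=token_type_ids,
    batch_size=2,
)
```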
From c9d587317ef1641e64f832c8cf02b23ee0c645ec Mon Sep 17 00:00:00 2001 From: miguelgfierro Date: Tue, 18 Jun 2019 20:25:27 +0100 Subject: [PATCH 004/108] track github metrics --- tests/ci/repo_metrics_pipeline.yml | 26 ++++++++++++++++++++++++++ tools/repo_metrics/README.md | 18 ++++++++++++++---- tools/repo_metrics/config_template.py | 2 +- tools/repo_metrics/track_metrics.py | 14 ++++++++++---- 4 files changed, 51 insertions(+), 9 deletions(-) create mode 100644 tests/ci/repo_metrics_pipeline.yml diff --git a/tests/ci/repo_metrics_pipeline.yml b/tests/ci/repo_metrics_pipeline.yml new file mode 100644 index 000000000..13166ccc2 --- /dev/null +++ b/tests/ci/repo_metrics_pipeline.yml @@ -0,0 +1,26 @@ + +jobs: +- job: Repometrics + pool: + vmImage: 'ubuntu-16.04' + + steps: + - task: UsePythonVersion@0 + inputs: + versionSpec: '3.6' + architecture: 'x64' + + - script: | + cp tools/repo_metrics/config_template.py tools/repo_metrics/config.py + sed -i ''s/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/$(github_token)/g'' tools/repo_metrics/config.py + sed -i ''s/XXXXXXXXXXXXXXXXXXXXXXXXX/$(cosmosdb_connectionstring)/g'' tools/repo_metrics/config.py + displayName: Configure CosmosDB Connection + + - script: | + python -m pip install python-dateutil>=2.80 pymongo>=3.8.0 gitpython>2.1.11 requests>=2.21.0 + python tools/repo_metrics/track_metrics.py --github_repo "https://github.com/microsoft/nlp" --save_to_database + displayName: Python script to record stats + + + + diff --git a/tools/repo_metrics/README.md b/tools/repo_metrics/README.md index 389fa9ed8..884cc3eb1 100755 --- a/tools/repo_metrics/README.md +++ b/tools/repo_metrics/README.md @@ -1,6 +1,6 @@ # Repository Metrics -[![Build status](https://msdata.visualstudio.com/AlgorithmsAndDataScience/_apis/build/status/Recommenders/Recommenders%20repo%20stats)](https://msdata.visualstudio.com/AlgorithmsAndDataScience/_build/latest?definitionId=5206) +[![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/repo_metrics?branchName=master)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=36&branchName=master) We developed a script that allows us to track the metrics of the Recommenders repo. Some of the metrics we can track are listed here: @@ -10,11 +10,11 @@ We developed a script that allows us to track the metrics of the Recommenders re * Number of views * Number of lines of code -To see the full list of metrics, see [git_stats.py](scripts/repo_metrics/git_stats.py) +To see the full list of metrics, see [git_stats.py](git_stats.py) The first step is to set up the credentials, copy the configuration file and fill up the credentials of GitHub and CosmosDB: - cp scripts/repo_metrics/config_template.py scripts/repo_metrics/config.py + cp tools/repo_metrics/config_template.py tools/repo_metrics/config.py To track the current state of the repository and save it to CosmosDB: @@ -22,5 +22,15 @@ To track the current state of the repository and save it to CosmosDB: To track an event related to this repository and save it to CosmosDB: - python scripts/repo_metrics/track_metrics.py --event "Today we did our first blog of the project" --event_date 2018-12-01 --save_to_database + python tools/repo_metrics/track_metrics.py --event "Today we did our first blog of the project" --event_date 2018-12-01 --save_to_database + + +### Setting up Azure CosmosDB + +The API that we is used to track the GitHub metrics is the [Mongo API](https://docs.microsoft.com/en-us/azure/cosmos-db/mongodb-introduction). 
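For reference, metrics stored through this pipeline can be read back with any Mongo client. A hedged sketch using `pymongo` and the database/collection names from `config_template.py` shown further below; the connection string is a placeholder, and documents are assumed to carry a `date` field as `github_stats_as_dict` suggests:

```python
from pymongo import MongoClient

# Placeholder CosmosDB Mongo API connection string.
CONNECTION_STRING = (
    "mongodb://<account>.documents.azure.com:10255/?ssl=true&replicaSet=globaldb"
)

client = MongoClient(CONNECTION_STRING)
db = client["nlp_stats"]

# Most recent GitHub stats snapshot, sorted by the recommended "date" partition key.
latest = db["github_stats"].find_one(sort=[("date", -1)])
print(latest)
```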
+ +The database name and collections name are defined in the [config file](config_template.py). There are two main collections, defined as `COLLECTION_GITHUB_STATS` and `COLLECTION_EVENTS` to store the information defined on the previous section. + +**IMPORTANT NOTE**: If the database and the collections are created directly through the portal, a common partition key should be defined. We recommend to use `date` as partition key. + diff --git a/tools/repo_metrics/config_template.py b/tools/repo_metrics/config_template.py index 78825d957..dad9a0950 100755 --- a/tools/repo_metrics/config_template.py +++ b/tools/repo_metrics/config_template.py @@ -7,7 +7,7 @@ # CosmosDB Mongo API CONNECTION_STRING = "mongodb://XXXXXXXXXXXXXXXXXXXXXXXXX.documents.azure.com:10255/?ssl=true&replicaSet=globaldb" -DATABASE = "reco_stats" +DATABASE = "nlp_stats" COLLECTION_GITHUB_STATS = "github_stats" COLLECTION_EVENTS = "events" diff --git a/tools/repo_metrics/track_metrics.py b/tools/repo_metrics/track_metrics.py index e4136fdc9..5580ec9f0 100755 --- a/tools/repo_metrics/track_metrics.py +++ b/tools/repo_metrics/track_metrics.py @@ -5,7 +5,6 @@ import os # Need to append a full path instead of relative path. -# This seems to be an issue from Azure DevOps command line task. # NOTE this does not affect running directly in the shell. sys.path.append(os.getcwd()) import argparse @@ -14,9 +13,8 @@ from datetime import datetime from dateutil.parser import isoparse from pymongo import MongoClient -from datetime import datetime -from scripts.repo_metrics.git_stats import Github -from scripts.repo_metrics.config import ( +from tools.repo_metrics.git_stats import Github +from tools.repo_metrics.config import ( GITHUB_TOKEN, CONNECTION_STRING, DATABASE, @@ -32,6 +30,7 @@ def parse_args(): """Argument parser. + Returns: obj: Parser. """ @@ -61,8 +60,10 @@ def parse_args(): def connect(uri="mongodb://localhost"): """Mongo connector. + Args: uri (str): Connection string. + Returns: obj: Mongo client. """ @@ -78,9 +79,11 @@ def connect(uri="mongodb://localhost"): def event_as_dict(event, date): """Encodes an string event input as a dictionary with the date. + Args: event (str): Details of a event. date (datetime): Date of the event. + Returns: dict: Dictionary with the event and the date. """ @@ -89,8 +92,10 @@ def event_as_dict(event, date): def github_stats_as_dict(github): """Encodes Github statistics as a dictionary with the date. + Args: obj: Github object. + Returns: dict: Dictionary with Github details and the date. """ @@ -125,6 +130,7 @@ def github_stats_as_dict(github): def tracker(args): """Main function to track metrics. + Args: args (obj): Parsed arguments. 
""" From 7f1bb8a03997b59f733b2552aa9b414a0dd55c15 Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 12 Jun 2019 11:19:01 -0400 Subject: [PATCH 005/108] bug fix: sts-benchmark has extra tabs in some rows which caused incorrect reading of pandas df or azureml dataflow object --- utils_nlp/dataset/stsbenchmark.py | 75 +++++++++++++------------------ 1 file changed, 32 insertions(+), 43 deletions(-) diff --git a/utils_nlp/dataset/stsbenchmark.py b/utils_nlp/dataset/stsbenchmark.py index ed919ed57..5e276bbcb 100644 --- a/utils_nlp/dataset/stsbenchmark.py +++ b/utils_nlp/dataset/stsbenchmark.py @@ -4,7 +4,6 @@ import os import tarfile import pandas as pd -import azureml.dataprep as dp from utils_nlp.dataset.url_utils import maybe_download @@ -22,26 +21,18 @@ def load_pandas_df(data_path, file_split=DEFAULT_FILE_SPLIT): Returns: pd.DataFrame: STS Benchmark dataset """ - clean_file_path = os.path.join( - data_path, "clean/stsbenchmark", "sts-{}.csv".format(file_split) - ) - dflow = _maybe_download_and_extract(data_path, clean_file_path) - return dflow.to_pandas_dataframe() + file_name = "sts-{}.csv".format(file_split) + df = _maybe_download_and_extract(file_name, data_path) + return df -def _maybe_download_and_extract(base_data_path, clean_file_path): - if not os.path.exists(clean_file_path): - raw_data_path = os.path.join(base_data_path, "raw") - if not os.path.exists(raw_data_path): - os.makedirs(raw_data_path) - sts_path = _download_sts(raw_data_path) - sts_files = [f for f in os.listdir(sts_path) if f.endswith(".csv")] - _clean_sts( - sts_files, - sts_path, - os.path.join(base_data_path, "clean", "stsbenchmark"), - ) - return dp.auto_read_file(clean_file_path).drop_columns("Column1") +def _maybe_download_and_extract(sts_file, base_data_path): + raw_data_path = os.path.join(base_data_path, "raw") + if not os.path.exists(raw_data_path): + os.makedirs(raw_data_path) + sts_path = _download_sts(raw_data_path) + df = _load_sts(os.path.join(sts_path, sts_file)) + return df def _download_sts(dirpath): @@ -79,31 +70,29 @@ def _extract_sts(tarpath, target_dirpath=".", tmode="r"): return os.path.join(target_dirpath, extracted) -def _clean_sts(filenames, src_dir, target_dir): +def _load_sts(src_file_path): + """Drop columns containing irrelevant metadata and save as new csv files in the target_dir + + Args: + src_file_path (str): filepath to train/dev/test csv files. + """ + with open(src_file_path, 'r', encoding="utf-8") as f: + sent_pairs = [] + for line in f: + l = line.strip().split("\t") + sent_pairs.append([l[0].strip(), l[1].strip(), l[2].strip(), l[3].strip(), float(l[4]), l[5].strip(), + l[6].strip()]) + + sdf = pd.DataFrame(sent_pairs, columns=["column_0", "column_1", "column_2", "column_3", "column_4", "column_5", "column_6"]) + return sdf + + +def clean_sts(df): """Drop columns containing irrelevant metadata and save as new csv files in the target_dir Args: - filenames (list of str): List of filenames for the train/dev/test csv files. - src_dir (str): Directory for the raw csv files. - target_dir (str): Directory for the clean csv files to be written to. + df (pandas.Dataframe): drop columns from train/test/dev files. 
""" - if not os.path.exists(target_dir): - os.makedirs(target_dir) - filepaths = [os.path.join(src_dir, f) for f in filenames] - for i, fp in enumerate(filepaths): - dat = dp.auto_read_file(path=fp) - s = dat.keep_columns(["Column5", "Column6", "Column7"]).rename_columns( - { - "Column5": "score", - "Column6": "sentence1", - "Column7": "sentence2", - } - ) - print( - "Writing clean dataframe to {}".format( - os.path.join(target_dir, filenames[i]) - ) - ) - sdf = s.to_pandas_dataframe().to_csv( - os.path.join(target_dir, filenames[i]), sep="\t" - ) + clean_df = df.drop(["column_0", "column_1", "column_2", "column_3"], axis=1) + clean_df = clean_df.rename(index=str, columns={"column_4": "score", "column_5": "sentence1", "column_6": "sentence2"}) + return clean_df From 074bca3619873cb5d9670b2336f586ed92808afb Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 12 Jun 2019 13:01:42 -0400 Subject: [PATCH 006/108] black formatter --- utils_nlp/dataset/stsbenchmark.py | 43 ++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/utils_nlp/dataset/stsbenchmark.py b/utils_nlp/dataset/stsbenchmark.py index 5e276bbcb..5d91b7fcf 100644 --- a/utils_nlp/dataset/stsbenchmark.py +++ b/utils_nlp/dataset/stsbenchmark.py @@ -76,14 +76,34 @@ def _load_sts(src_file_path): Args: src_file_path (str): filepath to train/dev/test csv files. """ - with open(src_file_path, 'r', encoding="utf-8") as f: + with open(src_file_path, "r", encoding="utf-8") as f: sent_pairs = [] for line in f: l = line.strip().split("\t") - sent_pairs.append([l[0].strip(), l[1].strip(), l[2].strip(), l[3].strip(), float(l[4]), l[5].strip(), - l[6].strip()]) - - sdf = pd.DataFrame(sent_pairs, columns=["column_0", "column_1", "column_2", "column_3", "column_4", "column_5", "column_6"]) + sent_pairs.append( + [ + l[0].strip(), + l[1].strip(), + l[2].strip(), + l[3].strip(), + float(l[4]), + l[5].strip(), + l[6].strip(), + ] + ) + + sdf = pd.DataFrame( + sent_pairs, + columns=[ + "column_0", + "column_1", + "column_2", + "column_3", + "column_4", + "column_5", + "column_6", + ], + ) return sdf @@ -93,6 +113,15 @@ def clean_sts(df): Args: df (pandas.Dataframe): drop columns from train/test/dev files. """ - clean_df = df.drop(["column_0", "column_1", "column_2", "column_3"], axis=1) - clean_df = clean_df.rename(index=str, columns={"column_4": "score", "column_5": "sentence1", "column_6": "sentence2"}) + clean_df = df.drop( + ["column_0", "column_1", "column_2", "column_3"], axis=1 + ) + clean_df = clean_df.rename( + index=str, + columns={ + "column_4": "score", + "column_5": "sentence1", + "column_6": "sentence2", + }, + ) return clean_df From 236a64e9d8ac7794aa611867ee82249a3d5e4630 Mon Sep 17 00:00:00 2001 From: Casey Hong Date: Tue, 18 Jun 2019 17:17:23 -0400 Subject: [PATCH 007/108] update stsbenchmark :notebook: --- scenarios/data_prep/README.md | 4 +- scenarios/data_prep/stsbenchmark.ipynb | 417 +++---------------------- utils_nlp/dataset/stsbenchmark.py | 2 +- 3 files changed, 55 insertions(+), 368 deletions(-) diff --git a/scenarios/data_prep/README.md b/scenarios/data_prep/README.md index eaf84ad28..5e13abec5 100644 --- a/scenarios/data_prep/README.md +++ b/scenarios/data_prep/README.md @@ -25,7 +25,7 @@ STS Benchmark - sts_load.ipynb + stsbenchmark.ipynb Downloads and cleans the STS Benchmark dataset. Shows an example of tokenizing and removing stopwords using the popular spaCy library. 
@@ -34,7 +34,7 @@ MSR Paraphrase Corpus - msrpc_load.ipynb + msrpc.ipynb Download and clean the MSR Paraphrase corpus. diff --git a/scenarios/data_prep/stsbenchmark.ipynb b/scenarios/data_prep/stsbenchmark.ipynb index ddd649814..e76967a79 100644 --- a/scenarios/data_prep/stsbenchmark.ipynb +++ b/scenarios/data_prep/stsbenchmark.ipynb @@ -46,7 +46,7 @@ "source": [ "import sys\n", "\n", - "sys.path.append(\"../../../\") ## set the environment path\n", + "sys.path.append(\"../../\") ## set the environment path\n", "\n", "import os\n", "import azureml.dataprep as dp\n", @@ -67,7 +67,7 @@ "outputs": [], "source": [ "STS_URL = \"http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz\"\n", - "BASE_DATA_PATH = \"../../../data\"\n", + "BASE_DATA_PATH = \"../../data\"\n", "RAW_DATA_PATH = os.path.join(BASE_DATA_PATH, \"raw\")\n", "CLEAN_DATA_PATH = os.path.join(BASE_DATA_PATH, \"clean\")" ] @@ -76,14 +76,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 01 Data Download" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Make a directory for the data if it doesn't already exist, and then download." + "### 01 Data Download\n", + "In this section we \n", + "* load raw data into a dataframe\n", + "* peek into the first 5 rows" ] }, { @@ -100,68 +96,21 @@ "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [], - "source": [ - "def download_sts(url, dirpath):\n", - " zipfile = maybe_download(url, work_directory=dirpath)\n", - " unzipped = stsbenchmark._extract_sts(zipfile, target_dirpath=dirpath, tmode=\"r:gz\")\n", - " return zipfile, unzipped" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "418kB [00:03, 138kB/s] " + "100%|██████████| 401/401 [00:01<00:00, 310KB/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Data downloaded to ../../../data/raw/stsbenchmark\n" + "Data downloaded to ../../data/raw/raw/stsbenchmark\n" ] }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "tarfile, datapath = download_sts(STS_URL, RAW_DATA_PATH)\n", - "print(\"Data downloaded to {}\".format(datapath))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 02 Data Understanding\n", - "In this section we \n", - "* load raw data into a dataframe\n", - "* peek into the first 10 rows" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can load the data using a `read` function that has built-in automatic filetype inference:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ { "data": { "text/html": [ @@ -183,13 +132,13 @@ " \n", " \n", " \n", - " Column1\n", - " Column2\n", - " Column3\n", - " Column4\n", - " Column5\n", - " Column6\n", - " Column7\n", + " column_0\n", + " column_1\n", + " column_2\n", + " column_3\n", + " column_4\n", + " column_5\n", + " column_6\n", " \n", " \n", " \n", @@ -198,7 +147,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 1\n", + " 0001\n", " 5.00\n", " A plane is taking off.\n", " An air plane is taking off.\n", @@ -208,7 +157,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 4\n", + " 0004\n", " 3.80\n", " A man is playing a large flute.\n", " A man is playing a flute.\n", @@ -218,7 +167,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 5\n", + " 0005\n", " 3.80\n", " A man is spreading shreded cheese on a pizza.\n", " A man is spreading 
shredded cheese on an uncoo...\n", @@ -228,7 +177,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 6\n", + " 0006\n", " 2.60\n", " Three men are playing chess.\n", " Two men are playing chess.\n", @@ -238,178 +187,59 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 9\n", + " 0009\n", " 4.25\n", " A man is playing the cello.\n", " A man seated is playing the cello.\n", " \n", - " \n", - " 5\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 11\n", - " 4.25\n", - " Some men are fighting.\n", - " Two men are fighting.\n", - " \n", - " \n", - " 6\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 12\n", - " 0.50\n", - " A man is smoking.\n", - " A man is skating.\n", - " \n", - " \n", - " 7\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 13\n", - " 1.60\n", - " The man is playing the piano.\n", - " The man is playing the guitar.\n", - " \n", - " \n", - " 8\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 14\n", - " 2.20\n", - " A man is playing on a guitar and singing.\n", - " A woman is playing an acoustic guitar and sing...\n", - " \n", - " \n", - " 9\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 16\n", - " 5.00\n", - " A person is throwing a cat on to the ceiling.\n", - " A person throws a cat on the ceiling.\n", - " \n", " \n", "\n", "" ], "text/plain": [ - " Column1 Column2 Column3 Column4 Column5 \\\n", - "0 main-captions MSRvid 2012test 1 5.00 \n", - "1 main-captions MSRvid 2012test 4 3.80 \n", - "2 main-captions MSRvid 2012test 5 3.80 \n", - "3 main-captions MSRvid 2012test 6 2.60 \n", - "4 main-captions MSRvid 2012test 9 4.25 \n", - "5 main-captions MSRvid 2012test 11 4.25 \n", - "6 main-captions MSRvid 2012test 12 0.50 \n", - "7 main-captions MSRvid 2012test 13 1.60 \n", - "8 main-captions MSRvid 2012test 14 2.20 \n", - "9 main-captions MSRvid 2012test 16 5.00 \n", + " column_0 column_1 column_2 column_3 column_4 \\\n", + "0 main-captions MSRvid 2012test 0001 5.00 \n", + "1 main-captions MSRvid 2012test 0004 3.80 \n", + "2 main-captions MSRvid 2012test 0005 3.80 \n", + "3 main-captions MSRvid 2012test 0006 2.60 \n", + "4 main-captions MSRvid 2012test 0009 4.25 \n", "\n", - " Column6 \\\n", + " column_5 \\\n", "0 A plane is taking off. \n", "1 A man is playing a large flute. \n", "2 A man is spreading shreded cheese on a pizza. \n", "3 Three men are playing chess. \n", "4 A man is playing the cello. \n", - "5 Some men are fighting. \n", - "6 A man is smoking. \n", - "7 The man is playing the piano. \n", - "8 A man is playing on a guitar and singing. \n", - "9 A person is throwing a cat on to the ceiling. \n", "\n", - " Column7 \n", + " column_6 \n", "0 An air plane is taking off. \n", "1 A man is playing a flute. \n", "2 A man is spreading shredded cheese on an uncoo... \n", "3 Two men are playing chess. \n", - "4 A man seated is playing the cello. \n", - "5 Two men are fighting. \n", - "6 A man is skating. \n", - "7 The man is playing the guitar. \n", - "8 A woman is playing an acoustic guitar and sing... \n", - "9 A person throws a cat on the ceiling. " + "4 A man seated is playing the cello. 
" ] }, - "execution_count": 6, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "dflow = dp.auto_read_file(path=os.path.join(datapath, \"sts-train.csv\"))\n", - "dflow.head()" + "df = stsbenchmark.load_pandas_df(RAW_DATA_PATH, file_split=\"train\")\n", + "df.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `auto_read_file` function from the AzureML Data Prep module actually returns a `Dataflow` object, which you can read more about [here](https://docs.microsoft.com/en-us/python/api/azureml-dataprep/azureml.dataprep.dataflow?view=azure-dataprep-py). We can easily transfer the data into a Pandas DataFrame (as before) in a single line using the `to_pandas_dataframe` function, or we can continue manipulating the data as a Dataflow object using the AzureML Data Prep API. For the remainder of this notebook we will be doing the latter." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 03 Data Cleaning\n", + "### 02 Data Cleaning\n", "Now that we know about the general shape of the data, we can clean it so that it is ready for further preprocessing. The main operation we need for the STS Benchmark data is to drop all of columns except for the sentence pairs and scores." ] }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "sentences = dflow.keep_columns([\"Column5\", \"Column6\", \"Column7\"]).rename_columns(\n", - " {\"Column5\": \"score\", \"Column6\": \"sentence1\", \"Column7\": \"sentence2\"}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 04 One-Shot Dataframe Loading\n", - "You can also use our STSBenchmark utils to automatically download, extract, and persist the data. You can then load the sanitized data as a pandas DataFrame in one line. " - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "418kB [00:02, 191kB/s] \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../../data/raw/stsbenchmark\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-test.csv\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-dev.csv\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-train.csv\n" - ] - } - ], - "source": [ - "# Initializing this instance runs the downloader and extractor behind the scenes\n", - "sts_train = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -489,12 +319,13 @@ "4 A man seated is playing the cello. " ] }, - "execution_count": 9, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "sts_train = stsbenchmark.clean_sts(df)\n", "sts_train.head()" ] }, @@ -502,13 +333,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 05 Make Lowercase\n", - "We start with simple standardization of the text by making all text lowercase." + "### 03 Make Lowercase\n", + "We do simple standardization of the text by making all text lowercase." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -588,7 +419,7 @@ "4 a man seated is playing the cello. 
" ] }, - "execution_count": 10, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -602,13 +433,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 06 Tokenize\n", + "### 04 Tokenize\n", "We tokenize the text using spaCy's non-destructive tokenizer." ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -680,46 +511,6 @@ " [a, man, is, playing, the, cello, .]\n", " [a, man, seated, is, playing, the, cello, .]\n", " \n", - " \n", - " 5\n", - " 4.25\n", - " some men are fighting.\n", - " two men are fighting.\n", - " [some, men, are, fighting, .]\n", - " [two, men, are, fighting, .]\n", - " \n", - " \n", - " 6\n", - " 0.50\n", - " a man is smoking.\n", - " a man is skating.\n", - " [a, man, is, smoking, .]\n", - " [a, man, is, skating, .]\n", - " \n", - " \n", - " 7\n", - " 1.60\n", - " the man is playing the piano.\n", - " the man is playing the guitar.\n", - " [the, man, is, playing, the, piano, .]\n", - " [the, man, is, playing, the, guitar, .]\n", - " \n", - " \n", - " 8\n", - " 2.20\n", - " a man is playing on a guitar and singing.\n", - " a woman is playing an acoustic guitar and sing...\n", - " [a, man, is, playing, on, a, guitar, and, sing...\n", - " [a, woman, is, playing, an, acoustic, guitar, ...\n", - " \n", - " \n", - " 9\n", - " 5.00\n", - " a person is throwing a cat on to the ceiling.\n", - " a person throws a cat on the ceiling.\n", - " [a, person, is, throwing, a, cat, on, to, the,...\n", - " [a, person, throws, a, cat, on, the, ceiling, .]\n", - " \n", " \n", "\n", "" @@ -731,11 +522,6 @@ "2 3.80 a man is spreading shreded cheese on a pizza. \n", "3 2.60 three men are playing chess. \n", "4 4.25 a man is playing the cello. \n", - "5 4.25 some men are fighting. \n", - "6 0.50 a man is smoking. \n", - "7 1.60 the man is playing the piano. \n", - "8 2.20 a man is playing on a guitar and singing. \n", - "9 5.00 a person is throwing a cat on to the ceiling. \n", "\n", " sentence2 \\\n", "0 an air plane is taking off. \n", @@ -743,11 +529,6 @@ "2 a man is spreading shredded cheese on an uncoo... \n", "3 two men are playing chess. \n", "4 a man seated is playing the cello. \n", - "5 two men are fighting. \n", - "6 a man is skating. \n", - "7 the man is playing the guitar. \n", - "8 a woman is playing an acoustic guitar and sing... \n", - "9 a person throws a cat on the ceiling. \n", "\n", " sentence1_tokens \\\n", "0 [a, plane, is, taking, off, .] \n", @@ -755,48 +536,36 @@ "2 [a, man, is, spreading, shreded, cheese, on, a... \n", "3 [three, men, are, playing, chess, .] \n", "4 [a, man, is, playing, the, cello, .] \n", - "5 [some, men, are, fighting, .] \n", - "6 [a, man, is, smoking, .] \n", - "7 [the, man, is, playing, the, piano, .] \n", - "8 [a, man, is, playing, on, a, guitar, and, sing... \n", - "9 [a, person, is, throwing, a, cat, on, to, the,... \n", "\n", " sentence2_tokens \n", "0 [an, air, plane, is, taking, off, .] \n", "1 [a, man, is, playing, a, flute, .] \n", "2 [a, man, is, spreading, shredded, cheese, on, ... \n", "3 [two, men, are, playing, chess, .] \n", - "4 [a, man, seated, is, playing, the, cello, .] \n", - "5 [two, men, are, fighting, .] \n", - "6 [a, man, is, skating, .] \n", - "7 [the, man, is, playing, the, guitar, .] \n", - "8 [a, woman, is, playing, an, acoustic, guitar, ... \n", - "9 [a, person, throws, a, cat, on, the, ceiling, .] " + "4 [a, man, seated, is, playing, the, cello, .] 
" ] }, - "execution_count": 11, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "sts_train_tok = to_spacy_tokens(\n", - " sts_train_low.head(10)\n", - ") # operating on a small slice of the data as an example\n", - "sts_train_tok.head(10)" + "sts_train_tok = to_spacy_tokens(sts_train_low.head())\n", + "sts_train_tok.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 07 Optional: Remove Stop Words\n", + "### 05 Optional: Remove Stop Words\n", "Removing stop words is another common preprocessing step for NLP tasks. We use the `rm_spacy_stopwords` utility function to do this on the dataframe. This function makes use of the spaCy language model's default set of stop words. If we need to add our own set of stop words (for example, if we are doing an NLP task for a very specific domain of content), we can do this in-line by simply providing the list as the `custom_stopwords` parameter of `rm_spacy_stopwords`." ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -880,56 +649,6 @@ " [man, playing, cello, .]\n", " [man, seated, playing, cello, .]\n", " \n", - " \n", - " 5\n", - " 4.25\n", - " some men are fighting.\n", - " two men are fighting.\n", - " [some, men, are, fighting, .]\n", - " [two, men, are, fighting, .]\n", - " [men, fighting, .]\n", - " [men, fighting, .]\n", - " \n", - " \n", - " 6\n", - " 0.50\n", - " a man is smoking.\n", - " a man is skating.\n", - " [a, man, is, smoking, .]\n", - " [a, man, is, skating, .]\n", - " [man, smoking, .]\n", - " [man, skating, .]\n", - " \n", - " \n", - " 7\n", - " 1.60\n", - " the man is playing the piano.\n", - " the man is playing the guitar.\n", - " [the, man, is, playing, the, piano, .]\n", - " [the, man, is, playing, the, guitar, .]\n", - " [man, playing, piano, .]\n", - " [man, playing, guitar, .]\n", - " \n", - " \n", - " 8\n", - " 2.20\n", - " a man is playing on a guitar and singing.\n", - " a woman is playing an acoustic guitar and sing...\n", - " [a, man, is, playing, on, a, guitar, and, sing...\n", - " [a, woman, is, playing, an, acoustic, guitar, ...\n", - " [man, playing, guitar, singing, .]\n", - " [woman, playing, acoustic, guitar, singing, .]\n", - " \n", - " \n", - " 9\n", - " 5.00\n", - " a person is throwing a cat on to the ceiling.\n", - " a person throws a cat on the ceiling.\n", - " [a, person, is, throwing, a, cat, on, to, the,...\n", - " [a, person, throws, a, cat, on, the, ceiling, .]\n", - " [person, throwing, cat, ceiling, .]\n", - " [person, throws, cat, ceiling, .]\n", - " \n", " \n", "\n", "" @@ -941,11 +660,6 @@ "2 3.80 a man is spreading shreded cheese on a pizza. \n", "3 2.60 three men are playing chess. \n", "4 4.25 a man is playing the cello. \n", - "5 4.25 some men are fighting. \n", - "6 0.50 a man is smoking. \n", - "7 1.60 the man is playing the piano. \n", - "8 2.20 a man is playing on a guitar and singing. \n", - "9 5.00 a person is throwing a cat on to the ceiling. \n", "\n", " sentence2 \\\n", "0 an air plane is taking off. \n", @@ -953,11 +667,6 @@ "2 a man is spreading shredded cheese on an uncoo... \n", "3 two men are playing chess. \n", "4 a man seated is playing the cello. \n", - "5 two men are fighting. \n", - "6 a man is skating. \n", - "7 the man is playing the guitar. \n", - "8 a woman is playing an acoustic guitar and sing... \n", - "9 a person throws a cat on the ceiling. \n", "\n", " sentence1_tokens \\\n", "0 [a, plane, is, taking, off, .] 
\n", @@ -965,11 +674,6 @@ "2 [a, man, is, spreading, shreded, cheese, on, a... \n", "3 [three, men, are, playing, chess, .] \n", "4 [a, man, is, playing, the, cello, .] \n", - "5 [some, men, are, fighting, .] \n", - "6 [a, man, is, smoking, .] \n", - "7 [the, man, is, playing, the, piano, .] \n", - "8 [a, man, is, playing, on, a, guitar, and, sing... \n", - "9 [a, person, is, throwing, a, cat, on, to, the,... \n", "\n", " sentence2_tokens \\\n", "0 [an, air, plane, is, taking, off, .] \n", @@ -977,11 +681,6 @@ "2 [a, man, is, spreading, shredded, cheese, on, ... \n", "3 [two, men, are, playing, chess, .] \n", "4 [a, man, seated, is, playing, the, cello, .] \n", - "5 [two, men, are, fighting, .] \n", - "6 [a, man, is, skating, .] \n", - "7 [the, man, is, playing, the, guitar, .] \n", - "8 [a, woman, is, playing, an, acoustic, guitar, ... \n", - "9 [a, person, throws, a, cat, on, the, ceiling, .] \n", "\n", " sentence1_tokens_rm_stopwords \\\n", "0 [plane, taking, .] \n", @@ -989,34 +688,22 @@ "2 [man, spreading, shreded, cheese, pizza, .] \n", "3 [men, playing, chess, .] \n", "4 [man, playing, cello, .] \n", - "5 [men, fighting, .] \n", - "6 [man, smoking, .] \n", - "7 [man, playing, piano, .] \n", - "8 [man, playing, guitar, singing, .] \n", - "9 [person, throwing, cat, ceiling, .] \n", "\n", " sentence2_tokens_rm_stopwords \n", "0 [air, plane, taking, .] \n", "1 [man, playing, flute, .] \n", "2 [man, spreading, shredded, cheese, uncooked, p... \n", "3 [men, playing, chess, .] \n", - "4 [man, seated, playing, cello, .] \n", - "5 [men, fighting, .] \n", - "6 [man, skating, .] \n", - "7 [man, playing, guitar, .] \n", - "8 [woman, playing, acoustic, guitar, singing, .] \n", - "9 [person, throws, cat, ceiling, .] " + "4 [man, seated, playing, cello, .] " ] }, - "execution_count": 12, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "rm_spacy_stopwords(\n", - " sts_train_tok\n", - ") # operating on a small slice of the data as an example" + "rm_spacy_stopwords(sts_train_tok).head()" ] } ], @@ -1036,7 +723,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.5" + "version": "3.6.8" } }, "nbformat": 4, diff --git a/utils_nlp/dataset/stsbenchmark.py b/utils_nlp/dataset/stsbenchmark.py index 5d91b7fcf..8c62240b1 100644 --- a/utils_nlp/dataset/stsbenchmark.py +++ b/utils_nlp/dataset/stsbenchmark.py @@ -71,7 +71,7 @@ def _extract_sts(tarpath, target_dirpath=".", tmode="r"): def _load_sts(src_file_path): - """Drop columns containing irrelevant metadata and save as new csv files in the target_dir + """Load datafile as dataframe Args: src_file_path (str): filepath to train/dev/test csv files. 
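Outside the notebook, the updated STS Benchmark utilities can be driven in a few lines. A hedged recap of the flow demonstrated above; the data path is illustrative and it assumes the repo's `utils_nlp` package is importable (the notebook does `sys.path.append("../../")` for this):

```python
import os
from utils_nlp.dataset import stsbenchmark

BASE_DATA_PATH = "../../data"  # placeholder location for downloaded data

# Downloads and extracts the raw files on first use, then parses the
# tab-separated split into a dataframe with generic column_0..column_6 names.
raw_df = stsbenchmark.load_pandas_df(
    os.path.join(BASE_DATA_PATH, "raw"), file_split="train"
)

# Drops the metadata columns and renames the rest to score/sentence1/sentence2.
sts_train = stsbenchmark.clean_sts(raw_df)
print(sts_train.head())
```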
From f8a2db0d2d330d327d4936e5e88ff9dd60d949a0 Mon Sep 17 00:00:00 2001 From: Liqun Shao Date: Wed, 19 Jun 2019 11:24:39 -0400 Subject: [PATCH 008/108] move all the imports to global settings --- scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb index 41fc46729..8a40dc74b 100644 --- a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb +++ b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb @@ -127,7 +127,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 1, "metadata": { "scrolled": true }, @@ -887,8 +887,6 @@ } ], "source": [ - "import shutil\n", - "\n", "gensen_folder = os.path.join(project_folder,'utils_nlp/gensen/')\n", "shutil.copy('gensen_train.py', gensen_folder)\n", "shutil.copy('gensen_config.json', gensen_folder)" @@ -1030,7 +1028,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ From 5245afafdd61c09f82153d6f31724a0f5283699d Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Wed, 19 Jun 2019 14:54:33 -0400 Subject: [PATCH 009/108] add dask data loader --- utils_nlp/dataset/data_loaders.py | 59 +++++++++++++++++++++++++++++++ 1 file changed, 59 insertions(+) create mode 100644 utils_nlp/dataset/data_loaders.py diff --git a/utils_nlp/dataset/data_loaders.py b/utils_nlp/dataset/data_loaders.py new file mode 100644 index 000000000..6124fdbd4 --- /dev/null +++ b/utils_nlp/dataset/data_loaders.py @@ -0,0 +1,59 @@ +import random +import dask.dataframe as dd + + +class DaskCSVLoader: + """Class for creating and using a loader for large csv + or other delimited files. The loader uses dask to read + smaller partitions of a file into memory (one partition at a time), + before sampling batches from the partitions. + """ + + def __init__( + self, + file_path, + sep=",", + header="infer", + block_size=10e6, + random_seed=None, + ): + """Initializes the loader. + Args: + file_path (str): Path to delimited file. + sep (str, optional): Delimiter. Defaults to ",". + header (str, optional): Number of rows to be used as the header. + See pandas.read_csv() + Defaults to "infer". + block_size (int, optional): Size of partition in bytes. + See dask.dataframe.read_csv() + Defaults to 10e6. + random_seed (int, optional): Random seed. See random.seed(). + Defaults to None. + """ + + self.df = dd.read_csv( + file_path, sep=sep, header=header, blocksize=block_size + ) + self.random_seed = random_seed + random.seed(random_seed) + + def get_random_batches(self, num_batches, batch_size): + """Creates a random-batch generator. + Batches returned are pandas dataframes of length=batch_size. + Note: If the sampled partition has less rows than the + specified batch_size, then a smaller batch of the same + size as that partition's number of rows is returned. + + Args: + num_batches (int): Number of batches to generate. + batch_size (int]): Batch size. 
+ """ + for i in range(num_batches): + rnd_part_idx = random.randint(0, self.df.npartitions - 1) + sample_part = self.df.partitions[rnd_part_idx].compute() + if sample_part.shape[0] > batch_size: + yield sample_part.sample( + batch_size, random_state=self.random_seed + ) + else: + yield sample_part From bab7499fef499cb19a28b64477c909e50ddc44e0 Mon Sep 17 00:00:00 2001 From: Heather Shapiro Date: Wed, 19 Jun 2019 15:03:44 -0400 Subject: [PATCH 010/108] Updated different readmes --- scenarios/README.md | 34 +++++++++++++++++++++++++ scenarios/sentence_similarity/README.md | 24 ++++++++++++++--- scenarios/text_classification/README.md | 3 +++ 3 files changed, 58 insertions(+), 3 deletions(-) create mode 100644 scenarios/README.md diff --git a/scenarios/README.md b/scenarios/README.md new file mode 100644 index 000000000..eff49fa21 --- /dev/null +++ b/scenarios/README.md @@ -0,0 +1,34 @@ +# NLP Scenarios + +This folder contains examples and best practices, written in Jupyter notebooks, for building Natural Language Processing systems for different scenarios. + +## Summary + +The following summarizes each scenario of the best practice notebooks. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities. + +| Scenario | Applications | Languages | Models | +|---| ------------------------ | -------------------------------------------- | ------------------- | +|[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| +|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| +|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance| +|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec<br>fastText<br>
GloVe| + +## Azure-enhanced notebooks + +Azure products and services are used in certain notebooks to enhance the efficiency of developing Natural Language systems at scale. + +To successfully run these notebooks, the users **need an Azure subscription** or can [use Azure for free](https://azure.microsoft.com/en-us/free/). + +The Azure products featured in the notebooks include: + +* [Azure Machine Learning service](https://azure.microsoft.com/en-us/services/machine-learning-service/) - Azure Machine Learning service is a cloud service used to train, deploy, automate, and manage machine learning models, all at the broad scale that the cloud provides. It is used across various notebooks for the AI model development related tasks like: + * Tracking and monitoring metrics to enhance the model creation process + * Hyperparameter tuning + * Scaling up and out on Azure Machine Learning Compute + * Deploying a web service to both Azure Container Instance and Azure Kubernetes Service + +* [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aks) - You can use Azure Machine Learning service to host your classification model in a web service deployment on Azure Kubernetes Service (AKS). AKS is good for high-scale production deployments and provides autoscaling, and fast response times. + +* [Azure Container Instance](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aci)- You can use Azure Machine Learning service to host your classification model in a web service deployment on Azure Container Instance (ACI). ACI is good for low scale, CPU-based workloads. + +There may be other Azure service or products used in the notebooks. Introduction and/or reference of those will be provided in the notebooks. diff --git a/scenarios/sentence_similarity/README.md b/scenarios/sentence_similarity/README.md index f22c24d23..9fa0b6805 100644 --- a/scenarios/sentence_similarity/README.md +++ b/scenarios/sentence_similarity/README.md @@ -1,6 +1,24 @@ +# Sentence Similarity -## What is sentence similarity? +This folder contains examples and best practices, written in Jupyter notebooks, for building sentence similarity models. The scores can be used in a wide variety of applications, such as search/retrieval, nearest-neighbor or kernel-based classification methods, recommendation, and ranking tasks. -Sentence similarity or semantic textual similarity is to determine how similar two pieces of texts are and a measure of the degree to which two pieces of text express the same meaning. This can take the form of assigning a score from 1 to 5. Related tasks are parahrase or duplicate identification. Sentence similarity is normally calculated by the following two steps: 1. obtaining the embeddings of the sentences, 2. taking the cosine similarity between them as shown in the following figure: +## What is sentence similarity -![Sentence Similarity](https://nlpbp.blob.core.windows.net/images/example-similarity.png)**Sentence Similarity ([Source](https://tfhub.dev/google/universal-sentence-encoder/1))** \ No newline at end of file +Sentence similarity or semantic textual similarity is to determine how similar two pieces of texts are and a measure of the degree to which two pieces of text express the same meaning. This can take the form of assigning a score from 1 to 5. Related tasks are paraphrase or duplicate identification. 
The common methods used for text similarity range from simple word-vector dot products to pairwise classification, and more recently, Siamese recurrent/convolutional neural networks with triplet loss functions. + +Sentence similarity is normally calculated by the following two steps: + +1. obtaining the embeddings of the sentences + +2. taking the cosine similarity between them as shown in the following figure([Source](https://tfhub.dev/google/universal-sentence-encoder/1)): + ![Sentence Similarity](https://nlpbp.blob.core.windows.net/images/example-similarity.png) + +## Summary + +The following summarizes each notebook for Sentence Similarity. Each notebook provides more details and guiding in principles on building state of the art models. + +|Notebook|Runs Local|Description| +|---|---|---| +|[Creating a Baseline model](baseline_deep_dive.ipynb)| Yes| A baseline model is a basic solution that serves as a point of reference for comparing other models to. The baseline model's performance gives us an indication of how much better our models can perform relative to a naive approach.| +|Senteval |[local](senteval_local.ipynb), [AzureML](senteval_azureml.ipynb)|SentEval is a widely used benchmarking tool for evaluating general-purpose sentence embeddings. Running SentEval locally is easy, but not necessarily efficient depending on the model specs. We provide an example on how to do this efficiently in Azure Machine Learning Service. | +|[GenSen on AzureML](gensen_aml_deep_dive.ipynb_)| No | This notebook serves as an introduction to an end-to-end NLP solution for sentence similarity building one of the State of the Art models, GenSen, on the AzureML platform. We show the advantages of AzureML when training large NLP models with GPU. diff --git a/scenarios/text_classification/README.md b/scenarios/text_classification/README.md index e69de29bb..5a8e46488 100644 --- a/scenarios/text_classification/README.md +++ b/scenarios/text_classification/README.md @@ -0,0 +1,3 @@ +# Text Classification + +Text classification is a supervised learning method of learning and predicting the category or the class of a document given its text content. The state-of-the-art methods are based on neural networks of different architectures as well as pretrained language models or word embeddings. Text classification is a core task in natural language Processing and has numerous applications such as sentiment analysis, document indexing in digital libraries, hate speech detection, and general-purpose categorization in medical, academic, legal, and many other domains. From d0b057ac5179b818f3cae8e706b9a000189b6baf Mon Sep 17 00:00:00 2001 From: Heather Shapiro Date: Wed, 19 Jun 2019 15:24:48 -0400 Subject: [PATCH 011/108] updated azureml section --- scenarios/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scenarios/README.md b/scenarios/README.md index eff49fa21..b86aa9ff0 100644 --- a/scenarios/README.md +++ b/scenarios/README.md @@ -22,7 +22,9 @@ To successfully run these notebooks, the users **need an Azure subscription** or The Azure products featured in the notebooks include: * [Azure Machine Learning service](https://azure.microsoft.com/en-us/services/machine-learning-service/) - Azure Machine Learning service is a cloud service used to train, deploy, automate, and manage machine learning models, all at the broad scale that the cloud provides. 
It is used across various notebooks for the AI model development related tasks like: + * Using Datastores * Tracking and monitoring metrics to enhance the model creation process + * Distributed Training * Hyperparameter tuning * Scaling up and out on Azure Machine Learning Compute * Deploying a web service to both Azure Container Instance and Azure Kubernetes Service From b407204c797d2c69265d2c550613957f7c0e7daa Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Wed, 19 Jun 2019 16:12:20 -0400 Subject: [PATCH 012/108] add sequential loader --- utils_nlp/dataset/data_loaders.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/utils_nlp/dataset/data_loaders.py b/utils_nlp/dataset/data_loaders.py index 6124fdbd4..014a8b334 100644 --- a/utils_nlp/dataset/data_loaders.py +++ b/utils_nlp/dataset/data_loaders.py @@ -57,3 +57,15 @@ def get_random_batches(self, num_batches, batch_size): ) else: yield sample_part + + def get_sequential_batches(self, batch_size): + """Creates a sequential generator. + Batches returned are pandas dataframes of length=batch_size. + Note: Final batch might be of smaller size. + Args: + batch_size (int): Batch size. + """ + for i in range(self.df.npartitions): + part = self.df.partitions[i].compute() + for j in range(0, part.shape[0], batch_size): + yield part.iloc[j : j + batch_size, :] From 2354a0d1167b9407f417ce46c31f1541776d07c5 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Wed, 19 Jun 2019 17:03:37 -0400 Subject: [PATCH 013/108] add dask dependency --- tools/generate_conda_file.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/generate_conda_file.py b/tools/generate_conda_file.py index da0a591a4..95befecc2 100644 --- a/tools/generate_conda_file.py +++ b/tools/generate_conda_file.py @@ -59,6 +59,7 @@ ), "azureml-dataprep": "azureml-dataprep==1.1.4", "black": "black>=18.6b4", + "dask": "dask[dataframe]==1.2.2", "papermill": "papermill==0.18.2", "pydocumentdb": "pydocumentdb>=2.3.3", "tqdm": "tqdm==4.31.1", From 21e1749349e123847ef0077d20a8bd1eaa5fb89f Mon Sep 17 00:00:00 2001 From: hlums Date: Wed, 19 Jun 2019 21:03:42 +0000 Subject: [PATCH 014/108] Updated notebook with new data utils and added Hindi example --- .../entailment_xnli_multilingual.ipynb | 590 +++++++++++------- 1 file changed, 372 insertions(+), 218 deletions(-) diff --git a/scenarios/entailment/entailment_xnli_multilingual.ipynb b/scenarios/entailment/entailment_xnli_multilingual.ipynb index 81dd40993..409029fd9 100644 --- a/scenarios/entailment/entailment_xnli_multilingual.ipynb +++ b/scenarios/entailment/entailment_xnli_multilingual.ipynb @@ -4,18 +4,17 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Multi-lingual Entailment on XNLI Dataset using BERT" + "# Multi-lingual Inference on XNLI Dataset using BERT" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Required packages\n", - "* pytorch-pretrained-bert\n", - "* pandas\n", - "* seqeval\n", - "* unicode" + "## Summary\n", + "In this notebook, we demostrate using the [Multi-lingual BERT model](https://github.com/google-research/bert/blob/master/multilingual.md) to do language inference in Chinese and Hindi. We use the [XNLI](https://github.com/facebookresearch/XNLI) dataset and the task is to classify sentence pairs into three classes: contradiction, entailment, and neutral. \n", + "The figure below shows how [BERT](https://arxiv.org/abs/1810.04805) classifies sentence pairs. It concatenates the tokens in each sentence pairs and separates the sentences by the [SEP] token. 
A [CLS] token is prepended to the token list and used as the aggregate sequence representation for the classification task.\n", + "" ] }, { @@ -30,8 +29,8 @@ "import os\n", "import random\n", "import numpy as np\n", - "import csv\n", - "import six\n", + "from sklearn.metrics import classification_report\n", + "from sklearn.preprocessing import LabelEncoder\n", "\n", "import torch\n", "\n", @@ -40,165 +39,117 @@ " sys.path.insert(0, nlp_path)\n", "\n", "from utils_nlp.bert.sequence_classification import BERTSequenceClassifier\n", - "from utils_nlp.bert.common import Language, Tokenizer" + "from utils_nlp.bert.common import Language, Tokenizer\n", + "from utils_nlp.dataset.xnli import load_pandas_df" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "# set random seeds\n", - "random_seed = 42\n", - "random.seed(random_seed)\n", - "np.random.seed(random_seed)\n", - "torch.manual_seed(random_seed)\n", + "RANDOM_SEED = 42\n", + "random.seed(RANDOM_SEED)\n", + "np.random.seed(RANDOM_SEED)\n", + "torch.manual_seed(RANDOM_SEED)\n", "num_cuda_devices = torch.cuda.device_count()\n", "if num_cuda_devices > 1:\n", - " torch.cuda.manual_seed_all(random_seed)\n", + " torch.cuda.manual_seed_all(RANDOM_SEED)\n", "\n", "# model configurations\n", - "language = Language.CHINESE\n", - "do_lower_case = True\n", - "max_seq_length = 128\n", + "LANGUAGE_CHINESE = Language.CHINESE\n", + "LANGUAGE_MULTI = Language.MULTILINGUAL\n", + "TO_LOWER = True\n", + "MAX_SEQ_LENGTH = 128\n", "\n", "# training configurations\n", - "device=\"gpu\"\n", - "batch_size = 32\n", - "num_train_epochs = 2\n", + "NUM_GPUS = 2\n", + "BATCH_SIZE = 32\n", + "NUM_EPOCHS = 2\n", "\n", "# optimizer configurations\n", - "learning_rate = 5e-5\n", - "config_file = \"config_multilingual.yaml\"\n", - "train_data_dir = \"./data/XNLI-MT-1.0/XNLI-MT-1.0/\"\n", - "dev_data_dir = \"./data/XNLI-MT-1.0/XNLI-MT-1.0/\"\n", - "cache_dir=\".\"" + "LEARNING_RATE= 5e-5\n", + "WARMUP_PROPORTION= 0.1\n", + "\n", + "# data configurations\n", + "TEXT_COL = \"text\"\n", + "LABEL_COL = \"label\"\n", + "\n", + "CACHE_DIR = \"./temp\"" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Preprocess Data" + "## Load Data\n", + "The XNLI dataset comes in two zip files: \n", + "* XNLI-1.0.zip: dev and test datasets in 15 languages. The original English data was translated into other languages by human translators. \n", + "* XNLI-MT-1.0.zip: training dataset in 15 languages. This dataset is machine translations of the [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) dataset. It also contains English translations of the dev and test datasets, but not used in this notebook. 
\n", + "\n", + "The `load_pandas_df` function downloads and extracts the zip files if they don't already exist in `local_cache_path` and returns the data subset specified by `file_split` and `language`" ] }, { "cell_type": "code", "execution_count": 3, - "metadata": { - "scrolled": false - }, + "metadata": {}, "outputs": [], "source": [ - "def convert_to_unicode(text):\n", - " \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n", - " if six.PY3:\n", - " if isinstance(text, str):\n", - " return text\n", - " elif isinstance(text, bytes):\n", - " return text.decode(\"utf-8\", \"ignore\")\n", - " else:\n", - " raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n", - " elif six.PY2:\n", - " if isinstance(text, str):\n", - " return text.decode(\"utf-8\", \"ignore\")\n", - " elif isinstance(text, unicode):\n", - " return text\n", - " else:\n", - " raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n", - " else:\n", - " raise ValueError(\"Not running on Python2 or Python 3?\")\n", - " \n", - "class DataProcessor(object):\n", - " \"\"\"Base class for data converters for sequence classification data sets.\"\"\"\n", - "\n", - " def get_train_examples(self, data_dir):\n", - " \"\"\"Gets a collection of `InputExample`s for the train set.\"\"\"\n", - " raise NotImplementedError()\n", - "\n", - " def get_dev_examples(self, data_dir):\n", - " \"\"\"Gets a collection of `InputExample`s for the dev set.\"\"\"\n", - " raise NotImplementedError()\n", - "\n", - " def get_labels(self):\n", - " \"\"\"Gets the list of labels for this data set.\"\"\"\n", - " raise NotImplementedError()\n", - "\n", - " @classmethod\n", - " def _read_tsv(cls, input_file, quotechar=None):\n", - " \"\"\"Reads a tab separated value file.\"\"\"\n", - " with open(input_file, \"r\", encoding=\"utf-8\") as f:\n", - " reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n", - " lines = []\n", - " for line in reader:\n", - " if sys.version_info[0] == 2:\n", - " line = list(unicode(cell, 'utf-8') for cell in line)\n", - " lines.append(line)\n", - " return lines\n", + "train_df_chinese = load_pandas_df(local_cache_path=\"./\", file_split=\"train\", language=\"zh\")\n", + "dev_df_chinese = load_pandas_df(local_cache_path=\"./\", file_split=\"dev\", language=\"zh\")\n", + "test_df_chinese = load_pandas_df(local_cache_path=\"./\", file_split=\"test\", language=\"zh\")\n", "\n", - " \n", - "class XnliProcessor(DataProcessor):\n", - " \"\"\"Processor for the XNLI data set.\"\"\"\n", - "\n", - " def __init__(self):\n", - " self.language = \"zh\"\n", - "\n", - " def get_train_examples(self, data_dir):\n", - " \"\"\"See base class.\"\"\"\n", - " lines = self._read_tsv(\n", - " os.path.join(data_dir, \"multinli\",\n", - " \"multinli.train.%s.tsv\" % self.language))\n", - " text_list = []\n", - " label_list = []\n", - " for (i, line) in enumerate(lines):\n", - " if i == 0:\n", - " continue\n", - " text_a = convert_to_unicode(line[0])\n", - " text_b = convert_to_unicode(line[1])\n", - " label = convert_to_unicode(line[2])\n", - " if label == convert_to_unicode(\"contradictory\"):\n", - " label = convert_to_unicode(\"contradiction\")\n", - " text_list.append((text_a, text_b))\n", - " label_list.append(label)\n", - " return text_list, label_list\n", - "\n", - " def get_dev_examples(self, data_dir):\n", - " \"\"\"See base class.\"\"\"\n", - " lines = self._read_tsv(os.path.join(data_dir, \"xnli\", \"xnli.dev.tsv\"))\n", - " text_list = []\n", - " label_list = []\n", - " for (i, line) in 
enumerate(lines):\n", - " if i == 0:\n", - " continue\n", - " language = convert_to_unicode(line[0])\n", - " if language != convert_to_unicode(self.language):\n", - " continue\n", - " text_a = convert_to_unicode(line[6])\n", - " text_b = convert_to_unicode(line[7])\n", - " label = convert_to_unicode(line[1])\n", - " \n", - " text_list.append((text_a, text_b))\n", - " label_list.append(label)\n", - " return text_list, label_list\n", - "\n", - " def get_labels(self):\n", - " \"\"\"See base class.\"\"\"\n", - " return [\"contradiction\", \"entailment\", \"neutral\"]" + "train_df_hindi = load_pandas_df(local_cache_path=\"./\", file_split=\"train\", language=\"hi\")\n", + "dev_df_hindi = load_pandas_df(local_cache_path=\"./\", file_split=\"dev\", language=\"hi\")\n", + "test_df_hindi = load_pandas_df(local_cache_path=\"./\", file_split=\"test\", language=\"hi\")" ] }, { "cell_type": "code", "execution_count": 4, - "metadata": { - "scrolled": false - }, - "outputs": [], + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chinese training dataset size: 392702\n", + "Chinese dev dataset size: 2490\n", + "Chinese test dataset size: 5010\n", + "\n", + "Hindi training dataset size: 392702\n", + "Hindi dev dataset size: 2490\n", + "Hindi test dataset size: 5010\n", + "\n", + " text label\n", + "0 (从 概念 上 看 , 奶油 收入 有 两 个 基本 方面 产品 和 地理 ., 产品 和 ... neutral\n", + "1 (你 知道 在 这个 季节 , 我 猜 在 你 的 水平 你 把 他们 丢到 下 一个 水平... entailment\n", + "2 (我们 的 一个 号码 会 非常 详细 地 执行 你 的 指示, 我 团队 的 一个 成员 ... entailment\n", + "3 (你 怎么 知道 的 ? 所有 这些 都 是 他们 的 信息 ., 这些 信息 属于 他们 .) entailment\n", + "4 (是 啊 , 我 告诉 你 , 如果 你 去 买 一些 网球鞋 , 我 可以 看到 为什么 ... neutral\n", + " text label\n", + "0 (Conceptually क ् रीम एंजलिस में दो मूल आयाम ह... neutral\n", + "1 (आप मौसम के दौरान जानते हैं और मैं अपने स ् तर... entailment\n", + "2 (हमारे एक नंबर में से एक आपके निर ् देशों को म... entailment\n", + "3 (आप कैसे जानते हैं ? ये सब उनकी जानकारी फिर से... entailment\n", + "4 (हाँ मैं आपको बताता हूँ कि अगर आप उन टेनिस जूत... 
neutral\n" + ] + } + ], "source": [ - "xnli_processor = XnliProcessor()\n", - "train_text, train_labels = xnli_processor.get_train_examples(data_dir=train_data_dir)\n", - "dev_text, dev_labels= xnli_processor.get_dev_examples(data_dir=dev_data_dir)\n", - "label_list = xnli_processor.get_labels()" + "print(\"Chinese training dataset size: {}\".format(train_df_chinese.shape[0]))\n", + "print(\"Chinese dev dataset size: {}\".format(dev_df_chinese.shape[0]))\n", + "print(\"Chinese test dataset size: {}\".format(test_df_chinese.shape[0]))\n", + "print()\n", + "print(\"Hindi training dataset size: {}\".format(train_df_hindi.shape[0]))\n", + "print(\"Hindi dev dataset size: {}\".format(dev_df_hindi.shape[0]))\n", + "print(\"Hindi test dataset size: {}\".format(test_df_hindi.shape[0]))\n", + "print()\n", + "print(train_df_chinese.head())\n", + "print(train_df_hindi.head())" ] }, { @@ -207,10 +158,36 @@ "metadata": {}, "outputs": [], "source": [ - "train_text = train_text[:1000]\n", - "train_labels = train_labels[:1000]\n", - "dev_text = dev_text[:1000]\n", - "dev_labels = dev_labels[:1000]" + "train_df_chinese = train_df_chinese.loc[:1000]\n", + "dev_df_chinese = dev_df_chinese.loc[:1000]\n", + "test_df_chinese = test_df_chinese.loc[:1000]\n", + "\n", + "train_df_hindi = train_df_hindi.loc[:1000]\n", + "dev_df_hindi = dev_df_hindi.loc[:1000]\n", + "test_df_hindi = test_df_hindi.loc[:1000]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that the texts are convereted to unicode which can be processed by BERT models. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Language Inference on Chinese\n", + "For Chinese dataset, we use the `bert-base-chinese` model which was pretrained on Chinese dataset only. The `bert-base-multilingual-cased` model can also be used on Chinese, but the accuracy is 3% lower." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tokenize and Preprocess\n", + "Before training, we tokenize the sentence texts and convert them to lists of tokens. The following steps instantiate a BERT tokenizer given the language, and tokenize the text of the training and testing sets." ] }, { @@ -219,26 +196,34 @@ "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "['contradiction', 'entailment', 'neutral']\n" + "100%|██████████| 1001/1001 [00:00<00:00, 2612.95it/s]\n", + "100%|██████████| 1001/1001 [00:00<00:00, 3663.45it/s]\n" ] } ], "source": [ - "print(label_list)" + "tokenizer_chinese = Tokenizer(LANGUAGE_CHINESE, to_lower=TO_LOWER, cache_dir=CACHE_DIR)\n", + "\n", + "train_tokens_chinese = tokenizer_chinese.tokenize(train_df_chinese[TEXT_COL])\n", + "test_tokens_chinese= tokenizer_chinese.tokenize(test_df_chinese[TEXT_COL])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Convert examples to features\n", - "The function `convert_examples_to_token_features` converts raw string data to numerical features, involving the following steps:\n", - "1. Tokenization\n", - "2. Convert tokens and labels to numerical values\n", - "3. 
Sequence padding or truncation" + "In addition, we perform the following preprocessing steps in the cell below:\n", + "\n", + "* Convert the tokens into token indices corresponding to the BERT tokenizer's vocabulary\n", + "* Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", + "* Pad or truncate the token lists to the specified max length\n", + "* Return mask lists that indicate paddings' positions\n", + "* Return token type id lists that indicate which sentence the tokens belong to\n", + "\n", + "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" ] }, { @@ -247,51 +232,136 @@ "metadata": {}, "outputs": [], "source": [ - "tokenizer = Tokenizer(language=language, \n", - " to_lower=do_lower_case, \n", - " cache_dir=cache_dir)" + "train_token_ids_chinese, train_input_mask_chinese, train_token_type_ids_chinese = \\\n", + " tokenizer_chinese.preprocess_classification_tokens(train_tokens_chinese, max_len=MAX_SEQ_LENGTH)\n", + "test_token_ids_chinese, test_input_mask_chinese, test_token_type_ids_chinese = \\\n", + " tokenizer_chinese.preprocess_classification_tokens(test_tokens_chinese, max_len=MAX_SEQ_LENGTH)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, + "outputs": [], + "source": [ + "label_encoder_chinese = LabelEncoder()\n", + "train_labels_chinese = label_encoder_chinese.fit_transform(train_df_chinese[LABEL_COL])\n", + "num_labels_chinese = len(np.unique(train_labels_chinese))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "classifier_chinese = BERTSequenceClassifier(language=LANGUAGE_CHINESE,\n", + " num_labels=num_labels_chinese,\n", + " cache_dir=CACHE_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Train Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, "outputs": [ { - "name": "stderr", + "name": "stdout", "output_type": "stream", "text": [ - "100%|██████████| 1000/1000 [00:00<00:00, 2916.60it/s]\n", - "100%|██████████| 1000/1000 [00:00<00:00, 3776.67it/s]\n" + "Warning: Only 1 CUDA device is available. 
Data parallelism is not possible.\n", + "epoch:1/2; batch:1->4/31; loss:1.273249\n", + "epoch:1/2; batch:5->8/31; loss:1.103003\n", + "epoch:1/2; batch:9->12/31; loss:1.107130\n", + "epoch:1/2; batch:13->16/31; loss:1.112338\n", + "epoch:1/2; batch:17->20/31; loss:1.334211\n", + "epoch:1/2; batch:21->24/31; loss:1.244677\n", + "epoch:1/2; batch:25->28/31; loss:1.146302\n", + "epoch:1/2; batch:29->31/31; loss:1.145210\n", + "epoch:2/2; batch:1->4/31; loss:1.084830\n", + "epoch:2/2; batch:5->8/31; loss:1.107789\n", + "epoch:2/2; batch:9->12/31; loss:1.125404\n", + "epoch:2/2; batch:13->16/31; loss:1.104571\n", + "epoch:2/2; batch:17->20/31; loss:1.115697\n", + "epoch:2/2; batch:21->24/31; loss:1.153866\n", + "epoch:2/2; batch:25->28/31; loss:1.093025\n", + "epoch:2/2; batch:29->31/31; loss:1.098436\n" ] } ], "source": [ - "train_tokens = tokenizer.tokenize(train_text)\n", - "dev_tokens = tokenizer.tokenize(dev_text)" + "classifier_chinese.fit(token_ids=train_token_ids_chinese,\n", + " input_mask=train_input_mask_chinese,\n", + " token_type_ids=train_token_type_ids_chinese,\n", + " labels=train_labels_chinese,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)" ] }, { - "cell_type": "code", - "execution_count": 10, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "train_token_ids, train_input_mask, train_token_type_ids = \\\n", - " tokenizer.preprocess_classification_tokens(train_tokens, max_len=max_seq_length)\n", - "dev_token_ids, dev_input_mask, dev_token_type_ids = \\\n", - " tokenizer.preprocess_classification_tokens(dev_tokens, max_len=max_seq_length)" + "### Predict on Test Data" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\r", + " 0%| | 0/1001 [00:004/31; loss:1.304341\n", - "epoch:1/2; batch:5->8/31; loss:1.074271\n", - "epoch:1/2; batch:9->12/31; loss:1.063943\n", - "epoch:1/2; batch:13->16/31; loss:1.124226\n", - "epoch:1/2; batch:17->20/31; loss:1.100629\n", - "epoch:1/2; batch:21->24/31; loss:1.128772\n", - "epoch:1/2; batch:25->28/31; loss:1.216407\n", - "epoch:1/2; batch:29->32/31; loss:1.041168\n", - "epoch:2/2; batch:1->4/31; loss:1.054121\n", - "epoch:2/2; batch:5->8/31; loss:1.015953\n", - "epoch:2/2; batch:9->12/31; loss:1.072215\n", - "epoch:2/2; batch:13->16/31; loss:1.143950\n", - "epoch:2/2; batch:17->20/31; loss:0.927066\n", - "epoch:2/2; batch:21->24/31; loss:1.054619\n", - "epoch:2/2; batch:25->28/31; loss:0.914871\n", - "epoch:2/2; batch:29->32/31; loss:0.977393\n" + "epoch:1/2; batch:1->4/31; loss:1.128533\n", + "epoch:1/2; batch:5->8/31; loss:1.139760\n", + "epoch:1/2; batch:9->12/31; loss:1.128057\n", + "epoch:1/2; batch:13->16/31; loss:1.163460\n", + "epoch:1/2; batch:17->20/31; loss:1.091910\n", + "epoch:1/2; batch:21->24/31; loss:1.198568\n", + "epoch:1/2; batch:25->28/31; loss:0.941484\n", + "epoch:1/2; batch:29->31/31; loss:1.049881\n", + "epoch:2/2; batch:1->4/31; loss:1.109279\n", + "epoch:2/2; batch:5->8/31; loss:1.075177\n", + "epoch:2/2; batch:9->12/31; loss:1.122685\n", + "epoch:2/2; batch:13->16/31; loss:1.124175\n", + "epoch:2/2; batch:17->20/31; loss:1.109364\n", + "epoch:2/2; batch:21->24/31; loss:1.052536\n", + "epoch:2/2; batch:25->28/31; loss:1.074721\n", + "epoch:2/2; batch:29->31/31; loss:1.132380\n" ] } ], "source": [ - "classifier.fit(token_ids=train_token_ids,\n", - " 
input_mask=train_input_mask,\n", - " token_type_ids=train_token_type_ids,\n", - " labels=train_label_ids,\n", - " num_gpus=2,\n", - " num_epochs=num_train_epochs,\n", - " batch_size=batch_size,\n", - " lr=learning_rate,\n", - " warmup_proportion=0.1)" + "classifier_multi = BERTSequenceClassifier(language=LANGUAGE_MULTI,\n", + " num_labels=num_labels_hindi,\n", + " cache_dir=CACHE_DIR)\n", + "classifier_multi.fit(token_ids=train_token_ids_hindi,\n", + " input_mask=train_input_mask_hindi,\n", + " token_type_ids=train_token_type_ids_hindi,\n", + " labels=train_labels_hindi,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Predict and Evaluate" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - " 1%| | 8/1000 [00:00<00:20, 49.08it/s]" + "\r", + " 0%| | 0/1001 [00:00 Date: Wed, 19 Jun 2019 21:05:53 +0000 Subject: [PATCH 015/108] Resolved confict with staging. --- utils_nlp/bert/common.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index c7c3f879a..7e289e657 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -53,16 +53,10 @@ def __init__( def tokenize(self, text): """Tokenizes a list of documents using a BERT tokenizer Args: -<<<<<<< HEAD text (list): List of strings (one sequence) or tuples (two sequences). Returns: [list]: List of lists. Each sublist contains WordPiece tokens of the input sequence(s). -======= - text (list(str)): list of text documents. - Returns: - [list(str)]: list of token lists. ->>>>>>> origin/staging """ if isinstance(text[0], str): return [self.tokenizer.tokenize(x) for x in tqdm(text)] From 593bb4eb5dc493203bbf033b32f6425d6d33727d Mon Sep 17 00:00:00 2001 From: hlums Date: Wed, 19 Jun 2019 21:06:15 +0000 Subject: [PATCH 016/108] Added convert_to_unicode helper function. --- utils_nlp/dataset/preprocess.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/utils_nlp/dataset/preprocess.py b/utils_nlp/dataset/preprocess.py index 03f0e9062..082454e67 100644 --- a/utils_nlp/dataset/preprocess.py +++ b/utils_nlp/dataset/preprocess.py @@ -5,6 +5,7 @@ import spacy import nltk from nltk.corpus import stopwords +import six def to_lowercase_all(df): @@ -160,3 +161,12 @@ def rm_nltk_stopwords( stop_df.columns = stop_cols return pd.concat([df, stop_df], axis=1) + +def convert_to_unicode(input_text): + """Converts intput_text to Unicode. Input must be utf-8.""" + if isinstance(input_text, str): + return input_text + elif isinstance(input_text, bytes): + return input_text.decode("utf-8", "ignore") + else: + raise TypeError("Unsupported string type: %s" % (type(input_text))) From ed3415b3208f64044424de6815d5a7b14376a85b Mon Sep 17 00:00:00 2001 From: hlums Date: Wed, 19 Jun 2019 21:06:38 +0000 Subject: [PATCH 017/108] Updated utils of XNLI dataset. 
--- utils_nlp/dataset/xnli.py | 76 +++++++++++++++++++++++++++++---------- 1 file changed, 57 insertions(+), 19 deletions(-) diff --git a/utils_nlp/dataset/xnli.py b/utils_nlp/dataset/xnli.py index e7bbcf4cb..549dd1893 100644 --- a/utils_nlp/dataset/xnli.py +++ b/utils_nlp/dataset/xnli.py @@ -6,41 +6,79 @@ """ import os +import sys import pandas as pd from utils_nlp.dataset.url_utils import extract_zip, maybe_download +from utils_nlp.dataset.preprocess import convert_to_unicode -URL = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip" +URL_XNLI = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip" +URL_XNLI_MT = "https://www.nyu.edu/projects/bowman/xnli/XNLI-MT-1.0.zip" -DATA_FILES = { - "dev": "XNLI-1.0/xnli.dev.jsonl", - "test": "XNLI-1.0/xnli.test.jsonl", -} - - -def load_pandas_df(local_cache_path=None, file_split="dev"): +def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): """Downloads and extracts the dataset files Args: - local_cache_path ([type], optional): [description]. - Defaults to None. + local_cache_path (str, optional): Path to store the data. + Defaults to "./". file_split (str, optional): The subset to load. - One of: {"dev", "test"} - Defaults to "train". + One of: {"train", "dev", "test"} + Defaults to "dev". + language (str, optional): language subset to read. + One of: {"en", "fr", "es", "de", "el", "bg", "ru", + "tr", "ar", "vi", "th", "zh", "hi", "sw", "ur"} + Defaults to "zh" (Chinese). Returns: pd.DataFrame: pandas DataFrame containing the specified XNLI subset. """ - file_name = URL.split("/")[-1] - maybe_download(URL, file_name, local_cache_path) + if file_split in ("dev", "test"): + url = URL_XNLI + sentence_1_index = 6 + sentence_2_index = 7 + label_index = 1 + + zip_file_name = url.split("/")[-1] + folder_name = ".".join(zip_file_name.split(".")[:-1]) + file_name = folder_name + "/" + ".".join(["xnli", file_split, "tsv"]) + elif file_split == "train": + url = URL_XNLI_MT + sentence_1_index = 0 + sentence_2_index = 1 + label_index = 2 + + zip_file_name = url.split("/")[-1] + folder_name = ".".join(zip_file_name.split(".")[:-1]) + file_name = folder_name + "/multinli/" + ".".join(["multinli", file_split, language, "tsv"]) + + maybe_download(url, zip_file_name, local_cache_path) if not os.path.exists( - os.path.join(local_cache_path, DATA_FILES[file_split]) + os.path.join(local_cache_path, folder_name) ): extract_zip( - os.path.join(local_cache_path, file_name), local_cache_path + os.path.join(local_cache_path, zip_file_name), local_cache_path ) - return pd.read_json( - os.path.join(local_cache_path, DATA_FILES[file_split]), lines=True - ) + + with open(os.path.join(local_cache_path, file_name), "r", encoding="utf-8") as f: + lines = f.read().splitlines() + + line_list = [line.split("\t") for line in lines] + # Remove the column name row + line_list.pop(0) + if file_split != "train": + line_list = [line for line in line_list if line[0] == language] + + label_list = [convert_to_unicode(line[label_index]) for line in line_list] + old_contradict_label = convert_to_unicode("contradictory") + new_contradict_label = convert_to_unicode("contradiction") + label_list = [new_contradict_label if label == old_contradict_label else label for label in label_list] + text_list = [(convert_to_unicode(line[sentence_1_index]), convert_to_unicode(line[sentence_2_index])) for line in line_list] + + df = pd.DataFrame({"text": text_list, "label": label_list}) + + return df + +if __name__ == "__main__": + load_pandas_df() \ No newline at end of file From 
4c4f91e4c95f72fcde195ee4d96f7314447077be Mon Sep 17 00:00:00 2001 From: Heather Shapiro Date: Thu, 20 Jun 2019 10:56:01 -0400 Subject: [PATCH 018/108] removed scenarios chart from root --- README.md | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/README.md b/README.md index 7c736f652..4ad19b9a3 100755 --- a/README.md +++ b/README.md @@ -3,25 +3,10 @@ | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | master | [![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/unit-test-master?branchName=master)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=22&branchName=master) | | staging | [![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/unit-test-staging?branchName=staging)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=21&branchName=staging) | - # NLP Best Practices This repository contains examples and best practices for building NLP systems, provided as Jupyter notebooks and utility functions. The focus of the repository is on state-of-the-art methods and common scenarios that are popular among researchers and practitioners working on problems involving text and language. -The following section includes a list of the available scenarios. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities. - - -## Scenarios - - -| Scenario | Applications | Languages | Models | -|---| ------------------------ | -------------------------------------------- | ------------------- | -|[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| -|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| -|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance| -|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec<br>fastText<br>
GloVe| - - ## Planning All feature planning is done via projects, milestones, and issues in this repository. From 4388e6c58868828c786ebf7468a3f189081052ea Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Thu, 20 Jun 2019 13:28:10 -0400 Subject: [PATCH 019/108] add whole-word pretrained models --- utils_nlp/bert/common.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index 52dcdb2d5..6d6cf1019 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -18,12 +18,14 @@ class Language(Enum): - """An enumeration of the supported languages.""" + """An enumeration of the supported pretrained models and languages.""" ENGLISH = "bert-base-uncased" ENGLISHCASED = "bert-base-cased" ENGLISHLARGE = "bert-large-uncased" ENGLISHLARGECASED = "bert-large-cased" + ENGLISHLARGEWW = "bert-large-uncased-whole-word-masking" + ENGLISHLARGECASEDWW = "bert-large-cased-whole-word-masking" CHINESE = "bert-base-chinese" MULTILINGUAL = "bert-base-multilingual-cased" @@ -33,6 +35,7 @@ def __init__( self, language=Language.ENGLISH, to_lower=False, cache_dir="." ): """Initializes the underlying pretrained BERT tokenizer. + Args: language (Language, optional): The pretrained model's language. Defaults to Language.ENGLISH. @@ -46,6 +49,7 @@ def __init__( def tokenize(self, text): """Tokenizes a list of documents using a BERT tokenizer + Args: text (list(str)): list of text documents. Returns: @@ -60,6 +64,7 @@ def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): - map tokens to indices - pad and truncate sequences - create an input_mask + Args: tokens (list): List of tokens to preprocess. max_len (int, optional): Maximum number of tokens @@ -235,6 +240,7 @@ def create_data_loader( ): """ Create a dataloader for sampling and serving data batches. + Args: input_ids (list): List of lists. Each sublist contains numerical values, i.e. token ids, corresponding to the tokens in the input From aa35f62e4d564e327f7482cab301652a5c319e0e Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Thu, 20 Jun 2019 13:55:33 -0400 Subject: [PATCH 020/108] Updated entailment notebook with results. --- .../entailment_xnli_multilingual.ipynb | 187 ++++++++---------- 1 file changed, 81 insertions(+), 106 deletions(-) diff --git a/scenarios/entailment/entailment_xnli_multilingual.ipynb b/scenarios/entailment/entailment_xnli_multilingual.ipynb index 409029fd9..335710642 100644 --- a/scenarios/entailment/entailment_xnli_multilingual.ipynb +++ b/scenarios/entailment/entailment_xnli_multilingual.ipynb @@ -43,6 +43,13 @@ "from utils_nlp.dataset.xnli import load_pandas_df" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configurations" + ] + }, { "cell_type": "code", "execution_count": 2, @@ -89,7 +96,7 @@ "* XNLI-1.0.zip: dev and test datasets in 15 languages. The original English data was translated into other languages by human translators. \n", "* XNLI-MT-1.0.zip: training dataset in 15 languages. This dataset is machine translations of the [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) dataset. It also contains English translations of the dev and test datasets, but not used in this notebook. 
\n", "\n", - "The `load_pandas_df` function downloads and extracts the zip files if they don't already exist in `local_cache_path` and returns the data subset specified by `file_split` and `language`" + "The `load_pandas_df` function downloads and extracts the zip files if they don't already exist in `local_cache_path` and returns the data subset specified by `file_split` and `language`." ] }, { @@ -98,13 +105,13 @@ "metadata": {}, "outputs": [], "source": [ - "train_df_chinese = load_pandas_df(local_cache_path=\"./\", file_split=\"train\", language=\"zh\")\n", - "dev_df_chinese = load_pandas_df(local_cache_path=\"./\", file_split=\"dev\", language=\"zh\")\n", - "test_df_chinese = load_pandas_df(local_cache_path=\"./\", file_split=\"test\", language=\"zh\")\n", + "train_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"train\", language=\"zh\")\n", + "dev_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"dev\", language=\"zh\")\n", + "test_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"test\", language=\"zh\")\n", "\n", - "train_df_hindi = load_pandas_df(local_cache_path=\"./\", file_split=\"train\", language=\"hi\")\n", - "dev_df_hindi = load_pandas_df(local_cache_path=\"./\", file_split=\"dev\", language=\"hi\")\n", - "test_df_hindi = load_pandas_df(local_cache_path=\"./\", file_split=\"test\", language=\"hi\")" + "train_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"train\", language=\"hi\")\n", + "dev_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"dev\", language=\"hi\")\n", + "test_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"test\", language=\"hi\")" ] }, { @@ -158,20 +165,20 @@ "metadata": {}, "outputs": [], "source": [ - "train_df_chinese = train_df_chinese.loc[:1000]\n", - "dev_df_chinese = dev_df_chinese.loc[:1000]\n", - "test_df_chinese = test_df_chinese.loc[:1000]\n", + "# train_df_chinese = train_df_chinese.loc[:1000]\n", + "# dev_df_chinese = dev_df_chinese.loc[:1000]\n", + "# test_df_chinese = test_df_chinese.loc[:1000]\n", "\n", - "train_df_hindi = train_df_hindi.loc[:1000]\n", - "dev_df_hindi = dev_df_hindi.loc[:1000]\n", - "test_df_hindi = test_df_hindi.loc[:1000]" + "# train_df_hindi = train_df_hindi.loc[:1000]\n", + "# dev_df_hindi = dev_df_hindi.loc[:1000]\n", + "# test_df_hindi = test_df_hindi.loc[:1000]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Note that the texts are convereted to unicode which can be processed by BERT models. " + "Note that the texts are convereted to Unicode which can be processed by BERT models. " ] }, { @@ -199,8 +206,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 1001/1001 [00:00<00:00, 2612.95it/s]\n", - "100%|██████████| 1001/1001 [00:00<00:00, 3663.45it/s]\n" + "100%|██████████| 392702/392702 [02:27<00:00, 2667.38it/s]\n", + "100%|██████████| 5010/5010 [00:01<00:00, 3054.76it/s]\n" ] } ], @@ -283,23 +290,26 @@ "name": "stdout", "output_type": "stream", "text": [ - "Warning: Only 1 CUDA device is available. 
Data parallelism is not possible.\n", - "epoch:1/2; batch:1->4/31; loss:1.273249\n", - "epoch:1/2; batch:5->8/31; loss:1.103003\n", - "epoch:1/2; batch:9->12/31; loss:1.107130\n", - "epoch:1/2; batch:13->16/31; loss:1.112338\n", - "epoch:1/2; batch:17->20/31; loss:1.334211\n", - "epoch:1/2; batch:21->24/31; loss:1.244677\n", - "epoch:1/2; batch:25->28/31; loss:1.146302\n", - "epoch:1/2; batch:29->31/31; loss:1.145210\n", - "epoch:2/2; batch:1->4/31; loss:1.084830\n", - "epoch:2/2; batch:5->8/31; loss:1.107789\n", - "epoch:2/2; batch:9->12/31; loss:1.125404\n", - "epoch:2/2; batch:13->16/31; loss:1.104571\n", - "epoch:2/2; batch:17->20/31; loss:1.115697\n", - "epoch:2/2; batch:21->24/31; loss:1.153866\n", - "epoch:2/2; batch:25->28/31; loss:1.093025\n", - "epoch:2/2; batch:29->31/31; loss:1.098436\n" + "epoch:1/2; batch:1->1228/12271; loss:1.194384\n", + "epoch:1/2; batch:1229->2456/12271; loss:0.863067\n", + "epoch:1/2; batch:2457->3684/12271; loss:0.781256\n", + "epoch:1/2; batch:3685->4912/12271; loss:1.067413\n", + "epoch:1/2; batch:4913->6140/12271; loss:0.599279\n", + "epoch:1/2; batch:6141->7368/12271; loss:0.471488\n", + "epoch:1/2; batch:7369->8596/12271; loss:0.572327\n", + "epoch:1/2; batch:8597->9824/12271; loss:0.689093\n", + "epoch:1/2; batch:9825->11052/12271; loss:0.651702\n", + "epoch:1/2; batch:11053->12271/12271; loss:0.431085\n", + "epoch:2/2; batch:1->1228/12271; loss:0.255859\n", + "epoch:2/2; batch:1229->2456/12271; loss:0.434052\n", + "epoch:2/2; batch:2457->3684/12271; loss:0.433569\n", + "epoch:2/2; batch:3685->4912/12271; loss:0.405915\n", + "epoch:2/2; batch:4913->6140/12271; loss:0.636128\n", + "epoch:2/2; batch:6141->7368/12271; loss:0.416685\n", + "epoch:2/2; batch:7369->8596/12271; loss:0.265789\n", + "epoch:2/2; batch:8597->9824/12271; loss:0.328964\n", + "epoch:2/2; batch:9825->11052/12271; loss:0.436310\n", + "epoch:2/2; batch:11053->12271/12271; loss:0.374193\n" ] } ], @@ -331,22 +341,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "\r", - " 0%| | 0/1001 [00:004/31; loss:1.128533\n", - "epoch:1/2; batch:5->8/31; loss:1.139760\n", - "epoch:1/2; batch:9->12/31; loss:1.128057\n", - "epoch:1/2; batch:13->16/31; loss:1.163460\n", - "epoch:1/2; batch:17->20/31; loss:1.091910\n", - "epoch:1/2; batch:21->24/31; loss:1.198568\n", - "epoch:1/2; batch:25->28/31; loss:0.941484\n", - "epoch:1/2; batch:29->31/31; loss:1.049881\n", - "epoch:2/2; batch:1->4/31; loss:1.109279\n", - "epoch:2/2; batch:5->8/31; loss:1.075177\n", - "epoch:2/2; batch:9->12/31; loss:1.122685\n", - "epoch:2/2; batch:13->16/31; loss:1.124175\n", - "epoch:2/2; batch:17->20/31; loss:1.109364\n", - "epoch:2/2; batch:21->24/31; loss:1.052536\n", - "epoch:2/2; batch:25->28/31; loss:1.074721\n", - "epoch:2/2; batch:29->31/31; loss:1.132380\n" + "epoch:1/2; batch:1->1228/12271; loss:1.091754\n", + "epoch:1/2; batch:1229->2456/12271; loss:0.992931\n", + "epoch:1/2; batch:2457->3684/12271; loss:1.045146\n", + "epoch:1/2; batch:3685->4912/12271; loss:0.799912\n", + "epoch:1/2; batch:4913->6140/12271; loss:0.815425\n", + "epoch:1/2; batch:6141->7368/12271; loss:0.564856\n", + "epoch:1/2; batch:7369->8596/12271; loss:0.726981\n", + "epoch:1/2; batch:8597->9824/12271; loss:0.764087\n", + "epoch:1/2; batch:9825->11052/12271; loss:0.964115\n", + "epoch:1/2; batch:11053->12271/12271; loss:0.502252\n", + "epoch:2/2; batch:1->1228/12271; loss:0.601600\n", + "epoch:2/2; batch:1229->2456/12271; loss:0.695099\n", + "epoch:2/2; batch:2457->3684/12271; loss:0.419610\n", + "epoch:2/2; batch:3685->4912/12271; 
loss:0.603106\n", + "epoch:2/2; batch:4913->6140/12271; loss:0.705180\n", + "epoch:2/2; batch:6141->7368/12271; loss:0.493404\n", + "epoch:2/2; batch:7369->8596/12271; loss:0.864921\n", + "epoch:2/2; batch:8597->9824/12271; loss:0.518601\n", + "epoch:2/2; batch:9825->11052/12271; loss:0.395920\n", + "epoch:2/2; batch:11053->12271/12271; loss:0.685858\n" ] } ], @@ -512,22 +502,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "\r", - " 0%| | 0/1001 [00:00 Date: Thu, 20 Jun 2019 14:01:48 -0400 Subject: [PATCH 021/108] Fixed formatting. --- utils_nlp/bert/sequence_classification.py | 38 ++++++++------ utils_nlp/dataset/preprocess.py | 60 ++++++++++++----------- utils_nlp/dataset/xnli.py | 46 +++++++++++------ 3 files changed, 85 insertions(+), 59 deletions(-) diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index d13c73fde..9c0a5a4c7 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -61,9 +61,9 @@ def fit( input_mask (list): List of input mask lists. labels (list): List of training labels. token_type_ids (list, optional): List of lists. Each sublist - contains segment ids indicating if the token belongs to - the first sentence(0) or second sentence(1). Only needed - for two-sentence tasks. + contains segment ids indicating if the token belongs to + the first sentence(0) or second sentence(1). Only needed + for two-sentence tasks. num_gpus (int, optional): The number of gpus to use. If None is specified, all available GPUs will be used. Defaults to None. @@ -123,7 +123,7 @@ def fit( self.model.train() # training mode num_examples = len(token_ids) num_batches = int(num_examples / batch_size) - + token_type_ids_batch = None for epoch in range(num_epochs): for i in range(num_batches): @@ -140,11 +140,13 @@ def fit( mask_batch = torch.tensor( input_mask[start:end], dtype=torch.long, device=device ) - + if token_type_ids is not None: - token_type_ids_batch = torch.tensor( - token_type_ids[start:end], dtype=torch.long, device=device - ) + token_type_ids_batch = torch.tensor( + token_type_ids[start:end], + dtype=torch.long, + device=device, + ) opt.zero_grad() @@ -174,15 +176,22 @@ def fit( del [x_batch, y_batch, mask_batch, token_type_ids_batch] torch.cuda.empty_cache() - def predict(self, token_ids, input_mask, token_type_ids=None, num_gpus=None, batch_size=32): + def predict( + self, + token_ids, + input_mask, + token_type_ids=None, + num_gpus=None, + batch_size=32, + ): """Scores the given dataset and returns the predicted classes. Args: token_ids (list): List of training token lists. input_mask (list): List of input mask lists. token_type_ids (list, optional): List of lists. Each sublist - contains segment ids indicating if the token belongs to - the first sentence(0) or second sentence(1). Only needed - for two-sentence tasks. + contains segment ids indicating if the token belongs to + the first sentence(0) or second sentence(1). Only needed + for two-sentence tasks. num_gpus (int, optional): The number of gpus to use. If None is specified, all available GPUs will be used. Defaults to None. 
@@ -210,8 +219,9 @@ def predict(self, token_ids, input_mask, token_type_ids=None, num_gpus=None, bat token_type_ids_batch = None if token_type_ids is not None: token_type_ids_batch = torch.tensor( - token_type_ids[i : i + - batch_size], dtype=torch.long, device=device + token_type_ids[i : i + batch_size], + dtype=torch.long, + device=device, ) with torch.no_grad(): p_batch = self.model( diff --git a/utils_nlp/dataset/preprocess.py b/utils_nlp/dataset/preprocess.py index 082454e67..2e51821f5 100644 --- a/utils_nlp/dataset/preprocess.py +++ b/utils_nlp/dataset/preprocess.py @@ -5,7 +5,6 @@ import spacy import nltk from nltk.corpus import stopwords -import six def to_lowercase_all(df): @@ -23,7 +22,8 @@ def to_lowercase_all(df): def to_lowercase(df, column_names=[]): """ - This function transforms strings of the column names in the dataframe passed to lowercase + This function transforms strings of the column names in the dataframe + passed to lowercase Args: df (pd.DataFrame): Raw dataframe with some text columns. @@ -47,18 +47,18 @@ def to_spacy_tokens( token_cols=["sentence1_tokens", "sentence2_tokens"], ): """ - This function tokenizes the sentence pairs using spaCy, defaulting to the - spaCy en_core_web_sm model - - Args: - df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. - sentence_cols (list, optional): Column names of the raw sentence pairs. - token_cols (list, optional): Column names for the tokenized sentences. - - Returns: - pd.DataFrame: Dataframe with new columns token_cols, each containing - a list of tokens for their respective sentences. - """ + This function tokenizes the sentence pairs using spaCy, defaulting to the + spaCy en_core_web_sm model + + Args: + df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. + sentence_cols (list, optional): Column names of the raw sentence pairs. + token_cols (list, optional): Column names for the tokenized sentences. + + Returns: + pd.DataFrame: Dataframe with new columns token_cols, each containing + a list of tokens for their respective sentences. + """ nlp = spacy.load("en_core_web_sm") text_df = df[sentence_cols] nlp_df = text_df.applymap(lambda x: nlp(x)) @@ -78,21 +78,22 @@ def rm_spacy_stopwords( custom_stopwords=[], ): """ - This function tokenizes the sentence pairs using spaCy and remove stopwords, - defaulting to the spaCy en_core_web_sm model - - Args: - df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. - sentence_cols (list, optional): Column names for the raw sentence pairs. - stop_cols (list, optional): Column names for the tokenized sentences - without stop words. - custom_stopwords (list of str, optional): List of custom stopwords to - register with the spaCy model. - - Returns: - pd.DataFrame: Dataframe with new columns stop_cols, each containing a - list of tokens for their respective sentences. - """ + This function tokenizes the sentence pairs using spaCy and remove + stopwords, defaulting to the spaCy en_core_web_sm model + + Args: + df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. + sentence_cols (list, optional): Column names for the raw sentence + pairs. + stop_cols (list, optional): Column names for the tokenized sentences + without stop words. + custom_stopwords (list of str, optional): List of custom stopwords to + register with the spaCy model. + + Returns: + pd.DataFrame: Dataframe with new columns stop_cols, each containing a + list of tokens for their respective sentences. 
+ """ nlp = spacy.load("en_core_web_sm") if len(custom_stopwords) > 0: for csw in custom_stopwords: @@ -162,6 +163,7 @@ def rm_nltk_stopwords( stop_df.columns = stop_cols return pd.concat([df, stop_df], axis=1) + def convert_to_unicode(input_text): """Converts intput_text to Unicode. Input must be utf-8.""" if isinstance(input_text, str): diff --git a/utils_nlp/dataset/xnli.py b/utils_nlp/dataset/xnli.py index 549dd1893..1c5229d8b 100644 --- a/utils_nlp/dataset/xnli.py +++ b/utils_nlp/dataset/xnli.py @@ -6,7 +6,6 @@ """ import os -import sys import pandas as pd @@ -16,18 +15,19 @@ URL_XNLI = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip" URL_XNLI_MT = "https://www.nyu.edu/projects/bowman/xnli/XNLI-MT-1.0.zip" + def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): """Downloads and extracts the dataset files Args: - local_cache_path (str, optional): Path to store the data. + local_cache_path (str, optional): Path to store the data. Defaults to "./". file_split (str, optional): The subset to load. One of: {"train", "dev", "test"} Defaults to "dev". - language (str, optional): language subset to read. - One of: {"en", "fr", "es", "de", "el", "bg", "ru", + language (str, optional): language subset to read. + One of: {"en", "fr", "es", "de", "el", "bg", "ru", "tr", "ar", "vi", "th", "zh", "hi", "sw", "ur"} - Defaults to "zh" (Chinese). + Defaults to "zh" (Chinese). Returns: pd.DataFrame: pandas DataFrame containing the specified XNLI subset. @@ -50,20 +50,24 @@ def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): zip_file_name = url.split("/")[-1] folder_name = ".".join(zip_file_name.split(".")[:-1]) - file_name = folder_name + "/multinli/" + ".".join(["multinli", file_split, language, "tsv"]) + file_name = ( + folder_name + + "/multinli/" + + ".".join(["multinli", file_split, language, "tsv"]) + ) maybe_download(url, zip_file_name, local_cache_path) - if not os.path.exists( - os.path.join(local_cache_path, folder_name) - ): + if not os.path.exists(os.path.join(local_cache_path, folder_name)): extract_zip( os.path.join(local_cache_path, zip_file_name), local_cache_path ) - - with open(os.path.join(local_cache_path, file_name), "r", encoding="utf-8") as f: + + with open( + os.path.join(local_cache_path, file_name), "r", encoding="utf-8" + ) as f: lines = f.read().splitlines() - + line_list = [line.split("\t") for line in lines] # Remove the column name row line_list.pop(0) @@ -71,14 +75,24 @@ def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): line_list = [line for line in line_list if line[0] == language] label_list = [convert_to_unicode(line[label_index]) for line in line_list] - old_contradict_label = convert_to_unicode("contradictory") + old_contradict_label = convert_to_unicode("contradictory") new_contradict_label = convert_to_unicode("contradiction") - label_list = [new_contradict_label if label == old_contradict_label else label for label in label_list] - text_list = [(convert_to_unicode(line[sentence_1_index]), convert_to_unicode(line[sentence_2_index])) for line in line_list] + label_list = [ + new_contradict_label if label == old_contradict_label else label + for label in label_list + ] + text_list = [ + ( + convert_to_unicode(line[sentence_1_index]), + convert_to_unicode(line[sentence_2_index]), + ) + for line in line_list + ] df = pd.DataFrame({"text": text_list, "label": label_list}) return df + if __name__ == "__main__": - load_pandas_df() \ No newline at end of file + load_pandas_df() From 
2010daf6379675f02fd2c072336182e9c7a82473 Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Thu, 20 Jun 2019 14:08:40 -0400 Subject: [PATCH 022/108] Removed redundant code. --- utils_nlp/dataset/xnli.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/utils_nlp/dataset/xnli.py b/utils_nlp/dataset/xnli.py index 1c5229d8b..c838b1517 100644 --- a/utils_nlp/dataset/xnli.py +++ b/utils_nlp/dataset/xnli.py @@ -92,7 +92,3 @@ def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): df = pd.DataFrame({"text": text_list, "label": label_list}) return df - - -if __name__ == "__main__": - load_pandas_df() From fad256460458bb26b438096964e26b6aab7b1c5f Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Thu, 20 Jun 2019 14:23:09 -0400 Subject: [PATCH 023/108] added optional prob dist predictions --- utils_nlp/bert/sequence_classification.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index c5d4614f2..7ba75ee2c 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -22,6 +22,7 @@ class BERTSequenceClassifier: def __init__(self, language=Language.ENGLISH, num_labels=2, cache_dir="."): """Initializes the classifier and the underlying pretrained model. + Args: language (Language, optional): The pretrained model's language. Defaults to Language.ENGLISH. @@ -54,6 +55,7 @@ def fit( verbose=True, ): """Fine-tunes the BERT classifier using the given training data. + Args: token_ids (list): List of training token id lists. input_mask (list): List of input mask lists. @@ -149,8 +151,16 @@ def fit( del [x_batch, y_batch, mask_batch] torch.cuda.empty_cache() - def predict(self, token_ids, input_mask, num_gpus=None, batch_size=32): + def predict( + self, + token_ids, + input_mask, + num_gpus=None, + batch_size=32, + probabilities=False, + ): """Scores the given dataset and returns the predicted classes. + Args: token_ids (list): List of training token lists. input_mask (list): List of input mask lists. @@ -158,6 +168,9 @@ def predict(self, token_ids, input_mask, num_gpus=None, batch_size=32): If None is specified, all available GPUs will be used. Defaults to None. batch_size (int, optional): Scoring batch size. Defaults to 32. + probabilities: If true, the predicted probability distribution + is returned; otherwise, the predicted classes are returned. + Defaults to False. Returns: [ndarray]: Predicted classes. 
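To make the new `probabilities` option concrete, here is a minimal, self-contained sketch of the logits-to-probabilities conversion the patched `predict` performs; the toy 2-class logits are invented:

```Python
import numpy as np
import torch
import torch.nn as nn

# Toy 2-class logits standing in for the concatenated batch outputs.
preds = np.array([[2.0, -1.0], [0.5, 0.4], [-1.3, 1.1]])

# probabilities=False: hard class predictions.
classes = preds.argmax(axis=1)

# probabilities=True: softmax over the logits, as in the patch above.
probs = nn.Softmax(dim=1)(torch.Tensor(preds)).numpy()

print(classes)            # [0 0 1]
print(probs.sum(axis=1))  # each row sums to 1.0
```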
""" @@ -188,6 +201,10 @@ def predict(self, token_ids, input_mask, num_gpus=None, batch_size=32): preds.append(p_batch.cpu().data.numpy()) if i % batch_size == 0: pbar.update(batch_size) - preds = [x.argmax(1) for x in preds] + preds = np.concatenate(preds) - return preds + + if probabilities: + return nn.Softmax(dim=1)(torch.Tensor(preds)).numpy() + else: + return preds.argmax(axis=1) From d53c17e1edfb2b3f19eab60c7db7e7df56287aec Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Thu, 20 Jun 2019 14:37:47 -0400 Subject: [PATCH 024/108] minor edit to preds --- utils_nlp/bert/sequence_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index 7ba75ee2c..c34e2f2ef 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -198,7 +198,7 @@ def predict( attention_mask=mask_batch, labels=None, ) - preds.append(p_batch.cpu().data.numpy()) + preds.append(p_batch.cpu()) if i % batch_size == 0: pbar.update(batch_size) From c0951fae7c99955fa426889bc18aefbacc2e480e Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Thu, 20 Jun 2019 14:44:18 -0400 Subject: [PATCH 025/108] rem data_loader --- utils_nlp/dataset/data_loaders.py | 71 ------------------------------- 1 file changed, 71 deletions(-) delete mode 100644 utils_nlp/dataset/data_loaders.py diff --git a/utils_nlp/dataset/data_loaders.py b/utils_nlp/dataset/data_loaders.py deleted file mode 100644 index 014a8b334..000000000 --- a/utils_nlp/dataset/data_loaders.py +++ /dev/null @@ -1,71 +0,0 @@ -import random -import dask.dataframe as dd - - -class DaskCSVLoader: - """Class for creating and using a loader for large csv - or other delimited files. The loader uses dask to read - smaller partitions of a file into memory (one partition at a time), - before sampling batches from the partitions. - """ - - def __init__( - self, - file_path, - sep=",", - header="infer", - block_size=10e6, - random_seed=None, - ): - """Initializes the loader. - Args: - file_path (str): Path to delimited file. - sep (str, optional): Delimiter. Defaults to ",". - header (str, optional): Number of rows to be used as the header. - See pandas.read_csv() - Defaults to "infer". - block_size (int, optional): Size of partition in bytes. - See dask.dataframe.read_csv() - Defaults to 10e6. - random_seed (int, optional): Random seed. See random.seed(). - Defaults to None. - """ - - self.df = dd.read_csv( - file_path, sep=sep, header=header, blocksize=block_size - ) - self.random_seed = random_seed - random.seed(random_seed) - - def get_random_batches(self, num_batches, batch_size): - """Creates a random-batch generator. - Batches returned are pandas dataframes of length=batch_size. - Note: If the sampled partition has less rows than the - specified batch_size, then a smaller batch of the same - size as that partition's number of rows is returned. - - Args: - num_batches (int): Number of batches to generate. - batch_size (int]): Batch size. - """ - for i in range(num_batches): - rnd_part_idx = random.randint(0, self.df.npartitions - 1) - sample_part = self.df.partitions[rnd_part_idx].compute() - if sample_part.shape[0] > batch_size: - yield sample_part.sample( - batch_size, random_state=self.random_seed - ) - else: - yield sample_part - - def get_sequential_batches(self, batch_size): - """Creates a sequential generator. - Batches returned are pandas dataframes of length=batch_size. - Note: Final batch might be of smaller size. 
- Args: - batch_size (int): Batch size. - """ - for i in range(self.df.npartitions): - part = self.df.partitions[i].compute() - for j in range(0, part.shape[0], batch_size): - yield part.iloc[j : j + batch_size, :] From e561deec22fab0f03d1594cc1baa5007ecd96165 Mon Sep 17 00:00:00 2001 From: Liqun Shao Date: Mon, 3 Jun 2019 12:17:38 -0400 Subject: [PATCH 026/108] add the aml utility function that can get or create workspace as that create the workspace if it does not exist --- .../gensen_aml_deep_dive.ipynb | 1287 ++++++++++------- 1 file changed, 768 insertions(+), 519 deletions(-) diff --git a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb index 8a40dc74b..0676f3175 100644 --- a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb +++ b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb @@ -14,54 +14,36 @@ "metadata": {}, "source": [ "# GenSen Deep Dive on AzureML\n", - "**Learning General Purpose Distributed Sentence Representations via Large Scale Multi-task Learning** [\\[1\\]](#References)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ + "**Learning General Purpose Distributed Sentence Representations via Large Scale Multi-task Learning** [\\[1\\]](#References)\n", + "\n", + "## What is sentence similarity?\n", + "\n", + "Sentence similarity or semantic textual similarity deals with determining how similar two pieces of texts are. This can take the form of assigning a score from 1 to 5. Related tasks are parahrase or duplicate identification.\n", + "\n", + "## How to evaluate?\n", + "\n", + "[SentEval](https://arxiv.org/abs/1803.05449) [\\[2\\]](#References) is an evaluation toolkit for evaluating sentence representations. It includes 17 downstream tasks, including common semantic textual similarity tasks. The semantic textual similarity (**STS**) benchmark tasks from 2012-2016 (STS12, STS13, STS14, STS15, STS16, STSB) measure the relatedness of two sentences based on the cosine similarity of the two representations. The evaluation criterion is Pearson correlation.\n", + "\n", + "The SICK relatedness (**SICK-R**) task trains a linear model to output a score from 1 to 5 indicating the relatedness of two sentences. For the same dataset (**SICK-E**) can be treated as a three-class classification problem using the entailment labels (classes are ‘entailment’, ‘contradiction’, and ‘neutral’). The evaluation metric for SICK-R is Pearson correlation and classification accuracy for SICK-E.\n", + "\n", + "The Microsoft Research Paraphrase Corpus [(**MRPC**)](https://www.microsoft.com/en-us/download/details.aspx?id=52398) corpus is a paraphrase identification dataset, where systems aim to identify if two sentences are paraphrases of each other. The evaluation metric is classification accuracy and F1.\n", + "\n", "## What is GenSen?\n", "\n", - "GenSen is a technique to learn general purpose, fixed-length representations of sentences via multi-task training. GenSen model combines the benefits of diverse sentence-representation learning objectives into a single multi-task framework. \"This is the first large-scale reusable sentence representation model obtained by combining a set of training objectives with the level of diversity explored here, i.e. multi-lingual NMT, natural language inference, constituency parsing and skip-thought vectors.\" [\\[1\\]](#References) These representations are useful for transfer and low-resource learning. 
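The STS-style evaluation described above scores a sentence pair by the cosine similarity of its two sentence embeddings and reports the Pearson correlation against human judgments; a toy sketch with invented embeddings and invented gold scores:

```Python
import numpy as np
from scipy.stats import pearsonr

rng = np.random.RandomState(0)

# Invented sentence embeddings for 5 sentence pairs and invented gold scores.
emb_a = rng.randn(5, 8)
emb_b = rng.randn(5, 8)
gold = np.array([1.0, 2.5, 3.0, 4.2, 5.0])

# Cosine similarity of each pair.
cos = (emb_a * emb_b).sum(axis=1) / (
    np.linalg.norm(emb_a, axis=1) * np.linalg.norm(emb_b, axis=1)
)

# Pearson correlation between model similarities and human judgments.
r, _ = pearsonr(cos, gold)
print(round(r, 3))
```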
GenSen is trained on several data sources with multiple training objectives on over 100 milion sentences.\n", + "GenSen is a technique to learn general purpose, fixed-length representations of sentences via multi-task training. GenSen model is to combine the benefits of diverse sentence-representation learning objectives into a single multi-task framework. This is the first large-scale reusable sentence representation model obtained by combining a set of training objectives with the level of diversity explored here, i.e. multi-lingual NMT, natural language inference, constituency parsing and skip-thought vectors. These representations are useful for transfer and low-resource learning. GenSen is trained on several data sources with multiple training objectives on over 100 milion sentences.\n", + "\n", + "The GenSen model is most similar to that of Luong et al. (2015) [\\[4\\]](#References), who train a many-to-many **sequence-to-sequence** model on a diverse set of weakly ralated tasks that includes machine translation, constituency parsing, image captioning, sequence autoencoding, and intra-sentence skip-thoughts. However, there are two key differences. GenSen uses an attention mechanism preventing learning a fixed-length vector representation for a sentence and it aims for learning re-usable sentence representations that transfers elsewhere, as opposed to Luong's work aims for improvements on the same tasks on which the model is trained.\n", "\n", - "The GenSen model is most similar to that of Luong et al. (2015) [\\[4\\]](#References), who train a many-to-many **sequence-to-sequence** model on a diverse set of weakly related tasks that includes machine translation, constituency parsing, image captioning, sequence autoencoding, and intra-sentence skip-thoughts. However, there are two key differences. \"First, like McCann et al. (2017) [\\[5\\]](#References), their use of an attention mechanism prevents learning a fixed-length vector representation for a sentence. Second, their work aims for improvements on the same tasks on which the model is trained, as opposed to learning re-usable sentence representations that transfer elsewhere.\" [\\[1\\]](#References)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ "### Sequence to Sequence Learning\n", "\n", "![Sequence to sequence learning examples - (left) machine translation and (right) constituent parsing](https://nlpbp.blob.core.windows.net/images/seq2seq.png)**Sequence to sequence learning examples - (left) machine translation and (right) constituent parsing**\n", "\n", - "\"Sequence to sequence learning (*seq2seq*) aims to directly model the conditional probability $p(x|y)$ of mapping an input sequence, $x_1,...,x_n$, into an output sequence, $y_1,...,y_m$. It accomplishes such goal through the *encoder-decoder* framework. As illustrated in the above figure, the encoder computes a representation $s$ for each input sequence. Based on that input representation, the *decoder* generates an ouput sequence, one unit at a time, and hence, decomposes the conditional probability as\" [\\[4\\]](#References):\n", + "Sequence to sequence learning (*seq2seq*) aims to directly model the conditional probability $p(x|y)$ of mapping an input sequence, $x_1,...,x_n$, into an output sequence, $y_1,...,y_m$. It accomplishes such goal through the *encoder-decoder* framework. As illustrated in the above figure, the encoder computes a representation $s$ for each input sequence. 
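As a rough illustration of the encoder side only, a tiny bidirectional GRU that maps a batch of token ids to one fixed-length vector per sequence (toy dimensions, not GenSen's actual configuration):

```Python
import torch
import torch.nn as nn

# Toy sizes; GenSen itself uses a much larger bidirectional GRU encoder.
vocab_size, emb_dim, hidden_dim = 100, 16, 32

embedding = nn.Embedding(vocab_size, emb_dim, padding_idx=1)
encoder = nn.GRU(emb_dim, hidden_dim, batch_first=True, bidirectional=True)

token_ids = torch.randint(2, vocab_size, (2, 5))  # batch of 2 sequences, length 5

outputs, h_n = encoder(embedding(token_ids))
# Concatenate the last forward and backward hidden states into one
# fixed-length representation s per sequence: shape (batch, 2 * hidden_dim).
s = torch.cat([h_n[0], h_n[1]], dim=1)
print(s.shape)  # torch.Size([2, 64])
```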
Based on that input representation, the *decoder* generates an ouput sequence, one unit at a time, and hence, decomposes the conditional probability as:\n", "\n", "$$\n", "\\log p(y|x)=\\sum_{j=1}^{m} \\log p(y_i|y_{\",\n", - " resource_group=\"\",\n", - " workspace_name=\"\",\n", - " workspace_region=\"\"\n", - ")\n", - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep='\\n')" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -242,7 +158,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -285,7 +201,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 42, "metadata": {}, "outputs": [], "source": [ @@ -296,12 +212,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 1.1 Load SNLI Dataset" + "## 1.1 Load SNLI Dataset\n", + "We provide a function `load_pandas_df` which\n", + "* Downloads the SNLI zipfile at the specified directory location\n", + "* Extracts the file based on the specified split\n", + "* Loads the split as a pandas dataframe" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 7, "metadata": { "scrolled": true }, @@ -484,7 +404,7 @@ "4 2267923837.jpg#2r1e entailment NaN NaN NaN NaN " ] }, - "execution_count": 22, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -506,96 +426,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 1.2 Tokenize" + "## 1.2 Tokenize\n", + "Now that we've loaded the data into a pandas.DataFrame, we can tokenize the sentences.\n", + "We also clean the data before tokenizing. This includes dropping unneccessary columns and renaming the relevant columns as score, sentence_1, and sentence_2." ] }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 8, "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
scoresentence1sentence2
0neutralA person on a horse jumps over a broken down a...A person is training his horse for a competition.
1contradictionA person on a horse jumps over a broken down a...A person is at a diner, ordering an omelette.
2entailmentA person on a horse jumps over a broken down a...A person is outdoors, on a horse.
3neutralChildren smiling and waving at cameraThey are smiling at their parents
4entailmentChildren smiling and waving at cameraThere are children present
\n", - "
" - ], - "text/plain": [ - " score sentence1 \\\n", - "0 neutral A person on a horse jumps over a broken down a... \n", - "1 contradiction A person on a horse jumps over a broken down a... \n", - "2 entailment A person on a horse jumps over a broken down a... \n", - "3 neutral Children smiling and waving at camera \n", - "4 entailment Children smiling and waving at camera \n", - "\n", - " sentence2 \n", - "0 A person is training his horse for a competition. \n", - "1 A person is at a diner, ordering an omelette. \n", - "2 A person is outdoors, on a horse. \n", - "3 They are smiling at their parents \n", - "4 There are children present " - ] - }, - "execution_count": 23, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "def clean(df, file_split):\n", " src_file_path = os.path.join(BASE_DATA_PATH, \"raw/snli_1.0/snli_1.0_{}.txt\".format(file_split))\n", @@ -622,9 +462,25 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 5, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package punkt to\n", + "[nltk_data] C:\\Users\\lishao\\AppData\\Roaming\\nltk_data...\n", + "[nltk_data] Package punkt is already up-to-date!\n", + "[nltk_data] Downloading package punkt to\n", + "[nltk_data] C:\\Users\\lishao\\AppData\\Roaming\\nltk_data...\n", + "[nltk_data] Package punkt is already up-to-date!\n", + "[nltk_data] Downloading package punkt to\n", + "[nltk_data] C:\\Users\\lishao\\AppData\\Roaming\\nltk_data...\n", + "[nltk_data] Package punkt is already up-to-date!\n" + ] + } + ], "source": [ "train_tok = to_nltk_tokens(to_lowercase(train))\n", "dev_tok = to_nltk_tokens(to_lowercase(dev))\n", @@ -643,24 +499,33 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 6, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "C:\\Users\\lishao\\Project\\Rotation2\\NLP\\data\\clean/snli_1.0/snli_1.0_train.txt\n", + "C:\\Users\\lishao\\Project\\Rotation2\\NLP\\data\\clean/snli_1.0/snli_1.0_dev.txt\n", + "C:\\Users\\lishao\\Project\\Rotation2\\NLP\\data\\clean/snli_1.0/snli_1.0_test.txt\n" + ] + }, { "data": { "text/plain": [ - "'../../data\\\\clean/snli_1.0'" + "'C:\\\\Users\\\\lishao\\\\Project\\\\Rotation2\\\\NLP\\\\data\\\\clean/snli_1.0'" ] }, - "execution_count": 25, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "gensen_preprocess(train_tok, dev_tok, test_tok, BASE_DATA_PATH)" + "gensen_preprocess(train_tok, dev_tok, test_tok, os.path.abspath(BASE_DATA_PATH))" ] }, { @@ -670,106 +535,112 @@ "## 1.4 Upload to Azure Blob Storage\n", "We make the data accessible remotely by uploading that data from your local machine into Azure. Then it can be accessed for remote training. The datastore is a convenient construct associated with your workspace for you to upload or download data. You can also interact with it from your remote compute targets. 
It's backed by an Azure Blob storage account.\n", "\n", - "**Note: If you already have all the files under `clean/snli_1.0/` in your default datastorage, you DO NOT need to redo this section.**" + "**Note: User needs to upload all the files under `data_folder` MANUALLY to Azure Blob storage account for now, because the uploading function has bugs on Azure Blob.**" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [], + "source": [ + "data_folder = os.path.join(BASE_DATA_PATH, \"clean/snli_1.0/\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To download some of the data required to train a GenSen model, run the bash file [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). Make sure to upload all the large files to azure file share. You can access to datastore by using `ds.as_mount()`." + "# 2 Train GenSen Model with Distributed Pytorch with Horovod on AzureML\n", + "In this tutorial, you will train a GenSen model with PyTorch on AML using distributed training across a GPU cluster. This could also be a generic guideline to train models using GPU cluster.\n", + "\n", + "Once you've created your workspace and set up your development environment, training a model in Azure Machine Learning involves the following steps:\n", + "1. Create a remote compute target (note you can also use local computer as compute target)\n", + "2. Prepare your training data and upload it to datastore\n", + "3. Create your training script\n", + "4. Create an Estimator object\n", + "5. Submit the estimator to an experiment object under the workspace" ] }, { - "cell_type": "code", - "execution_count": 26, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2.1 Initialization\n", + "In this section, we will initialize workspace and create a AmlCompute for training." + ] + }, + { + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "data_folder = os.path.join(BASE_DATA_PATH, \"clean/snli_1.0\")" + "### 2.1.1 Initialize Workspace\n", + "\n", + "Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. For instructions on how to do this, see [here](README.md). `Workspace.from_config()` creates a workspace object from the details stored in `config.json`." ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "AzureFile maidaptest3334372853 azureml-filestore-792de9d4-7d0a-464c-b40a-58584f23f5ec $AZUREML_DATAREFERENCE_liqungensen\n" + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "WARNING - You have logged in. 
Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n", + "Workspace name: MAIDAPTest\n", + "Azure region: eastus2\n", + "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", + "Resource group: nlprg\n" ] } ], "source": [ - "ds = ws.get_default_datastore()\n", - "print(ds.datastore_type, ds.account_name, ds.container_name, ds.as_mount())" + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Prerequisites:**\n", + "### 2.1.2 Create or Attach Existing AmlCompute\n", + "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", + "\n", + "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", "\n", - "Upload the all the local files under `data_folder` to the path `./data/processed/` on the default datastore.\n", + "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.\n", "\n", - "**Note: To download data required to train a GenSen model in the original paper, run code [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). By training on the original datasets (training time around 20 hours), it will reproduce the results in the [paper](https://arxiv.org/abs/1804.00079). For simplicity, we will train on a smaller dataset, which is SNLI preprocessed in [1 Data Loading and Preprocessing](#1-Data-Loading-and-Preprocessing) for showcasing the example.**" + "**Use Standard_NC6 for now.**" ] }, { "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "$AZUREML_DATAREFERENCE_6faee69b569b4268b8bf027b0bb4fd73" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "ds.upload(src_dir=data_folder, target_path=\"data/processed\", overwrite=True, show_progress=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 2 Train GenSen Model with Distributed Pytorch with Horovod on AzureML\n", - "In this tutorial, you will train a GenSen model with PyTorch on AML using distributed training across a GPU cluster.\n", - "\n", - "Once you've created your workspace and set up your development environment, training a model in Azure Machine Learning involves the following steps:\n", - "1. 
Create a remote compute target (note you can also use local computer as compute target)\n", - "2. Prepare your training data and upload it to datastore\n", - "3. Create your training script\n", - "4. Create an Estimator object\n", - "5. Submit the estimator to an experiment object under the workspace" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2.1 Create or Attach Existing AmlCompute\n", - "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", - "\n", - "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", - "\n", - "As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.\n", - "\n", - "**Use Standard_NC6 for now.**" - ] - }, - { - "cell_type": "code", - "execution_count": 30, + "execution_count": 2, "metadata": {}, "outputs": [ { @@ -777,13 +648,16 @@ "output_type": "stream", "text": [ "Found existing compute target.\n", - "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-14T20:39:31.676000+00:00', 'errors': None, 'creationTime': '2019-06-03T21:18:34.507970+00:00', 'modifiedTime': '2019-06-03T21:18:50.790782+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 8, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + "{'currentNodeCount': 4, 'targetNodeCount': 4, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 4, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-05-31T21:24:32.828000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] } ], "source": [ + "from azureml.core.compute import ComputeTarget, AmlCompute\n", + "from azureml.core.compute_target import ComputeTargetException\n", + "\n", "# choose a name for your cluster\n", - "cluster_name = \"gpugensen\"\n", + "cluster_name = \"gpucluster\"\n", "\n", "try:\n", " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", @@ -791,7 +665,7 @@ "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", - " 
max_nodes=8)\n", + " max_nodes=4)\n", "\n", " # create the cluster\n", " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", @@ -806,90 +680,113 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 2.2 Access to a Project Directory\n", - "In this section, we set the GenSen code folder and data folder for training. Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.\n", - "\n", - "`project_folder` contains all the code you want to submit to AmlCompute to run. The size of the folder can not exceed 300Mb. In GenSen model, it loads large pre-trained embedding files to the model. Thus, we need to save large files in datastore and only upload code to `project_folder`. We set the gensen project folder under `utils_nlp`." + "## 2.2 Settings for GenSen\n", + "In this section, we set the GenSen code folder and data folder for training." ] }, { - "cell_type": "code", - "execution_count": 31, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# Change the path to where your model code locates.\n", - "project_folder = '../../'\n", - "os.makedirs(project_folder, exist_ok=True)" + "### 2.2.1 Access to a Project Directory\n", + "Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.\n", + "\n", + "`project_folder` contains all the code you want to submit to AmlCompute to run. The size of the folder can not exceed 300Mb. In GenSen model, it loads large pre-trained embedding files to the model. Thus, we need to save large files in datastore and only uploads code to `project_folder`." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 3, "metadata": {}, + "outputs": [], "source": [ - "## 2.3 Train Model on the Remote Compute\n", - "Now that we have the AmlCompute ready to go, let's run our distributed training job." + "import os\n", + "\n", + "# Change the path to where your model code locates.\n", + "\n", + "project_folder = '../../utils_nlp/model/gensen/'\n", + "os.makedirs(project_folder, exist_ok=True)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 2.3.1 Prepare Training Script\n", - "Now you will need to create your training script. In this tutorial, the script for distributed training of GENSEN is already provided for you at `train.py`. In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code.\n", - "\n", - "However, if you would like to use Azure ML's [metric logging](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#logging) capabilities, you will have to add a small amount of Azure ML logic inside your training script. In this example, at each logging interval, we will log the loss for that minibatch to our Azure ML run.\n", + "### 2.2.2 Access to Datastore\n", + "To download some of the data required to train a GenSen model, run the bash file [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). Make sure to upload all the large files to azure file share. 
You can access to datastore by using `ds.as_mount()`.\n", "\n", - "To do so, in `train.py`, we will first access the Azure ML `Run` object within the script:\n", - "```Python\n", - "from azureml.core.run import Run\n", - "run = Run.get_context()\n", - "```\n", - "Later within the script, we log the loss metric to our run:\n", - "```Python\n", - "run.log('loss', loss.item())\n", - "```" + "**Note: To download data required to train a GenSen model in the original paper, run code [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). By training on the original datasets (training time around 20 hours), it will reproduce the results in the [paper](https://arxiv.org/abs/1804.00079). For simplicity, we will train on a smaller dataset, which is SNLI preprocessed in [1 Data Loading and Preprocessing](#1-Data-Loading-and-Preprocessing) for showcasing the example.**" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 4, "metadata": {}, + "outputs": [], "source": [ - "The training process follows the steps:\n", - "1. Create or load the dataset vocabulary\n", - "2. Train on the training dataset for each batch epoch (batch size = 48 updates)\n", - "3. Evaluate on the validation dataset for every 10 epochs\n", - "4. Find the local minimum point on validation loss\n", - "5. Save the best model and stop the training process" + "from azureml.core import Datastore\n", + "ds = Datastore.register_azure_file_share(workspace=ws,\n", + " datastore_name= 'GenSen',\n", + " file_share_name='azureml-filestore-792de9d4-7d0a-464c-b40a-58584f23f5ec',\n", + " account_name='maidaptest3334372853',\n", + " account_key='p0qz3rO4YWDeRRyhU+aQycW8kD2vvF061OyURSLwwQxkfQmhfch48tC+kFzBdZlJPDR/Jk8JoFxSLxKbUaZ1lQ==')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Once your script is ready, copy the training script `gensen_train.py` and config file `gensen_config.json` into the project directory." + "**Prerequisites:**\n", + "\n", + "Upload the all the files under `data_folder` in [1.4 Upload to Azure Blob Storage](#1.4-Upload-to-Azure-Blob-Storage) to the path `./data/processed/` on the above datastore." ] }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 5, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'../../utils_nlp/gensen/gensen_config.json'" + "$AZUREML_DATAREFERENCE_gensen" ] }, - "execution_count": 36, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "gensen_folder = os.path.join(project_folder,'utils_nlp/gensen/')\n", - "shutil.copy('gensen_train.py', gensen_folder)\n", - "shutil.copy('gensen_config.json', gensen_folder)" + "ds.as_mount()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2.3 Train model on the Remote Compute\n", + "Now that we have the AmlCompute ready to go, let's run our distributed training job." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.3.1 Prepare Training Script\n", + "Now you will need to create your training script. In this tutorial, the script for distributed training of GENSEN is already provided for you at `train.py`. 
In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code.\n", + "\n", + "However, if you would like to use Azure ML's [metric logging](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#logging) capabilities, you will have to add a small amount of Azure ML logic inside your training script. In this example, at each logging interval, we will log the loss for that minibatch to our Azure ML run.\n", + "\n", + "To do so, in `train.py`, we will first access the Azure ML `Run` object within the script:\n", + "```Python\n", + "from azureml.core.run import Run\n", + "run = Run.get_context()\n", + "```\n", + "Later within the script, we log the loss metric to our run:\n", + "```Python\n", + "run.log('loss', loss.item())\n", + "```" ] }, { @@ -902,12 +799,14 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ + "from azureml.core import Experiment, get_run\n", + "\n", "experiment_name = 'pytorch-gensen'\n", - "experiment = Experiment(ws, name=experiment_name)" + "experiment = Experiment(ws, name=experiment_name)\n" ] }, { @@ -917,44 +816,39 @@ "### 2.3.3 Create a PyTorch Estimator\n", "The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch).\n", "\n", - "`sample_config.json` defines all the hyperparameters and paths when training GenSen model. The trained model will be saved in `data/models/example` to Azure Blob Storage. **Remember to clean `data/models/example` folder in order to save new models.**" + "`sample_config.json` defines all the hyper parameters and paths when training GenSen model. The trained model will be saved in `data/models/example` to Azure Blob Storage." ] }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 6, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - framework_version is not specified, defaulting to version 1.1.\n" - ] - } - ], + "outputs": [], "source": [ + "from azureml.train.dnn import PyTorch\n", + "from azureml.train.estimator import Estimator\n", + "\n", "script_params = {\n", - " '--config': 'utils_nlp/gensen/gensen_config.json',\n", - " '--data_folder': ws.get_default_datastore().as_mount()}\n", + " '--config': 'sample_config.json',\n", + " '--data_folder': ds.as_mount()}\n", "\n", "estimator = PyTorch(source_directory=project_folder,\n", " script_params=script_params,\n", " compute_target=compute_target,\n", - " entry_script='utils_nlp/gensen/gensen_train.py',\n", + " entry_script='train.py',\n", " node_count=4,\n", " process_count_per_node=1,\n", - " distributed_training=MpiConfiguration(),\n", + " distributed_backend='mpi',\n", " use_gpu=True,\n", - " conda_packages=['scikit-learn=0.20.3', 'h5py', 'nltk']\n", - " )" + " conda_packages=['scikit-learn=0.20.3']\n", + " )\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The above code specifies that we will run our training script on `4` nodes, with one worker per node. In order to execute a distributed run using GPU, you must provide the argument `use_gpu=True`. To execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. 
Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. If this is the first time creating an experiment, it may take longer to set up conda environments under `.azureml/conda_dependencies.yml`. After the first run, it will use the existing conda environments and directly run the code. However, if your script also uses other packages not initialized in `.azureml/conda_dependencies.yml` environment file, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters.\n", + "The above code specifies that we will run our training script on `4` nodes, with one worker per node. In order to execute a distributed run using GPU, you must provide the argument `use_gpu=True`. To execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. If you are the first time to create a experiment, it may take longer to set up conda environments under `.azureml/conda_dependencies.yml`. After the first run, it will use the existing conda environments and directly run the code. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters. The more required packages are stored in `.azureml/conda_dependencies.yml` file.\n", "\n", "**Requirements:**\n", "- python=3.6.2\n", @@ -973,32 +867,23 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 2.3.4 Submit a job\n", + "### 2.3.4 Submit or Cancel a job\n", "Run your experiment by submitting your estimator object. Note that this call is asynchronous." ] }, { "cell_type": "code", - "execution_count": 38, - "metadata": { - "scrolled": true - }, + "execution_count": 13, + "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Submitting C:\\Users\\lishao\\Project\\Rotation2\\NLP directory for run. The size of the directory >= 25 MB, so it can take a few minutes.\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Run(Experiment: pytorch-gensen,\n", - "Id: pytorch-gensen_1560797674_e36e44f4,\n", + "Id: pytorch-gensen_1559577451_8b3c6f42,\n", "Type: azureml.scriptrun,\n", - "Status: Preparing)\n" + "Status: Queued)\n" ] } ], @@ -1011,48 +896,42 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 2.3.5 Monitor your run\n", - "You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. You can see that the widget automatically plots and visualizes the loss metric that we logged to the Azure ML run." + "**Cancel the job**\n", + "\n", + "It's better to cancel the job manually to make sure you does not waste resources." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 12, "metadata": {}, + "outputs": [], "source": [ - "**Horovod on AzureML**\n", - "\n", - "[Horovod](https://github.com/horovod/horovod) is a distributed training framework for TensorFlow, PyTorch etc. to make distributed Deep Learning fast and easy to use. We have created 2 nodes in the GPU cluster on AzureML. By using Horovod, we can use those two machines to train the model in parallel. In theory, the model trains faster on AzureML than on VM which uses single machine because it converges faster which we will get lower loss. 
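For context, a generic sketch of how a PyTorch training script is usually adapted for Horovod; this is illustrative only (a toy model, not the repository's gensen_train.py) and assumes Horovod is installed:

```Python
import torch
import torch.nn as nn
import horovod.torch as hvd

hvd.init()  # one process per node/GPU
if torch.cuda.is_available():
    torch.cuda.set_device(hvd.local_rank())

model = nn.Linear(10, 2)  # toy model standing in for the GenSen network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4 * hvd.size())

# Average gradients across workers and start all workers from the same weights.
optimizer = hvd.DistributedOptimizer(
    optimizer, named_parameters=model.named_parameters()
)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)

x, y = torch.randn(4, 10), torch.randint(0, 2, (4,))
optimizer.zero_grad()
loss = nn.CrossEntropyLoss()(model(x), y)
loss.backward()
optimizer.step()
```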
However, by using more nodes, the model may take more time in communicating with each node. The communication time could be ignored when the model is trained on the large datasets.\n", + "# Cancel the job with id.\n", + "# job_id = \"pytorch-gensen_1555533596_d9cc75fe\"\n", + "# run = get_run(experiment, job_id)\n", "\n", - "AzureML can automatically create figures on the loss and time, which is eaiser to track the performance as in the following figure shown the valiation loss v.s. the number of epochs:\n", - "![best_val_loss](https://nlpbp.blob.core.windows.net/images/best_val_loss.PNG)" + "# Cancel jobs.\n", + "run.cancel()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Interpret the Training Results**\n", - "\n", - "The following chart shows the model validation loss (the less loss, the better performance) with different nodes with AmlCompute:\n", - "\n", - "| Standard_NC6 | AML_1node | AML_2nodes | AML_4nodes | AML_8nodes |\n", - "| --- | --- | --- | --- | --- |\n", - "| Best_val_loss | 4.81 | 4.78 | 4.77 | 4.58 |\n", - "\n", - "From the chart, we can tell training with more nodes, the performance is getting better with lower loss." + "### 2.3.5 Monitor your run\n", + "You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. You can see that the widget automatically plots and visualizes the loss metric that we logged to the Azure ML run." ] }, { "cell_type": "code", - "execution_count": 39, - "metadata": { - "scrolled": true - }, + "execution_count": 14, + "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "19d55fcc0871444da604b1d828d9eac4", + "model_id": "debca7cb57da4fc0b97c05b973fa0412", "version_major": 2, "version_minor": 0 }, @@ -1065,16 +944,11 @@ } ], "source": [ + "from azureml.widgets import RunDetails\n", + "\n", "RunDetails(run).show()" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![AML_results](https://nlpbp.blob.core.windows.net/images/aml_results.PNG)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -1084,36 +958,513 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 37, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "RunId: pytorch-gensen_1559153095_0e7f4645\n", + "\n", + "Streaming azureml-logs/80_driver_log_rank_0.txt\n", + "===============================================\n", + "\n", + "Building vocabulary ...\n", + "Building common source vocab ...\n", + "Found existing vocab file. Reloading ...\n", + "Building target vocabs ...\n", + "Found existing vocab file. 
Reloading ...\n", + "Reloading vocab for snli \n", + "Fetching sentences ...\n", + "Processing corpus : 0 task snli \n", + "Reached end of dataset, reseting file pointer ...\n", + "Fetching sentences ...\n", + "Processing corpus : 0 task snli \n", + "Fetched 1000000 sentences\n", + "Fetched 1000000 sentences\n", + "2019-05-29 18:05:35,740 - INFO - Finished creating iterator ...\n", + "2019-05-29 18:05:35,747 - INFO - Found 19966 words in source : \n", + "2019-05-29 18:05:35,753 - INFO - Found 30004 target words in task snli \n", + "2019-05-29 18:05:35,758 - INFO - Model Parameters : \n", + "2019-05-29 18:05:35,763 - INFO - Task : multi-seq2seq-nli \n", + "2019-05-29 18:05:35,768 - INFO - Source Word Embedding Dim : 512\n", + "2019-05-29 18:05:35,772 - INFO - Target Word Embedding Dim : 512\n", + "2019-05-29 18:05:35,777 - INFO - Source RNN Hidden Dim : 2048\n", + "2019-05-29 18:05:35,781 - INFO - Target RNN Hidden Dim : 2048\n", + "2019-05-29 18:05:35,788 - INFO - Source RNN Bidirectional : True\n", + "2019-05-29 18:05:35,792 - INFO - Batch Size : 48 \n", + "2019-05-29 18:05:35,806 - INFO - Optimizer : adam \n", + "2019-05-29 18:05:35,844 - INFO - Learning Rate : 0.000100 \n", + "2019-05-29 18:05:35,849 - INFO - Found 19966 words in src \n", + "2019-05-29 18:05:35,854 - INFO - Found 30004 words in trg \n", + "/azureml-envs/azureml_4737f522821717a6daa7464dfd956f84/lib/python3.6/site-packages/torch/nn/modules/rnn.py:46: UserWarning: dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1, but got dropout=0.3 and num_layers=1\n", + " \"num_layers={}\".format(dropout, num_layers))\n", + "2019-05-29 18:05:40,676 - INFO - MultitaskModel(\n", + " (src_embedding): Embedding(19966, 512, padding_idx=1)\n", + " (encoder): GRU(512, 1024, batch_first=True, dropout=0.3, bidirectional=True)\n", + " (enc_drp): Dropout(p=0.3)\n", + " (trg_embedding): ModuleList(\n", + " (0): Embedding(30004, 512, padding_idx=1)\n", + " )\n", + " (decoders): ModuleList(\n", + " (0): ConditionalGRU(\n", + " (input_weights): Linear(in_features=512, out_features=6144, bias=True)\n", + " (hidden_weights): Linear(in_features=2048, out_features=6144, bias=True)\n", + " (peep_weights): Linear(in_features=2048, out_features=6144, bias=True)\n", + " )\n", + " )\n", + " (decoder2vocab): ModuleList(\n", + " (0): Linear(in_features=2048, out_features=30004, bias=True)\n", + " )\n", + " (nli_decoder): Sequential(\n", + " (0): Dropout(p=0.3)\n", + " (1): Linear(in_features=8192, out_features=512, bias=True)\n", + " (2): ReLU()\n", + " (3): Linear(in_features=512, out_features=3, bias=True)\n", + " )\n", + ")\n", + "2019-05-29 18:05:40,715 - INFO - Could not find model checkpoint, starting afresh\n", + "2019-05-29 18:05:40,720 - INFO - Commencing Training ...\n", + "train.py:245: UserWarning: torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_.\n", + " torch.nn.utils.clip_grad_norm(model.parameters(), 1.)\n", + "2019-05-29 18:05:40,930 - INFO - ############################\n", + "2019-05-29 18:05:40,960 - INFO - ##### Evaluating model #####\n", + "2019-05-29 18:05:40,968 - INFO - ############################\n", + "/azureml-envs/azureml_4737f522821717a6daa7464dfd956f84/lib/python3.6/site-packages/torch/nn/functional.py:1332: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n", + " warnings.warn(\"nn.functional.sigmoid is deprecated. 
Use torch.sigmoid instead.\")\n", + "/azureml-envs/azureml_4737f522821717a6daa7464dfd956f84/lib/python3.6/site-packages/torch/nn/functional.py:1320: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n", + " warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n", + "2019-05-29 18:06:57,575 - INFO - snli Validation Loss : 10.312\n", + "2019-05-29 18:06:57,602 - INFO - Evaluating on NLI\n", + "train.py:390: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", + " class_preds = F.softmax(class_logits).data.cpu().numpy().argmax(\n", + "2019-05-29 18:07:04,824 - INFO - NLI Dev Acc : 0.32930\n", + "train.py:412: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", + " class_preds = F.softmax(class_logits).data.cpu().numpy().argmax(\n", + "2019-05-29 18:07:11,986 - INFO - NLI Test Acc : 0.32736\n", + "2019-05-29 18:07:12,004 - INFO - ******************************************************\n", + "/mnt/batch/tasks/shared/LS_root/jobs/maidaptest/azureml/pytorch-gensen_1559153095_0e7f4645/mounts/workspaceblobstore/azureml/pytorch-gensen_1559153095_0e7f4645/utils.py:310: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead.\n", + " torch.LongTensor(sorted_src_lens), volatile=True\n", + "train.py:316: UserWarning: torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_.\n", + " torch.nn.utils.clip_grad_norm(model.parameters(), 1.)\n", + "2019-05-29 18:09:30,890 - INFO - Seq2Seq Examples Processed : 9600 snli Loss : 5.71808 Num snli minibatches : 180\n", + "2019-05-29 18:09:30,946 - INFO - Round: 200 NLI Epoch : 0 NLI Examples Processed : 1008 NLI Loss : 1.12384\n", + "2019-05-29 18:09:30,996 - INFO - Average time per mininbatch : 0.69195\n", + "2019-05-29 18:09:31,032 - INFO - ******************************************************\n", + "2019-05-29 18:11:39,438 - INFO - Seq2Seq Examples Processed : 19200 snli Loss : 4.66382 Num snli minibatches : 180\n", + "2019-05-29 18:11:39,514 - INFO - Round: 400 NLI Epoch : 0 NLI Examples Processed : 1968 NLI Loss : 1.08484\n", + "2019-05-29 18:11:39,521 - INFO - Average time per mininbatch : 0.64192\n", + "2019-05-29 18:11:39,532 - INFO - ******************************************************\n", + "2019-05-29 18:13:46,059 - INFO - Seq2Seq Examples Processed : 28800 snli Loss : 4.33601 Num snli minibatches : 180\n", + "2019-05-29 18:13:46,068 - INFO - Round: 600 NLI Epoch : 0 NLI Examples Processed : 2928 NLI Loss : 1.08472\n", + "2019-05-29 18:13:46,073 - INFO - Average time per mininbatch : 0.63259\n", + "2019-05-29 18:13:46,079 - INFO - ******************************************************\n", + "2019-05-29 18:15:46,846 - INFO - Seq2Seq Examples Processed : 38400 snli Loss : 4.09014 Num snli minibatches : 180\n", + "2019-05-29 18:15:46,914 - INFO - Round: 800 NLI Epoch : 0 NLI Examples Processed : 3888 NLI Loss : 1.04902\n", + "2019-05-29 18:15:46,921 - INFO - Average time per mininbatch : 0.60379\n", + "2019-05-29 18:15:46,928 - INFO - ******************************************************\n", + "2019-05-29 18:17:46,728 - INFO - Seq2Seq Examples Processed : 48000 snli Loss : 3.93451 Num snli minibatches : 180\n", + "2019-05-29 18:17:46,761 - INFO - Round: 1000 NLI Epoch : 0 NLI Examples Processed : 4848 NLI Loss : 1.04646\n", + "2019-05-29 18:17:46,797 - INFO - Average time per mininbatch : 
0.59882\n", + "2019-05-29 18:17:46,806 - INFO - ******************************************************\n", + "2019-05-29 18:19:45,026 - INFO - Seq2Seq Examples Processed : 57600 snli Loss : 3.80465 Num snli minibatches : 180\n", + "2019-05-29 18:19:45,059 - INFO - Round: 1200 NLI Epoch : 0 NLI Examples Processed : 5808 NLI Loss : 1.03508\n", + "2019-05-29 18:19:45,066 - INFO - Average time per mininbatch : 0.59106\n", + "2019-05-29 18:19:45,101 - INFO - ******************************************************\n", + "2019-05-29 18:21:41,592 - INFO - Seq2Seq Examples Processed : 67200 snli Loss : 3.69689 Num snli minibatches : 180\n", + "2019-05-29 18:21:41,609 - INFO - Round: 1400 NLI Epoch : 0 NLI Examples Processed : 6768 NLI Loss : 1.01259\n", + "2019-05-29 18:21:41,615 - INFO - Average time per mininbatch : 0.58242\n", + "2019-05-29 18:21:41,621 - INFO - ******************************************************\n", + "2019-05-29 18:23:37,792 - INFO - Seq2Seq Examples Processed : 76800 snli Loss : 3.63456 Num snli minibatches : 180\n", + "2019-05-29 18:23:37,801 - INFO - Round: 1600 NLI Epoch : 0 NLI Examples Processed : 7728 NLI Loss : 1.01536\n", + "2019-05-29 18:23:37,807 - INFO - Average time per mininbatch : 0.58082\n", + "2019-05-29 18:23:37,813 - INFO - ******************************************************\n", + "2019-05-29 18:25:31,047 - INFO - Seq2Seq Examples Processed : 86400 snli Loss : 3.66306 Num snli minibatches : 180\n", + "2019-05-29 18:25:31,251 - INFO - Round: 1800 NLI Epoch : 0 NLI Examples Processed : 8688 NLI Loss : 0.98221\n", + "2019-05-29 18:25:31,264 - INFO - Average time per mininbatch : 0.56610\n", + "2019-05-29 18:25:31,270 - INFO - ******************************************************\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2019-05-29 18:27:25,602 - INFO - Seq2Seq Examples Processed : 96000 snli Loss : 3.44516 Num snli minibatches : 180\n", + "2019-05-29 18:27:25,708 - INFO - Round: 2000 NLI Epoch : 0 NLI Examples Processed : 9648 NLI Loss : 0.99616\n", + "2019-05-29 18:27:25,714 - INFO - Average time per mininbatch : 0.57163\n", + "2019-05-29 18:27:25,728 - INFO - ******************************************************\n", + "2019-05-29 18:29:17,873 - INFO - Seq2Seq Examples Processed : 105600 snli Loss : 3.59119 Num snli minibatches : 180\n", + "2019-05-29 18:29:17,894 - INFO - Round: 2200 NLI Epoch : 0 NLI Examples Processed : 10608 NLI Loss : 0.99196\n", + "2019-05-29 18:29:17,899 - INFO - Average time per mininbatch : 0.56068\n", + "2019-05-29 18:29:17,951 - INFO - ******************************************************\n", + "2019-05-29 18:31:09,519 - INFO - Seq2Seq Examples Processed : 115200 snli Loss : 3.35993 Num snli minibatches : 180\n", + "2019-05-29 18:31:09,529 - INFO - Round: 2400 NLI Epoch : 0 NLI Examples Processed : 11568 NLI Loss : 0.98847\n", + "2019-05-29 18:31:09,538 - INFO - Average time per mininbatch : 0.55776\n", + "2019-05-29 18:31:09,554 - INFO - ******************************************************\n", + "2019-05-29 18:33:01,925 - INFO - Seq2Seq Examples Processed : 124800 snli Loss : 3.48944 Num snli minibatches : 180\n", + "2019-05-29 18:33:01,960 - INFO - Round: 2600 NLI Epoch : 0 NLI Examples Processed : 12528 NLI Loss : 0.98408\n", + "2019-05-29 18:33:01,966 - INFO - Average time per mininbatch : 0.56180\n", + "2019-05-29 18:33:01,973 - INFO - ******************************************************\n", + "2019-05-29 18:34:50,796 - INFO - Seq2Seq Examples Processed : 134400 snli Loss : 
3.44262 Num snli minibatches : 180\n", + "2019-05-29 18:34:50,847 - INFO - Round: 2800 NLI Epoch : 0 NLI Examples Processed : 13488 NLI Loss : 0.91956\n", + "2019-05-29 18:34:50,852 - INFO - Average time per mininbatch : 0.54408\n", + "2019-05-29 18:34:50,858 - INFO - ******************************************************\n", + "2019-05-29 18:36:42,178 - INFO - Seq2Seq Examples Processed : 144000 snli Loss : 3.24111 Num snli minibatches : 180\n", + "2019-05-29 18:36:42,187 - INFO - Round: 3000 NLI Epoch : 0 NLI Examples Processed : 14448 NLI Loss : 0.94733\n", + "2019-05-29 18:36:42,193 - INFO - Average time per mininbatch : 0.55657\n", + "2019-05-29 18:36:42,201 - INFO - ******************************************************\n", + "2019-05-29 18:38:31,432 - INFO - Seq2Seq Examples Processed : 153600 snli Loss : 3.39076 Num snli minibatches : 180\n", + "2019-05-29 18:38:31,441 - INFO - Round: 3200 NLI Epoch : 0 NLI Examples Processed : 15408 NLI Loss : 0.96010\n", + "2019-05-29 18:38:31,447 - INFO - Average time per mininbatch : 0.54612\n", + "2019-05-29 18:38:31,453 - INFO - ******************************************************\n", + "2019-05-29 18:40:20,337 - INFO - Seq2Seq Examples Processed : 163200 snli Loss : 3.34304 Num snli minibatches : 180\n", + "2019-05-29 18:40:20,352 - INFO - Round: 3400 NLI Epoch : 0 NLI Examples Processed : 16368 NLI Loss : 0.90768\n", + "2019-05-29 18:40:20,359 - INFO - Average time per mininbatch : 0.54439\n", + "2019-05-29 18:40:20,365 - INFO - ******************************************************\n", + "2019-05-29 18:42:09,816 - INFO - Seq2Seq Examples Processed : 172800 snli Loss : 3.15766 Num snli minibatches : 180\n", + "2019-05-29 18:42:09,828 - INFO - Round: 3600 NLI Epoch : 0 NLI Examples Processed : 17328 NLI Loss : 0.90791\n", + "2019-05-29 18:42:09,834 - INFO - Average time per mininbatch : 0.54722\n", + "2019-05-29 18:42:09,840 - INFO - ******************************************************\n", + "2019-05-29 18:43:56,758 - INFO - Seq2Seq Examples Processed : 182400 snli Loss : 3.26524 Num snli minibatches : 180\n", + "2019-05-29 18:43:56,786 - INFO - Round: 3800 NLI Epoch : 0 NLI Examples Processed : 18288 NLI Loss : 0.92823\n", + "2019-05-29 18:43:56,795 - INFO - Average time per mininbatch : 0.53455\n", + "2019-05-29 18:43:56,802 - INFO - ******************************************************\n", + "2019-05-29 18:45:44,253 - INFO - Seq2Seq Examples Processed : 192000 snli Loss : 3.32298 Num snli minibatches : 180\n", + "2019-05-29 18:45:44,336 - INFO - Round: 4000 NLI Epoch : 0 NLI Examples Processed : 19248 NLI Loss : 0.85748\n", + "2019-05-29 18:45:44,343 - INFO - Average time per mininbatch : 0.53723\n", + "2019-05-29 18:45:44,349 - INFO - ******************************************************\n", + "2019-05-29 18:47:30,880 - INFO - Seq2Seq Examples Processed : 201600 snli Loss : 3.16485 Num snli minibatches : 180\n", + "2019-05-29 18:47:30,896 - INFO - Round: 4200 NLI Epoch : 0 NLI Examples Processed : 20208 NLI Loss : 0.90056\n", + "2019-05-29 18:47:30,901 - INFO - Average time per mininbatch : 0.53256\n", + "2019-05-29 18:47:30,907 - INFO - ******************************************************\n", + "2019-05-29 18:49:18,536 - INFO - Seq2Seq Examples Processed : 211200 snli Loss : 3.05513 Num snli minibatches : 180\n", + "2019-05-29 18:49:18,567 - INFO - Round: 4400 NLI Epoch : 0 NLI Examples Processed : 21168 NLI Loss : 0.86934\n", + "2019-05-29 18:49:18,579 - INFO - Average time per mininbatch : 0.53807\n", + "2019-05-29 
18:49:18,584 - INFO - ******************************************************\n", + "2019-05-29 18:51:04,240 - INFO - Seq2Seq Examples Processed : 220800 snli Loss : 3.24391 Num snli minibatches : 180\n", + "2019-05-29 18:51:04,358 - INFO - Round: 4600 NLI Epoch : 0 NLI Examples Processed : 22128 NLI Loss : 0.84865\n", + "2019-05-29 18:51:04,365 - INFO - Average time per mininbatch : 0.52824\n", + "2019-05-29 18:51:04,414 - INFO - ******************************************************\n", + "2019-05-29 18:52:50,226 - INFO - Seq2Seq Examples Processed : 230400 snli Loss : 3.25106 Num snli minibatches : 180\n", + "2019-05-29 18:52:50,268 - INFO - Round: 4800 NLI Epoch : 0 NLI Examples Processed : 23088 NLI Loss : 0.87072\n", + "2019-05-29 18:52:50,276 - INFO - Average time per mininbatch : 0.52897\n", + "2019-05-29 18:52:50,292 - INFO - ******************************************************\n", + "2019-05-29 18:54:35,638 - INFO - Seq2Seq Examples Processed : 240000 snli Loss : 3.06245 Num snli minibatches : 180\n", + "2019-05-29 18:54:35,664 - INFO - Round: 5000 NLI Epoch : 0 NLI Examples Processed : 24048 NLI Loss : 0.84427\n", + "2019-05-29 18:54:35,670 - INFO - Average time per mininbatch : 0.52669\n", + "2019-05-29 18:54:35,677 - INFO - ******************************************************\n", + "2019-05-29 18:56:21,116 - INFO - Seq2Seq Examples Processed : 249600 snli Loss : 2.99545 Num snli minibatches : 180\n", + "2019-05-29 18:56:21,167 - INFO - Round: 5200 NLI Epoch : 0 NLI Examples Processed : 25008 NLI Loss : 0.89375\n", + "2019-05-29 18:56:21,172 - INFO - Average time per mininbatch : 0.52716\n", + "2019-05-29 18:56:21,178 - INFO - ******************************************************\n", + "2019-05-29 18:58:03,355 - INFO - Seq2Seq Examples Processed : 259200 snli Loss : 3.12514 Num snli minibatches : 180\n", + "2019-05-29 18:58:03,377 - INFO - Round: 5400 NLI Epoch : 0 NLI Examples Processed : 25968 NLI Loss : 0.84170\n", + "2019-05-29 18:58:03,383 - INFO - Average time per mininbatch : 0.51073\n", + "2019-05-29 18:58:03,389 - INFO - ******************************************************\n", + "2019-05-29 18:59:45,907 - INFO - Seq2Seq Examples Processed : 268800 snli Loss : 3.13727 Num snli minibatches : 180\n", + "2019-05-29 18:59:45,929 - INFO - Round: 5600 NLI Epoch : 0 NLI Examples Processed : 26928 NLI Loss : 0.85129\n", + "2019-05-29 18:59:45,947 - INFO - Average time per mininbatch : 0.51247\n", + "2019-05-29 18:59:45,953 - INFO - ******************************************************\n", + "2019-05-29 19:01:28,814 - INFO - Seq2Seq Examples Processed : 278400 snli Loss : 3.08039 Num snli minibatches : 180\n", + "2019-05-29 19:01:28,851 - INFO - Round: 5800 NLI Epoch : 0 NLI Examples Processed : 27888 NLI Loss : 0.86816\n", + "2019-05-29 19:01:28,857 - INFO - Average time per mininbatch : 0.51426\n", + "2019-05-29 19:01:28,863 - INFO - ******************************************************\n", + "2019-05-29 19:03:09,420 - INFO - Seq2Seq Examples Processed : 288000 snli Loss : 2.87723 Num snli minibatches : 180\n", + "2019-05-29 19:03:09,452 - INFO - Round: 6000 NLI Epoch : 0 NLI Examples Processed : 28848 NLI Loss : 0.84560\n", + "2019-05-29 19:03:09,458 - INFO - Average time per mininbatch : 0.50258\n", + "2019-05-29 19:03:09,464 - INFO - ******************************************************\n", + "2019-05-29 19:04:52,208 - INFO - Seq2Seq Examples Processed : 297600 snli Loss : 2.91778 Num snli minibatches : 180\n", + "2019-05-29 19:04:52,251 - INFO - Round: 6200 NLI 
Epoch : 0 NLI Examples Processed : 29808 NLI Loss : 0.81515\n", + "2019-05-29 19:04:52,257 - INFO - Average time per mininbatch : 0.51368\n", + "2019-05-29 19:04:52,264 - INFO - ******************************************************\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2019-05-29 19:06:32,652 - INFO - Seq2Seq Examples Processed : 307200 snli Loss : 3.07575 Num snli minibatches : 180\n", + "2019-05-29 19:06:32,708 - INFO - Round: 6400 NLI Epoch : 0 NLI Examples Processed : 30768 NLI Loss : 0.78086\n", + "2019-05-29 19:06:32,715 - INFO - Average time per mininbatch : 0.50191\n", + "2019-05-29 19:06:32,722 - INFO - ******************************************************\n", + "2019-05-29 19:08:12,195 - INFO - Seq2Seq Examples Processed : 316800 snli Loss : 3.01557 Num snli minibatches : 180\n", + "2019-05-29 19:08:12,203 - INFO - Round: 6600 NLI Epoch : 0 NLI Examples Processed : 31728 NLI Loss : 0.85198\n", + "2019-05-29 19:08:12,225 - INFO - Average time per mininbatch : 0.49733\n", + "2019-05-29 19:08:12,255 - INFO - ******************************************************\n", + "2019-05-29 19:09:50,561 - INFO - Seq2Seq Examples Processed : 326400 snli Loss : 3.01893 Num snli minibatches : 180\n", + "2019-05-29 19:09:50,631 - INFO - Round: 6800 NLI Epoch : 0 NLI Examples Processed : 32688 NLI Loss : 0.79130\n", + "2019-05-29 19:09:50,637 - INFO - Average time per mininbatch : 0.49149\n", + "2019-05-29 19:09:50,642 - INFO - ******************************************************\n", + "2019-05-29 19:11:31,019 - INFO - Seq2Seq Examples Processed : 336000 snli Loss : 2.80379 Num snli minibatches : 180\n", + "2019-05-29 19:11:31,070 - INFO - Round: 7000 NLI Epoch : 0 NLI Examples Processed : 33648 NLI Loss : 0.84067\n", + "2019-05-29 19:11:31,077 - INFO - Average time per mininbatch : 0.50185\n", + "2019-05-29 19:11:31,085 - INFO - ******************************************************\n", + "2019-05-29 19:13:10,680 - INFO - Seq2Seq Examples Processed : 345600 snli Loss : 2.75945 Num snli minibatches : 180\n", + "2019-05-29 19:13:10,708 - INFO - Round: 7200 NLI Epoch : 0 NLI Examples Processed : 34608 NLI Loss : 0.81463\n", + "2019-05-29 19:13:10,714 - INFO - Average time per mininbatch : 0.49794\n", + "2019-05-29 19:13:10,719 - INFO - ******************************************************\n", + "2019-05-29 19:14:48,873 - INFO - Seq2Seq Examples Processed : 355200 snli Loss : 2.88910 Num snli minibatches : 180\n", + "2019-05-29 19:14:49,171 - INFO - Round: 7400 NLI Epoch : 0 NLI Examples Processed : 35568 NLI Loss : 0.82873\n", + "2019-05-29 19:14:49,181 - INFO - Average time per mininbatch : 0.49074\n", + "2019-05-29 19:14:49,186 - INFO - ******************************************************\n", + "2019-05-29 19:16:26,470 - INFO - Seq2Seq Examples Processed : 364800 snli Loss : 3.01164 Num snli minibatches : 180\n", + "2019-05-29 19:16:26,532 - INFO - Round: 7600 NLI Epoch : 0 NLI Examples Processed : 36528 NLI Loss : 0.79707\n", + "2019-05-29 19:16:26,538 - INFO - Average time per mininbatch : 0.48639\n", + "2019-05-29 19:16:26,547 - INFO - ******************************************************\n", + "2019-05-29 19:18:04,600 - INFO - Seq2Seq Examples Processed : 374400 snli Loss : 2.97503 Num snli minibatches : 180\n", + "2019-05-29 19:18:04,627 - INFO - Round: 7800 NLI Epoch : 0 NLI Examples Processed : 37488 NLI Loss : 0.81165\n", + "2019-05-29 19:18:04,633 - INFO - Average time per mininbatch : 0.49024\n", + "2019-05-29 19:18:04,638 - INFO - 
******************************************************\n", + "2019-05-29 19:19:42,343 - INFO - Seq2Seq Examples Processed : 384000 snli Loss : 2.95044 Num snli minibatches : 180\n", + "2019-05-29 19:19:42,383 - INFO - Round: 8000 NLI Epoch : 0 NLI Examples Processed : 38448 NLI Loss : 0.77696\n", + "2019-05-29 19:19:42,388 - INFO - Average time per mininbatch : 0.48847\n", + "2019-05-29 19:19:42,393 - INFO - ******************************************************\n", + "2019-05-29 19:21:20,043 - INFO - Seq2Seq Examples Processed : 393600 snli Loss : 2.77445 Num snli minibatches : 180\n", + "2019-05-29 19:21:20,059 - INFO - Round: 8200 NLI Epoch : 0 NLI Examples Processed : 39408 NLI Loss : 0.83046\n", + "2019-05-29 19:21:20,094 - INFO - Average time per mininbatch : 0.48821\n", + "2019-05-29 19:21:20,099 - INFO - ******************************************************\n", + "2019-05-29 19:22:57,666 - INFO - Seq2Seq Examples Processed : 403200 snli Loss : 2.74390 Num snli minibatches : 180\n", + "2019-05-29 19:22:57,687 - INFO - Round: 8400 NLI Epoch : 0 NLI Examples Processed : 40368 NLI Loss : 0.79176\n", + "2019-05-29 19:22:57,694 - INFO - Average time per mininbatch : 0.48760\n", + "2019-05-29 19:22:57,700 - INFO - ******************************************************\n", + "2019-05-29 19:24:36,064 - INFO - Seq2Seq Examples Processed : 412800 snli Loss : 2.70470 Num snli minibatches : 180\n", + "2019-05-29 19:24:36,084 - INFO - Round: 8600 NLI Epoch : 0 NLI Examples Processed : 41328 NLI Loss : 0.78364\n", + "2019-05-29 19:24:36,104 - INFO - Average time per mininbatch : 0.49178\n", + "2019-05-29 19:24:36,110 - INFO - ******************************************************\n", + "2019-05-29 19:26:12,557 - INFO - Seq2Seq Examples Processed : 422400 snli Loss : 2.89966 Num snli minibatches : 180\n", + "2019-05-29 19:26:12,568 - INFO - Round: 8800 NLI Epoch : 0 NLI Examples Processed : 42288 NLI Loss : 0.79969\n", + "2019-05-29 19:26:12,573 - INFO - Average time per mininbatch : 0.48220\n", + "2019-05-29 19:26:12,578 - INFO - ******************************************************\n", + "2019-05-29 19:27:48,184 - INFO - Seq2Seq Examples Processed : 432000 snli Loss : 2.89469 Num snli minibatches : 180\n", + "2019-05-29 19:27:48,205 - INFO - Round: 9000 NLI Epoch : 0 NLI Examples Processed : 43248 NLI Loss : 0.81662\n", + "2019-05-29 19:27:48,211 - INFO - Average time per mininbatch : 0.47799\n", + "2019-05-29 19:27:48,216 - INFO - ******************************************************\n", + "2019-05-29 19:29:24,910 - INFO - Seq2Seq Examples Processed : 441600 snli Loss : 2.87607 Num snli minibatches : 180\n", + "2019-05-29 19:29:25,067 - INFO - Round: 9200 NLI Epoch : 0 NLI Examples Processed : 44208 NLI Loss : 0.77653\n", + "2019-05-29 19:29:25,079 - INFO - Average time per mininbatch : 0.48344\n", + "2019-05-29 19:29:25,085 - INFO - ******************************************************\n", + "2019-05-29 19:31:00,670 - INFO - Seq2Seq Examples Processed : 451200 snli Loss : 2.85335 Num snli minibatches : 180\n", + "2019-05-29 19:31:00,678 - INFO - Round: 9400 NLI Epoch : 0 NLI Examples Processed : 45168 NLI Loss : 0.75379\n", + "2019-05-29 19:31:00,686 - INFO - Average time per mininbatch : 0.47779\n", + "2019-05-29 19:31:00,693 - INFO - ******************************************************\n", + "2019-05-29 19:32:37,261 - INFO - Seq2Seq Examples Processed : 460800 snli Loss : 2.80405 Num snli minibatches : 180\n", + "2019-05-29 19:32:37,285 - INFO - Round: 9600 NLI Epoch : 0 NLI Examples 
Processed : 46128 NLI Loss : 0.77045\n", + "2019-05-29 19:32:37,291 - INFO - Average time per mininbatch : 0.48279\n", + "2019-05-29 19:32:37,327 - INFO - ******************************************************\n", + "2019-05-29 19:34:13,404 - INFO - Seq2Seq Examples Processed : 470400 snli Loss : 2.63074 Num snli minibatches : 180\n", + "2019-05-29 19:34:13,452 - INFO - Round: 9800 NLI Epoch : 0 NLI Examples Processed : 47088 NLI Loss : 0.79249\n", + "2019-05-29 19:34:13,480 - INFO - Average time per mininbatch : 0.48036\n", + "2019-05-29 19:34:13,485 - INFO - ******************************************************\n", + "2019-05-29 19:35:50,016 - INFO - Seq2Seq Examples Processed : 480000 snli Loss : 2.62458 Num snli minibatches : 180\n", + "2019-05-29 19:35:50,054 - INFO - Round: 10000 NLI Epoch : 0 NLI Examples Processed : 48048 NLI Loss : 0.82520\n", + "2019-05-29 19:35:50,060 - INFO - Average time per mininbatch : 0.48262\n", + "2019-05-29 19:35:50,065 - INFO - ******************************************************\n", + "2019-05-29 19:37:25,555 - INFO - Seq2Seq Examples Processed : 489600 snli Loss : 2.61139 Num snli minibatches : 180\n", + "2019-05-29 19:37:25,621 - INFO - Round: 10200 NLI Epoch : 0 NLI Examples Processed : 49008 NLI Loss : 0.79771\n", + "2019-05-29 19:37:25,636 - INFO - Average time per mininbatch : 0.47742\n", + "2019-05-29 19:37:25,672 - INFO - ******************************************************\n", + "2019-05-29 19:39:00,465 - INFO - Seq2Seq Examples Processed : 499200 snli Loss : 2.85354 Num snli minibatches : 180\n", + "2019-05-29 19:39:00,485 - INFO - Round: 10400 NLI Epoch : 0 NLI Examples Processed : 49968 NLI Loss : 0.75161\n", + "2019-05-29 19:39:00,515 - INFO - Average time per mininbatch : 0.47392\n", + "2019-05-29 19:39:00,521 - INFO - ******************************************************\n", + "2019-05-29 19:40:35,133 - INFO - Seq2Seq Examples Processed : 508800 snli Loss : 2.81737 Num snli minibatches : 180\n", + "2019-05-29 19:40:35,153 - INFO - Round: 10600 NLI Epoch : 0 NLI Examples Processed : 50928 NLI Loss : 0.76678\n", + "2019-05-29 19:40:35,183 - INFO - Average time per mininbatch : 0.47303\n", + "2019-05-29 19:40:35,191 - INFO - ******************************************************\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2019-05-29 19:42:10,143 - INFO - Seq2Seq Examples Processed : 518400 snli Loss : 2.81220 Num snli minibatches : 180\n", + "2019-05-29 19:42:10,181 - INFO - Round: 10800 NLI Epoch : 0 NLI Examples Processed : 51888 NLI Loss : 0.80043\n", + "2019-05-29 19:42:10,186 - INFO - Average time per mininbatch : 0.47473\n", + "2019-05-29 19:42:10,191 - INFO - ******************************************************\n", + "2019-05-29 19:43:45,532 - INFO - Seq2Seq Examples Processed : 528000 snli Loss : 2.79347 Num snli minibatches : 180\n", + "2019-05-29 19:43:45,556 - INFO - Round: 11000 NLI Epoch : 0 NLI Examples Processed : 52848 NLI Loss : 0.76758\n", + "2019-05-29 19:43:45,561 - INFO - Average time per mininbatch : 0.47667\n", + "2019-05-29 19:43:45,566 - INFO - ******************************************************\n", + "2019-05-29 19:45:19,931 - INFO - Seq2Seq Examples Processed : 537600 snli Loss : 2.77058 Num snli minibatches : 180\n", + "2019-05-29 19:45:19,940 - INFO - Round: 11200 NLI Epoch : 0 NLI Examples Processed : 53808 NLI Loss : 0.75474\n", + "2019-05-29 19:45:19,947 - INFO - Average time per mininbatch : 0.47179\n", + "2019-05-29 19:45:19,952 - INFO - 
******************************************************\n", + "2019-05-29 19:46:54,122 - INFO - Seq2Seq Examples Processed : 547200 snli Loss : 2.57868 Num snli minibatches : 180\n", + "2019-05-29 19:46:54,202 - INFO - Round: 11400 NLI Epoch : 0 NLI Examples Processed : 54768 NLI Loss : 0.72065\n", + "2019-05-29 19:46:54,207 - INFO - Average time per mininbatch : 0.47081\n", + "2019-05-29 19:46:54,212 - INFO - ******************************************************\n", + "2019-05-29 19:48:27,836 - INFO - Seq2Seq Examples Processed : 556800 snli Loss : 2.53883 Num snli minibatches : 180\n", + "2019-05-29 19:48:27,866 - INFO - Round: 11600 NLI Epoch : 0 NLI Examples Processed : 55728 NLI Loss : 0.74271\n", + "2019-05-29 19:48:27,872 - INFO - Average time per mininbatch : 0.46808\n", + "2019-05-29 19:48:27,901 - INFO - ******************************************************\n", + "2019-05-29 19:50:04,260 - INFO - Seq2Seq Examples Processed : 566400 snli Loss : 2.56377 Num snli minibatches : 180\n", + "2019-05-29 19:50:04,275 - INFO - Round: 11800 NLI Epoch : 0 NLI Examples Processed : 56688 NLI Loss : 0.70260\n", + "2019-05-29 19:50:04,280 - INFO - Average time per mininbatch : 0.48162\n", + "2019-05-29 19:50:04,285 - INFO - ******************************************************\n", + "2019-05-29 19:51:39,188 - INFO - Seq2Seq Examples Processed : 576000 snli Loss : 2.57128 Num snli minibatches : 180\n", + "2019-05-29 19:51:39,237 - INFO - Round: 12000 NLI Epoch : 0 NLI Examples Processed : 57648 NLI Loss : 0.75922\n", + "2019-05-29 19:51:39,246 - INFO - Average time per mininbatch : 0.47448\n", + "2019-05-29 19:51:39,280 - INFO - ******************************************************\n", + "2019-05-29 19:53:11,436 - INFO - Seq2Seq Examples Processed : 585600 snli Loss : 2.81611 Num snli minibatches : 180\n", + "2019-05-29 19:53:11,510 - INFO - Round: 12200 NLI Epoch : 0 NLI Examples Processed : 58608 NLI Loss : 0.76561\n", + "2019-05-29 19:53:11,545 - INFO - Average time per mininbatch : 0.46075\n", + "2019-05-29 19:53:11,554 - INFO - ******************************************************\n", + "2019-05-29 19:54:42,824 - INFO - Seq2Seq Examples Processed : 595200 snli Loss : 2.78098 Num snli minibatches : 180\n", + "2019-05-29 19:54:42,867 - INFO - Round: 12400 NLI Epoch : 0 NLI Examples Processed : 59568 NLI Loss : 0.73193\n", + "2019-05-29 19:54:42,875 - INFO - Average time per mininbatch : 0.45632\n", + "2019-05-29 19:54:42,881 - INFO - ******************************************************\n", + "2019-05-29 19:56:14,538 - INFO - Seq2Seq Examples Processed : 604800 snli Loss : 2.75304 Num snli minibatches : 180\n", + "2019-05-29 19:56:14,596 - INFO - Round: 12600 NLI Epoch : 0 NLI Examples Processed : 60528 NLI Loss : 0.75785\n", + "2019-05-29 19:56:14,602 - INFO - Average time per mininbatch : 0.45825\n", + "2019-05-29 19:56:14,609 - INFO - ******************************************************\n", + "2019-05-29 19:57:47,344 - INFO - Seq2Seq Examples Processed : 614400 snli Loss : 2.76325 Num snli minibatches : 180\n", + "2019-05-29 19:57:47,364 - INFO - Round: 12800 NLI Epoch : 0 NLI Examples Processed : 61488 NLI Loss : 0.75488\n", + "2019-05-29 19:57:47,412 - INFO - Average time per mininbatch : 0.46364\n", + "2019-05-29 19:57:47,418 - INFO - ******************************************************\n", + "2019-05-29 19:59:19,095 - INFO - Seq2Seq Examples Processed : 624000 snli Loss : 2.70555 Num snli minibatches : 180\n", + "2019-05-29 19:59:19,135 - INFO - Round: 13000 NLI Epoch : 0 NLI 
Examples Processed : 62448 NLI Loss : 0.73895\n", + "2019-05-29 19:59:19,145 - INFO - Average time per mininbatch : 0.45835\n", + "2019-05-29 19:59:19,150 - INFO - ******************************************************\n", + "2019-05-29 20:00:51,249 - INFO - Seq2Seq Examples Processed : 633600 snli Loss : 2.60308 Num snli minibatches : 180\n", + "2019-05-29 20:00:51,287 - INFO - Round: 13200 NLI Epoch : 0 NLI Examples Processed : 63408 NLI Loss : 0.74067\n", + "2019-05-29 20:00:51,297 - INFO - Average time per mininbatch : 0.46046\n", + "2019-05-29 20:00:51,303 - INFO - ******************************************************\n", + "2019-05-29 20:02:22,456 - INFO - Seq2Seq Examples Processed : 643200 snli Loss : 2.48930 Num snli minibatches : 180\n", + "2019-05-29 20:02:22,536 - INFO - Round: 13400 NLI Epoch : 0 NLI Examples Processed : 64368 NLI Loss : 0.76212\n", + "2019-05-29 20:02:22,542 - INFO - Average time per mininbatch : 0.45573\n", + "2019-05-29 20:02:22,570 - INFO - ******************************************************\n", + "2019-05-29 20:03:54,309 - INFO - Seq2Seq Examples Processed : 652800 snli Loss : 2.49109 Num snli minibatches : 180\n", + "2019-05-29 20:03:54,334 - INFO - Round: 13600 NLI Epoch : 0 NLI Examples Processed : 65328 NLI Loss : 0.72918\n", + "2019-05-29 20:03:54,342 - INFO - Average time per mininbatch : 0.45853\n", + "2019-05-29 20:03:54,361 - INFO - ******************************************************\n", + "2019-05-29 20:05:26,473 - INFO - Seq2Seq Examples Processed : 662400 snli Loss : 2.49999 Num snli minibatches : 180\n", + "2019-05-29 20:05:26,500 - INFO - Round: 13800 NLI Epoch : 0 NLI Examples Processed : 66288 NLI Loss : 0.71488\n", + "2019-05-29 20:05:26,531 - INFO - Average time per mininbatch : 0.46042\n", + "2019-05-29 20:05:26,539 - INFO - ******************************************************\n", + "2019-05-29 20:06:58,271 - INFO - Seq2Seq Examples Processed : 672000 snli Loss : 2.56470 Num snli minibatches : 180\n", + "2019-05-29 20:06:58,336 - INFO - Round: 14000 NLI Epoch : 0 NLI Examples Processed : 67248 NLI Loss : 0.72321\n", + "2019-05-29 20:06:58,342 - INFO - Average time per mininbatch : 0.45860\n", + "2019-05-29 20:06:58,348 - INFO - ******************************************************\n", + "2019-05-29 20:08:29,144 - INFO - Seq2Seq Examples Processed : 681600 snli Loss : 2.72915 Num snli minibatches : 180\n", + "2019-05-29 20:08:29,198 - INFO - Round: 14200 NLI Epoch : 0 NLI Examples Processed : 68208 NLI Loss : 0.73935\n", + "2019-05-29 20:08:29,204 - INFO - Average time per mininbatch : 0.45394\n", + "2019-05-29 20:08:29,210 - INFO - ******************************************************\n", + "2019-05-29 20:09:58,806 - INFO - Seq2Seq Examples Processed : 691200 snli Loss : 2.68844 Num snli minibatches : 180\n", + "2019-05-29 20:09:58,846 - INFO - Round: 14400 NLI Epoch : 0 NLI Examples Processed : 69168 NLI Loss : 0.74668\n", + "2019-05-29 20:09:58,852 - INFO - Average time per mininbatch : 0.44793\n", + "2019-05-29 20:09:58,859 - INFO - ******************************************************\n", + "2019-05-29 20:11:29,986 - INFO - Seq2Seq Examples Processed : 700800 snli Loss : 2.69640 Num snli minibatches : 180\n", + "2019-05-29 20:11:30,011 - INFO - Round: 14600 NLI Epoch : 0 NLI Examples Processed : 70128 NLI Loss : 0.71737\n", + "2019-05-29 20:11:30,017 - INFO - Average time per mininbatch : 0.45559\n", + "2019-05-29 20:11:30,022 - INFO - ******************************************************\n", + "\n", + "Execution 
Summary\n", + "=================\n", + "RunId: pytorch-gensen_1559153095_0e7f4645\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "{'runId': 'pytorch-gensen_1559153095_0e7f4645',\n", + " 'target': 'gpucluster',\n", + " 'status': 'CancelRequested',\n", + " 'startTimeUtc': '2019-05-29T18:05:02.390551Z',\n", + " 'properties': {'azureml.runsource': 'experiment',\n", + " 'AzureML.DerivedImageName': 'azureml/azureml_f6cd7804b6a4e89cea33d34d8659fed9',\n", + " 'ContentSnapshotId': 'f0eb2538-559b-4051-9d66-5a6a79570c3d',\n", + " 'azureml.git.repository_uri': 'https://github.com/Microsoft/NLP.git',\n", + " 'azureml.git.branch': 'liqun-first-pull',\n", + " 'azureml.git.commit': 'ba716d109a6db89aa94d95255afe7f972a97f0b8',\n", + " 'azureml.git.dirty': 'True',\n", + " 'azureml.git.build_id': None,\n", + " 'azureml.git.build_uri': None,\n", + " 'mlflow.source.git.branch': 'liqun-first-pull',\n", + " 'mlflow.source.git.commit': 'ba716d109a6db89aa94d95255afe7f972a97f0b8',\n", + " 'mlflow.source.git.repoURL': 'https://github.com/Microsoft/NLP.git'},\n", + " 'runDefinition': {'script': 'train.py',\n", + " 'arguments': ['--config',\n", + " 'sample_config.json',\n", + " '--data_folder',\n", + " '$AZUREML_DATAREFERENCE_gensen'],\n", + " 'sourceDirectoryDataStore': 'workspaceblobstore',\n", + " 'framework': 'Python',\n", + " 'communicator': 'Mpi',\n", + " 'target': 'gpucluster',\n", + " 'dataReferences': {'gensen': {'dataStoreName': 'gensen',\n", + " 'mode': 'Mount',\n", + " 'pathOnDataStore': None,\n", + " 'pathOnCompute': None,\n", + " 'overwrite': False},\n", + " 'workspaceblobstore': {'dataStoreName': 'workspaceblobstore',\n", + " 'mode': 'Mount',\n", + " 'pathOnDataStore': None,\n", + " 'pathOnCompute': None,\n", + " 'overwrite': False}},\n", + " 'jobName': None,\n", + " 'maxRunDurationSeconds': None,\n", + " 'nodeCount': 4,\n", + " 'environment': {'name': 'Experiment pytorch-gensen Environment',\n", + " 'version': 'Autosave_2019-05-29T17:23:26Z_8a3fa4ff',\n", + " 'python': {'interpreterPath': 'python',\n", + " 'userManagedDependencies': False,\n", + " 'condaDependencies': {'name': 'project_environment',\n", + " 'dependencies': ['python=3.6.2',\n", + " {'pip': ['azureml-defaults',\n", + " 'torch==1.0.0',\n", + " 'torchvision==0.2.1',\n", + " 'horovod==0.15.2']},\n", + " 'scikit-learn=0.20.3']},\n", + " 'baseCondaEnvironment': None},\n", + " 'environmentVariables': {'EXAMPLE_ENV_VAR': 'EXAMPLE_VALUE',\n", + " 'NCCL_SOCKET_IFNAME': '^docker0'},\n", + " 'docker': {'baseImage': 'mcr.microsoft.com/azureml/base-gpu:intelmpi2018.3-cuda9.0-cudnn7-ubuntu16.04',\n", + " 'enabled': True,\n", + " 'sharedVolumes': True,\n", + " 'gpuSupport': True,\n", + " 'shmSize': '1g',\n", + " 'arguments': [],\n", + " 'baseImageRegistry': {'address': None,\n", + " 'username': None,\n", + " 'password': None}},\n", + " 'spark': {'repositories': ['https://mmlspark.azureedge.net/maven'],\n", + " 'packages': [{'group': 'com.microsoft.ml.spark',\n", + " 'artifact': 'mmlspark_2.11',\n", + " 'version': '0.12'}],\n", + " 'precachePackages': True}},\n", + " 'history': {'outputCollection': True,\n", + " 'directoriesToWatch': ['logs'],\n", + " 'snapshotProject': True},\n", + " 'spark': {'configuration': {'spark.app.name': 'Azure ML Experiment',\n", + " 'spark.yarn.maxAppAttempts': '1'}},\n", + " 'amlCompute': {'name': None,\n", + " 'vmSize': None,\n", + " 'vmPriority': None,\n", + " 'retainCluster': False,\n", + " 'clusterMaxNodeCount': 4},\n", + " 'tensorflow': {'workerCount': 1, 'parameterServerCount': 1},\n", + " 'mpi': 
{'processCountPerNode': 1},\n", + " 'hdi': {'yarnDeployMode': 'Cluster'},\n", + " 'containerInstance': {'region': None, 'cpuCores': 2, 'memoryGb': 3.5},\n", + " 'exposedPorts': None},\n", + " 'logFiles': {'azureml-logs/80_driver_log_rank_0.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_0.txt?sv=2018-03-28&sr=b&sig=DQm40ZucopOZIMdeEOgfpLYIopsnzDl0fQVKokQcOaw%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", + " 'azureml-logs/80_driver_log_rank_1.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_1.txt?sv=2018-03-28&sr=b&sig=yGX4ZaTAWOu8XsikG3oZ9ZFFJycb%2FrrmPU%2FbDWfQs%2FY%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", + " 'azureml-logs/80_driver_log_rank_2.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_2.txt?sv=2018-03-28&sr=b&sig=9zNw%2BZ94ncqQY6%2BzZWJiJJBT%2F3blXF6mTDohsPkvOl4%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", + " 'azureml-logs/80_driver_log_rank_3.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_3.txt?sv=2018-03-28&sr=b&sig=vowPfbhv6HR8QFeFKJJFy6afd9h5Dt5YS18r2I5Xfzs%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", + " 'logs/azureml/azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/logs/azureml/azureml.log?sv=2018-03-28&sr=b&sig=KXjFQzVr00dx7PF7vL2gKOszt0Qvbj7H6%2F9eWP2FEMg%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r'}}" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "run.wait_for_completion(show_output=True) # this provides a verbose log" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Cancel the job**\n", - "\n", - "It's better to cancel the job manually to make sure you do not waste resources." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " ```python\n", - "# Cancel the job with id.\n", - "job_id = \"pytorch-gensen_1555533596_d9cc75fe\"\n", - "run = get_run(experiment, job_id)\n", - "\n", - "# Cancel jobs.\n", - "run.cancel()\n", - " ```" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -1135,10 +1486,12 @@ }, { "cell_type": "code", - "execution_count": 130, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveRunConfig, uniform, PrimaryMetricGoal\n", + "\n", "param_sampling = RandomParameterSampling( {\n", " 'learning_rate': uniform(0.0001, 0.001)\n", " }\n", @@ -1164,7 +1517,7 @@ }, { "cell_type": "code", - "execution_count": 131, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1181,81 +1534,16 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "**Interpret the Tuning Results**\n", - "\n", - "The chart shows 4 different threads running in parallel with different learning rate, and the number of total runs is 8. By comparing the 'Best Metric' which is `best_val_loss` in our case, we can pick the best learning rate. 
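Beyond eyeballing the chart, the best configuration can also be pulled out programmatically once the child runs finish. A minimal sketch, mirroring the notebook's own "Find the Best Model" cells and reusing its `hyperdrive_run` object (it assumes the training script logged `best_val_loss` and `lr` as run metrics):

```python
# Retrieve the child run with the best primary metric and inspect its metrics.
# Assumes `hyperdrive_run` is the submitted HyperDrive run and that the training
# script logged 'best_val_loss' and 'lr' to the run history.
best_run = hyperdrive_run.get_best_run_by_primary_metric()
best_run_metrics = best_run.get_metrics()
print('Best run validation loss: {0:.5f}, learning rate: {1:.5f}'.format(
    best_run_metrics['best_val_loss'][-1],
    best_run_metrics['lr']))
```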
The HyperDrive run automatically shows the tracking charts (example in the following) to let users understand the tuning process.\n", - "![Tuning](https://nlpbp.blob.core.windows.net/images/tuning.PNG)\n", + "from azureml.widgets import RunDetails\n", "\n", - "**From the results in section [2.3.5 Monitor your run](#2.3.5-Monitor-your-run), the `best_val_loss` for 1 node is 4.81, but with tuning we can easily achieve better performance around 4.65.**" - ] - }, - { - "cell_type": "code", - "execution_count": 132, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "c61e610d4601486e9f41fd852320b47b", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_HyperDriveWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO',…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 's…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "5c47f13e11c646cd865d4f286b70ab0c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 's…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ "RunDetails(hyperdrive_run).show()" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see the experiment progress from this notebook by using `azureml.widgets.RunDetails(hd_run).show()` or check from the Azure portal with the url link you can get by running `hd_run.get_portal_url()`.\n", - "To load an existing Hyperdrive run, use `hd_run = hd.HyperDriveRun(exp, , hyperdrive_run_config=hd_run_config)`. You also can cancel a run with `hd_run.cancel()`.\n", - "![](https://nlpbp.blob.core.windows.net/images/tuning1.PNG)\n", - "![](https://nlpbp.blob.core.windows.net/images/tuning2.PNG)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -1263,51 +1551,13 @@ "**Cancel the hyper drive run to save the resources**" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - " ```python\n", - "# Cancel the hyper drive\n", - "hyperdrive_run.cancel()\n", - " ```" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3.3 Find the Best Model" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once all the runs complete, we can find the run that produced the model with the lowest loss." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "best_run = hyperdrive_run.get_best_run_by_primary_metric()\n", - "best_run_metrics = best_run.get_metrics()\n", - "print(best_run)" - ] - }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "print('Best Run is:\\n Validation loss: {0:.5f} \\n Learning rate: {1:.5f} \\n'.format(\n", - " best_run_metrics['best_val_loss'][-1],\n", - " best_run_metrics['lr'])\n", - " )" + "hyperdrive_run.cancel()" ] }, { @@ -1319,8 +1569,7 @@ "1. 
Subramanian, Sandeep and Trischler, Adam and Bengio, Yoshua and Pal, Christopher J, [*Learning general purpose distributed sentence representations via large scale multi-task learning*](https://arxiv.org/abs/1804.00079), ICLR, 2018.\n", "2. A. Conneau, D. Kiela, [*SentEval: An Evaluation Toolkit for Universal Sentence Representations*](https://arxiv.org/abs/1803.05449).\n", "3. Semantic textual similarity. url: http://nlpprogress.com/english/semantic_textual_similarity.html\n", - "4. Minh-Thang Luong, Quoc V Le, Ilya Sutskever, Oriol Vinyals, and Lukasz Kaiser. [*Multi-task sequence to sequence learning*](https://arxiv.org/abs/1511.06114), 2015.\n", - "5. Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. [*Learned in translation: Contextualized word vectors](https://arxiv.org/abs/1708.00107), 2017. " + "4. Minh-Thang Luong, Quoc V Le, Ilya Sutskever, Oriol Vinyals, and Lukasz Kaiser. [*Multi-task sequence to sequence learning*](https://arxiv.org/abs/1511.06114), 2015." ] } ], From ed26f8fcc6cf560edd21472e66da543ca7ce70ff Mon Sep 17 00:00:00 2001 From: Abhiram E Date: Thu, 20 Jun 2019 19:50:19 -0400 Subject: [PATCH 027/108] Integrated Mlflow with AzureMl Gensen deep dive notebook --- .gitignore | 3 + .../gensen_aml_deep_dive.ipynb | 1261 +++++++---------- .../sentence_similarity/gensen_config.json | 16 +- scenarios/sentence_similarity/gensen_train.py | 42 +- tests/unit/test_word_embeddings.py | 4 +- tools/generate_conda_file.py | 5 +- 6 files changed, 518 insertions(+), 813 deletions(-) diff --git a/.gitignore b/.gitignore index a7102e9f9..18e4a9716 100644 --- a/.gitignore +++ b/.gitignore @@ -128,3 +128,6 @@ nlp_*.yaml data/ sentence-similarity/data/ +#Scripts +utils_nlp/gensen/gensen_config.json +utils_nlp/gensen/gensen_train.py diff --git a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb index 0676f3175..4f51a3b39 100644 --- a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb +++ b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb @@ -14,36 +14,54 @@ "metadata": {}, "source": [ "# GenSen Deep Dive on AzureML\n", - "**Learning General Purpose Distributed Sentence Representations via Large Scale Multi-task Learning** [\\[1\\]](#References)\n", - "\n", - "## What is sentence similarity?\n", - "\n", - "Sentence similarity or semantic textual similarity deals with determining how similar two pieces of texts are. This can take the form of assigning a score from 1 to 5. Related tasks are parahrase or duplicate identification.\n", - "\n", - "## How to evaluate?\n", - "\n", - "[SentEval](https://arxiv.org/abs/1803.05449) [\\[2\\]](#References) is an evaluation toolkit for evaluating sentence representations. It includes 17 downstream tasks, including common semantic textual similarity tasks. The semantic textual similarity (**STS**) benchmark tasks from 2012-2016 (STS12, STS13, STS14, STS15, STS16, STSB) measure the relatedness of two sentences based on the cosine similarity of the two representations. The evaluation criterion is Pearson correlation.\n", - "\n", - "The SICK relatedness (**SICK-R**) task trains a linear model to output a score from 1 to 5 indicating the relatedness of two sentences. For the same dataset (**SICK-E**) can be treated as a three-class classification problem using the entailment labels (classes are ‘entailment’, ‘contradiction’, and ‘neutral’). 
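As a toy illustration of how these benchmarks score a sentence encoder: relatedness is predicted from the cosine similarity of the two sentence embeddings and then compared against the human ratings with Pearson correlation. The sketch below is illustrative only (SentEval performs this internally); the embedding and score arrays are random placeholders:

```python
# Toy STS-style scoring: cosine similarity of paired sentence embeddings,
# evaluated against human relatedness ratings with Pearson correlation.
import numpy as np
from scipy.stats import pearsonr

def cosine_similarity(a, b):
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

emb_a = np.random.rand(100, 512)     # embeddings of the first sentences (placeholder)
emb_b = np.random.rand(100, 512)     # embeddings of the second sentences (placeholder)
gold = np.random.uniform(0, 5, 100)  # human relatedness scores (placeholder)

pred = np.array([cosine_similarity(a, b) for a, b in zip(emb_a, emb_b)])
r, _ = pearsonr(pred, gold)
print('Pearson correlation:', r)
```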
The evaluation metric for SICK-R is Pearson correlation and classification accuracy for SICK-E.\n", - "\n", - "The Microsoft Research Paraphrase Corpus [(**MRPC**)](https://www.microsoft.com/en-us/download/details.aspx?id=52398) corpus is a paraphrase identification dataset, where systems aim to identify if two sentences are paraphrases of each other. The evaluation metric is classification accuracy and F1.\n", - "\n", + "**Learning General Purpose Distributed Sentence Representations via Large Scale Multi-task Learning** [\\[1\\]](#References)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "## What is GenSen?\n", "\n", - "GenSen is a technique to learn general purpose, fixed-length representations of sentences via multi-task training. GenSen model is to combine the benefits of diverse sentence-representation learning objectives into a single multi-task framework. This is the first large-scale reusable sentence representation model obtained by combining a set of training objectives with the level of diversity explored here, i.e. multi-lingual NMT, natural language inference, constituency parsing and skip-thought vectors. These representations are useful for transfer and low-resource learning. GenSen is trained on several data sources with multiple training objectives on over 100 milion sentences.\n", - "\n", - "The GenSen model is most similar to that of Luong et al. (2015) [\\[4\\]](#References), who train a many-to-many **sequence-to-sequence** model on a diverse set of weakly ralated tasks that includes machine translation, constituency parsing, image captioning, sequence autoencoding, and intra-sentence skip-thoughts. However, there are two key differences. GenSen uses an attention mechanism preventing learning a fixed-length vector representation for a sentence and it aims for learning re-usable sentence representations that transfers elsewhere, as opposed to Luong's work aims for improvements on the same tasks on which the model is trained.\n", + "GenSen is a technique to learn general purpose, fixed-length representations of sentences via multi-task training. GenSen model combines the benefits of diverse sentence-representation learning objectives into a single multi-task framework. \"This is the first large-scale reusable sentence representation model obtained by combining a set of training objectives with the level of diversity explored here, i.e. multi-lingual NMT, natural language inference, constituency parsing and skip-thought vectors.\" [\\[1\\]](#References) These representations are useful for transfer and low-resource learning. GenSen is trained on several data sources with multiple training objectives on over 100 milion sentences.\n", "\n", + "The GenSen model is most similar to that of Luong et al. (2015) [\\[4\\]](#References), who train a many-to-many **sequence-to-sequence** model on a diverse set of weakly related tasks that includes machine translation, constituency parsing, image captioning, sequence autoencoding, and intra-sentence skip-thoughts. However, there are two key differences. \"First, like McCann et al. (2017) [\\[5\\]](#References), their use of an attention mechanism prevents learning a fixed-length vector representation for a sentence. 
Second, their work aims for improvements on the same tasks on which the model is trained, as opposed to learning re-usable sentence representations that transfer elsewhere.\" [\\[1\\]](#References)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "### Sequence to Sequence Learning\n", "\n", "![Sequence to sequence learning examples - (left) machine translation and (right) constituent parsing](https://nlpbp.blob.core.windows.net/images/seq2seq.png)**Sequence to sequence learning examples - (left) machine translation and (right) constituent parsing**\n", "\n", - "Sequence to sequence learning (*seq2seq*) aims to directly model the conditional probability $p(x|y)$ of mapping an input sequence, $x_1,...,x_n$, into an output sequence, $y_1,...,y_m$. It accomplishes such goal through the *encoder-decoder* framework. As illustrated in the above figure, the encoder computes a representation $s$ for each input sequence. Based on that input representation, the *decoder* generates an ouput sequence, one unit at a time, and hence, decomposes the conditional probability as:\n", + "\"Sequence to sequence learning (*seq2seq*) aims to directly model the conditional probability $p(x|y)$ of mapping an input sequence, $x_1,...,x_n$, into an output sequence, $y_1,...,y_m$. It accomplishes such goal through the *encoder-decoder* framework. As illustrated in the above figure, the encoder computes a representation $s$ for each input sequence. Based on that input representation, the *decoder* generates an ouput sequence, one unit at a time, and hence, decomposes the conditional probability as\" [\\[4\\]](#References):\n", "\n", "$$\n", "\\log p(y|x)=\\sum_{j=1}^{m} \\log p(y_i|y_{\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -158,7 +242,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -201,7 +285,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -212,18 +296,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 1.1 Load SNLI Dataset\n", - "We provide a function `load_pandas_df` which\n", - "* Downloads the SNLI zipfile at the specified directory location\n", - "* Extracts the file based on the specified split\n", - "* Loads the split as a pandas dataframe" + "## 1.1 Load SNLI Dataset" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 5, "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [ { @@ -404,7 +484,7 @@ "4 2267923837.jpg#2r1e entailment NaN NaN NaN NaN " ] }, - "execution_count": 7, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -426,16 +506,96 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 1.2 Tokenize\n", - "Now that we've loaded the data into a pandas.DataFrame, we can tokenize the sentences.\n", - "We also clean the data before tokenizing. This includes dropping unneccessary columns and renaming the relevant columns as score, sentence_1, and sentence_2." + "## 1.2 Tokenize" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 6, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
scoresentence1sentence2
0neutralA person on a horse jumps over a broken down a...A person is training his horse for a competition.
1contradictionA person on a horse jumps over a broken down a...A person is at a diner, ordering an omelette.
2entailmentA person on a horse jumps over a broken down a...A person is outdoors, on a horse.
3neutralChildren smiling and waving at cameraThey are smiling at their parents
4entailmentChildren smiling and waving at cameraThere are children present
\n", + "
" + ], + "text/plain": [ + " score sentence1 \\\n", + "0 neutral A person on a horse jumps over a broken down a... \n", + "1 contradiction A person on a horse jumps over a broken down a... \n", + "2 entailment A person on a horse jumps over a broken down a... \n", + "3 neutral Children smiling and waving at camera \n", + "4 entailment Children smiling and waving at camera \n", + "\n", + " sentence2 \n", + "0 A person is training his horse for a competition. \n", + "1 A person is at a diner, ordering an omelette. \n", + "2 A person is outdoors, on a horse. \n", + "3 They are smiling at their parents \n", + "4 There are children present " + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "def clean(df, file_split):\n", " src_file_path = os.path.join(BASE_DATA_PATH, \"raw/snli_1.0/snli_1.0_{}.txt\".format(file_split))\n", @@ -462,25 +622,9 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "[nltk_data] Downloading package punkt to\n", - "[nltk_data] C:\\Users\\lishao\\AppData\\Roaming\\nltk_data...\n", - "[nltk_data] Package punkt is already up-to-date!\n", - "[nltk_data] Downloading package punkt to\n", - "[nltk_data] C:\\Users\\lishao\\AppData\\Roaming\\nltk_data...\n", - "[nltk_data] Package punkt is already up-to-date!\n", - "[nltk_data] Downloading package punkt to\n", - "[nltk_data] C:\\Users\\lishao\\AppData\\Roaming\\nltk_data...\n", - "[nltk_data] Package punkt is already up-to-date!\n" - ] - } - ], + "outputs": [], "source": [ "train_tok = to_nltk_tokens(to_lowercase(train))\n", "dev_tok = to_nltk_tokens(to_lowercase(dev))\n", @@ -499,33 +643,24 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "metadata": { "scrolled": true }, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "C:\\Users\\lishao\\Project\\Rotation2\\NLP\\data\\clean/snli_1.0/snli_1.0_train.txt\n", - "C:\\Users\\lishao\\Project\\Rotation2\\NLP\\data\\clean/snli_1.0/snli_1.0_dev.txt\n", - "C:\\Users\\lishao\\Project\\Rotation2\\NLP\\data\\clean/snli_1.0/snli_1.0_test.txt\n" - ] - }, { "data": { "text/plain": [ - "'C:\\\\Users\\\\lishao\\\\Project\\\\Rotation2\\\\NLP\\\\data\\\\clean/snli_1.0'" + "'../../data\\\\clean/snli_1.0'" ] }, - "execution_count": 6, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "gensen_preprocess(train_tok, dev_tok, test_tok, os.path.abspath(BASE_DATA_PATH))" + "gensen_preprocess(train_tok, dev_tok, test_tok, BASE_DATA_PATH)" ] }, { @@ -535,100 +670,94 @@ "## 1.4 Upload to Azure Blob Storage\n", "We make the data accessible remotely by uploading that data from your local machine into Azure. Then it can be accessed for remote training. The datastore is a convenient construct associated with your workspace for you to upload or download data. You can also interact with it from your remote compute targets. 
It's backed by an Azure Blob storage account.\n", "\n", - "**Note: User needs to upload all the files under `data_folder` MANUALLY to Azure Blob storage account for now, because the uploading function has bugs on Azure Blob.**" + "**Note: If you already have all the files under `clean/snli_1.0/` in your default datastorage, you DO NOT need to redo this section.**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To download some of the data required to train a GenSen model, run the bash file [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). Make sure to upload all the large files to azure file share. You can access to datastore by using `ds.as_mount()`." ] }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ - "data_folder = os.path.join(BASE_DATA_PATH, \"clean/snli_1.0/\")" + "data_folder = os.path.join(BASE_DATA_PATH, \"clean/snli_1.0\")" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AzureFile maidaptest3334372853 azureml-filestore-792de9d4-7d0a-464c-b40a-58584f23f5ec $AZUREML_DATAREFERENCE_liqungensen\n" + ] + } + ], + "source": [ + "ds = ws.get_default_datastore()\n", + "print(ds.datastore_type, ds.account_name, ds.container_name, ds.as_mount())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# 2 Train GenSen Model with Distributed Pytorch with Horovod on AzureML\n", - "In this tutorial, you will train a GenSen model with PyTorch on AML using distributed training across a GPU cluster. This could also be a generic guideline to train models using GPU cluster.\n", + "**Prerequisites:**\n", "\n", - "Once you've created your workspace and set up your development environment, training a model in Azure Machine Learning involves the following steps:\n", - "1. Create a remote compute target (note you can also use local computer as compute target)\n", - "2. Prepare your training data and upload it to datastore\n", - "3. Create your training script\n", - "4. Create an Estimator object\n", - "5. Submit the estimator to an experiment object under the workspace" + "Upload the all the local files under `data_folder` to the default datastore.\n", + "\n", + "**Note: To download data required to train a GenSen model in the original paper, run code [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). By training on the original datasets (training time around 20 hours), it will reproduce the results in the [paper](https://arxiv.org/abs/1804.00079). For simplicity, we will train on a smaller dataset, which is SNLI preprocessed in [1 Data Loading and Preprocessing](#1-Data-Loading-and-Preprocessing) for showcasing the example.**" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 11, "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "$AZUREML_DATAREFERENCE_liqungensen" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "## 2.1 Initialization\n", - "In this section, we will initialize workspace and create a AmlCompute for training." 
+ "ds.upload(src_dir=data_folder, overwrite=True, show_progress=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 2.1.1 Initialize Workspace\n", + "# 2 Train GenSen Model with Distributed Pytorch with Horovod on AzureML\n", + "In this tutorial, you will train a GenSen model with PyTorch on AML using distributed training across a GPU cluster.\n", "\n", - "Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. For instructions on how to do this, see [here](README.md). `Workspace.from_config()` creates a workspace object from the details stored in `config.json`." + "Once you've created your workspace and set up your development environment, training a model in Azure Machine Learning involves the following steps:\n", + "1. Create a remote compute target (note you can also use local computer as compute target)\n", + "2. Prepare your training data and upload it to datastore\n", + "3. Create your training script\n", + "4. Create an Estimator object\n", + "5. Submit the estimator to an experiment object under the workspace" ] }, { - "cell_type": "code", - "execution_count": 19, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "WARNING - You have logged in. Now let us find all the subscriptions to which you have access...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Interactive authentication successfully completed.\n", - "Workspace name: MAIDAPTest\n", - "Azure region: eastus2\n", - "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", - "Resource group: nlprg\n" - ] - } - ], "source": [ - "ws = azureml_utils.get_or_create_workspace(\n", - " subscription_id=\"\",\n", - " resource_group=\"\",\n", - " workspace_name=\"\",\n", - " workspace_region=\"\"\n", - ")\n", - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep='\\n')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2.1.2 Create or Attach Existing AmlCompute\n", + "## 2.1 Create or Attach Existing AmlCompute\n", "You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource. 
Specifically, the below code creates an `STANDARD_NC6` GPU cluster that autoscales from `0` to `4` nodes.\n", "\n", "**Creation of AmlCompute takes approximately 5 minutes.** If the AmlCompute with that name is already in your workspace, this code will skip the creation process.\n", @@ -640,7 +769,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -648,16 +777,13 @@ "output_type": "stream", "text": [ "Found existing compute target.\n", - "{'currentNodeCount': 4, 'targetNodeCount': 4, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 4, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-05-31T21:24:32.828000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + "{'currentNodeCount': 2, 'targetNodeCount': 2, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 2, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-20T22:33:59.775000+00:00', 'errors': None, 'creationTime': '2019-06-19T02:57:39.833104+00:00', 'modifiedTime': '2019-06-19T02:58:11.339451+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] } ], "source": [ - "from azureml.core.compute import ComputeTarget, AmlCompute\n", - "from azureml.core.compute_target import ComputeTargetException\n", - "\n", "# choose a name for your cluster\n", - "cluster_name = \"gpucluster\"\n", + "cluster_name = \"gensen-mlflow\"\n", "\n", "try:\n", " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", @@ -665,7 +791,7 @@ "except ComputeTargetException:\n", " print('Creating a new compute target...')\n", " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", - " max_nodes=4)\n", + " max_nodes=2)\n", "\n", " # create the cluster\n", " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", @@ -680,31 +806,20 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 2.2 Settings for GenSen\n", - "In this section, we set the GenSen code folder and data folder for training." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2.2.1 Access to a Project Directory\n", - "Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.\n", + "## 2.2 Access to a Project Directory\n", + "In this section, we set the GenSen code folder and data folder for training. Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.\n", "\n", - "`project_folder` contains all the code you want to submit to AmlCompute to run. 
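Because the whole folder is snapshotted and uploaded on submission, an optional sanity check of its footprint before submitting can save a failed run. A small, self-contained sketch using the notebook's `project_folder` variable (the `folder_size_mb` helper is defined here for illustration):

```python
# Optional check: report the size of the folder that will be snapshotted and
# submitted to AmlCompute. Large artifacts (embeddings, datasets) should stay
# in the datastore rather than in this folder.
import os

def folder_size_mb(path):
    total_bytes = 0
    for root, _, files in os.walk(path):
        for name in files:
            total_bytes += os.path.getsize(os.path.join(root, name))
    return total_bytes / (1024 * 1024)

print('{:.1f} MB in {}'.format(folder_size_mb(project_folder), project_folder))
```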
The size of the folder can not exceed 300Mb. In GenSen model, it loads large pre-trained embedding files to the model. Thus, we need to save large files in datastore and only uploads code to `project_folder`." + "`project_folder` contains all the code you want to submit to AmlCompute to run. The size of the folder can not exceed 300Mb. In GenSen model, it loads large pre-trained embedding files to the model. Thus, we need to save large files in datastore and only upload code to `project_folder`. We set the gensen project folder under `utils_nlp`." ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 25, "metadata": {}, "outputs": [], "source": [ - "import os\n", - "\n", "# Change the path to where your model code locates.\n", - "\n", - "project_folder = '../../utils_nlp/model/gensen/'\n", + "project_folder = '../../'\n", "os.makedirs(project_folder, exist_ok=True)" ] }, @@ -712,81 +827,71 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 2.2.2 Access to Datastore\n", - "To download some of the data required to train a GenSen model, run the bash file [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). Make sure to upload all the large files to azure file share. You can access to datastore by using `ds.as_mount()`.\n", + "## 2.3 Train Model on the Remote Compute\n", + "Now that we have the AmlCompute ready to go, let's run our distributed training job." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.3.1 Prepare Training Script\n", + "Now you will need to create your training script. In this tutorial, the script for distributed training of GENSEN is already provided for you at `train.py`. In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code.\n", "\n", - "**Note: To download data required to train a GenSen model in the original paper, run code [here](https://github.com/Maluuba/gensen/blob/master/get_data.sh). By training on the original datasets (training time around 20 hours), it will reproduce the results in the [paper](https://arxiv.org/abs/1804.00079). For simplicity, we will train on a smaller dataset, which is SNLI preprocessed in [1 Data Loading and Preprocessing](#1-Data-Loading-and-Preprocessing) for showcasing the example.**" + "However, if you would like to use Azure ML's [metric logging](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#logging) capabilities, you will have to add a small amount of Azure ML logic inside your training script. 
In this example, at each logging interval, we will log the loss for that minibatch to our Azure ML run.\n", + "\n", + "To do so, in `train.py`, we will first access the Azure ML `Run` object within the script:\n", + "```Python\n", + "from azureml.core.run import Run\n", + "run = Run.get_context()\n", + "```\n", + "Later within the script, we log the loss metric to our run:\n", + "```Python\n", + "run.log('loss', loss.item())\n", + "```" ] }, { - "cell_type": "code", - "execution_count": 4, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "from azureml.core import Datastore\n", - "ds = Datastore.register_azure_file_share(workspace=ws,\n", - " datastore_name= 'GenSen',\n", - " file_share_name='azureml-filestore-792de9d4-7d0a-464c-b40a-58584f23f5ec',\n", - " account_name='maidaptest3334372853',\n", - " account_key='p0qz3rO4YWDeRRyhU+aQycW8kD2vvF061OyURSLwwQxkfQmhfch48tC+kFzBdZlJPDR/Jk8JoFxSLxKbUaZ1lQ==')" + "The training process follows the steps:\n", + "1. Create or load the dataset vocabulary\n", + "2. Train on the training dataset for each batch epoch (batch size = 48 updates)\n", + "3. Evaluate on the validation dataset for every 10 epochs\n", + "4. Find the local minimum point on validation loss\n", + "5. Save the best model and stop the training process" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Prerequisites:**\n", - "\n", - "Upload the all the files under `data_folder` in [1.4 Upload to Azure Blob Storage](#1.4-Upload-to-Azure-Blob-Storage) to the path `./data/processed/` on the above datastore." + "Once your script is ready, copy the training script `gensen_train.py` and config file `gensen_config.json` into the project directory." ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 26, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "$AZUREML_DATAREFERENCE_gensen" + "'../../utils_nlp/gensen/gensen_config.json'" ] }, - "execution_count": 5, + "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "ds.as_mount()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2.3 Train model on the Remote Compute\n", - "Now that we have the AmlCompute ready to go, let's run our distributed training job." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 2.3.1 Prepare Training Script\n", - "Now you will need to create your training script. In this tutorial, the script for distributed training of GENSEN is already provided for you at `train.py`. In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code.\n", - "\n", - "However, if you would like to use Azure ML's [metric logging](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#logging) capabilities, you will have to add a small amount of Azure ML logic inside your training script. 
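A side note on data placement: with the dedicated file-share datastore shown above being dropped in favour of the workspace's default datastore (which the estimator defined below mounts and passes to the script as `--data_folder`), the preprocessed SNLI files need to be uploaded to that default datastore instead. The sketch below is only an illustration of that step; the local source path is a placeholder, not a path taken from this repository.

```python
from azureml.core import Workspace

# Rough sketch: keep the large preprocessed files on the workspace's default
# datastore so they stay out of project_folder (which is capped at 300 MB).
ws = Workspace.from_config()              # assumes a local workspace config.json
ds = ws.get_default_datastore()
ds.upload(
    src_dir="path/to/preprocessed/snli",  # placeholder folder holding the .tok/.clean files
    target_path=None,                     # upload to the datastore root, where the updated gensen_config.json paths point
    overwrite=True,
    show_progress=True,
)
data_ref = ds.as_mount()                  # later handed to the run as the --data_folder argument
```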
In this example, at each logging interval, we will log the loss for that minibatch to our Azure ML run.\n", + "import shutil\n", "\n", - "To do so, in `train.py`, we will first access the Azure ML `Run` object within the script:\n", - "```Python\n", - "from azureml.core.run import Run\n", - "run = Run.get_context()\n", - "```\n", - "Later within the script, we log the loss metric to our run:\n", - "```Python\n", - "run.log('loss', loss.item())\n", - "```" + "gensen_folder = os.path.join(project_folder,'utils_nlp/gensen/')\n", + "shutil.copy('gensen_train.py', gensen_folder)\n", + "shutil.copy('gensen_config.json', gensen_folder)" ] }, { @@ -799,14 +904,12 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ - "from azureml.core import Experiment, get_run\n", - "\n", "experiment_name = 'pytorch-gensen'\n", - "experiment = Experiment(ws, name=experiment_name)\n" + "experiment = Experiment(ws, name=experiment_name)" ] }, { @@ -816,39 +919,45 @@ "### 2.3.3 Create a PyTorch Estimator\n", "The Azure ML SDK's PyTorch estimator enables you to easily submit PyTorch training jobs for both single-node and distributed runs. For more information on the PyTorch estimator, refer [here](https://docs.microsoft.com/azure/machine-learning/service/how-to-train-pytorch).\n", "\n", - "`sample_config.json` defines all the hyper parameters and paths when training GenSen model. The trained model will be saved in `data/models/example` to Azure Blob Storage." + "`gensen_config.json` defines all the hyperparameters and paths when training GenSen model. The trained model will be saved in `models` to Azure Blob Storage. **Remember to clean `models` folder in order to save new models.**" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 28, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - framework_version is not specified, defaulting to version 1.1.\n" + ] + } + ], "source": [ - "from azureml.train.dnn import PyTorch\n", - "from azureml.train.estimator import Estimator\n", - "\n", "script_params = {\n", - " '--config': 'sample_config.json',\n", - " '--data_folder': ds.as_mount()}\n", + " '--config': 'utils_nlp/gensen/gensen_config.json',\n", + " '--data_folder': ws.get_default_datastore().as_mount()}\n", "\n", "estimator = PyTorch(source_directory=project_folder,\n", " script_params=script_params,\n", " compute_target=compute_target,\n", - " entry_script='train.py',\n", - " node_count=4,\n", + " entry_script='utils_nlp/gensen/gensen_train.py',\n", + " node_count=2,\n", " process_count_per_node=1,\n", - " distributed_backend='mpi',\n", + " distributed_training=MpiConfiguration(),\n", " use_gpu=True,\n", - " conda_packages=['scikit-learn=0.20.3']\n", - " )\n" + " conda_packages=['scikit-learn=0.20.3', 'h5py', 'nltk'],\n", + " pip_packages=['azureml-mlflow>=1.0.41','numpy>=1.16.0']\n", + " )" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The above code specifies that we will run our training script on `4` nodes, with one worker per node. In order to execute a distributed run using GPU, you must provide the argument `use_gpu=True`. To execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. 
If you are the first time to create a experiment, it may take longer to set up conda environments under `.azureml/conda_dependencies.yml`. After the first run, it will use the existing conda environments and directly run the code. However, if your script also uses other packages, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters. The more required packages are stored in `.azureml/conda_dependencies.yml` file.\n", + "The above code specifies that we will run our training script on `4` nodes, with one worker per node. In order to execute a distributed run using GPU, you must provide the argument `use_gpu=True`. To execute a distributed run using MPI/Horovod, you must provide the argument `distributed_backend='mpi'`. Using this estimator with these settings, PyTorch, Horovod and their dependencies will be installed for you. If this is the first time creating an experiment, it may take longer to set up conda environments under `.azureml/conda_dependencies.yml`. After the first run, it will use the existing conda environments and directly run the code. However, if your script also uses other packages not initialized in `.azureml/conda_dependencies.yml` environment file, make sure to install them via the `PyTorch` constructor's `pip_packages` or `conda_packages` parameters.\n", "\n", "**Requirements:**\n", "- python=3.6.2\n", @@ -867,21 +976,23 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 2.3.4 Submit or Cancel a job\n", + "### 2.3.4 Submit a job\n", "Run your experiment by submitting your estimator object. Note that this call is asynchronous." ] }, { "cell_type": "code", - "execution_count": 13, - "metadata": {}, + "execution_count": 29, + "metadata": { + "scrolled": true + }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Run(Experiment: pytorch-gensen,\n", - "Id: pytorch-gensen_1559577451_8b3c6f42,\n", + "Id: pytorch-gensen_1561070750_49a97f3c,\n", "Type: azureml.scriptrun,\n", "Status: Queued)\n" ] @@ -896,59 +1007,71 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Cancel the job**\n", - "\n", - "It's better to cancel the job manually to make sure you does not waste resources." + "### 2.3.5 Monitor your run\n", + "You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. You can see that the widget automatically plots and visualizes the loss metric that we logged to the Azure ML run." ] }, { - "cell_type": "code", - "execution_count": 12, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "# Cancel the job with id.\n", - "# job_id = \"pytorch-gensen_1555533596_d9cc75fe\"\n", - "# run = get_run(experiment, job_id)\n", + "#### Horovod on AzureML\n", "\n", - "# Cancel jobs.\n", - "run.cancel()" + "[Horovod](https://github.com/horovod/horovod) is a distributed training framework for TensorFlow, PyTorch etc. to make distributed Deep Learning fast and easy to use. We have created 2 nodes in the GPU cluster on AzureML. By using Horovod, we can use those two machines to train the model in parallel. In theory, the model trains faster on AzureML than on VM which uses single machine because it converges faster which we will get lower loss. However, by using more nodes, the model may take more time in communicating with each node. 
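For readers new to Horovod, the sketch below shows the handful of calls it adds to an ordinary PyTorch training loop. This is a generic toy example, not the GenSen training code: the model, data, and learning-rate scaling are illustrative assumptions, and a GPU is optional.

```python
import horovod.torch as hvd
import torch
import torch.nn as nn

hvd.init()                                   # one process per node/GPU; Horovod assigns ranks
if torch.cuda.is_available():
    torch.cuda.set_device(hvd.local_rank())  # pin each process to its own GPU

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = nn.Linear(10, 1).to(device)          # toy model standing in for the multi-task GenSen model
x = torch.randn(48, 10, device=device)       # toy batch (batch size 48, as in the GenSen config)
y = torch.randn(48, 1, device=device)

# Average gradients across workers and start every worker from rank 0's weights.
# Scaling the learning rate by hvd.size() is a common Horovod convention, not
# something taken from gensen_train.py.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4 * hvd.size())
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)

loss_fn = nn.MSELoss()
for step in range(10):
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    optimizer.step()
    if hvd.rank() == 0:                      # log from a single worker only
        print(step, loss.item())
```

The `hvd.init()` and rank checks visible in the `gensen_train.py` diff further below follow this same pattern.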
The communication time could be ignored when the model is trained on the large datasets.\n", + "\n", + "AzureML can automatically create figures on the loss and time, which is eaiser to track the performance as in the following figure shown the valiation loss v.s. the number of epochs:\n", + "![best_val_loss](https://nlpbp.blob.core.windows.net/images/best_val_loss.PNG)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 2.3.5 Monitor your run\n", - "You can monitor the progress of the run with a Jupyter widget. Like the run submission, the widget is asynchronous and provides live updates every 10-15 seconds until the job completes. You can see that the widget automatically plots and visualizes the loss metric that we logged to the Azure ML run." + "#### Interpret the Training Results\n", + "\n", + "The following chart shows the model validation loss (the less loss, the better performance) with different nodes with AmlCompute:\n", + "\n", + "| Standard_NC6 | AML_1node | AML_2nodes | AML_4nodes | AML_8nodes |\n", + "| --- | --- | --- | --- | --- |\n", + "| Best_val_loss | 4.81 | 4.78 | 4.77 | 4.58 |\n", + "\n", + "From the chart, we can tell training with more nodes, the performance is getting better with lower loss." ] }, { - "cell_type": "code", - "execution_count": 14, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "debca7cb57da4fc0b97c05b973fa0412", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 's…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], "source": [ - "from azureml.widgets import RunDetails\n", + "The Azureml Widget allows an easy way to stream updates of the logged metrics right into your notebook. To use this feature install the widget by running the commands below. \n", + "\n", + "```\n", + "conda install ipywidgets\n", + "\n", + "jupyter nbextension install --py --user azureml.widgets\n", + "\n", + "jupyter nbextension enable azureml.widgets --user --py\n", "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ "RunDetails(run).show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![AML_results](https://nlpbp.blob.core.windows.net/images/aml_results.PNG)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -958,513 +1081,38 @@ }, { "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "RunId: pytorch-gensen_1559153095_0e7f4645\n", - "\n", - "Streaming azureml-logs/80_driver_log_rank_0.txt\n", - "===============================================\n", - "\n", - "Building vocabulary ...\n", - "Building common source vocab ...\n", - "Found existing vocab file. Reloading ...\n", - "Building target vocabs ...\n", - "Found existing vocab file. 
Reloading ...\n", - "Reloading vocab for snli \n", - "Fetching sentences ...\n", - "Processing corpus : 0 task snli \n", - "Reached end of dataset, reseting file pointer ...\n", - "Fetching sentences ...\n", - "Processing corpus : 0 task snli \n", - "Fetched 1000000 sentences\n", - "Fetched 1000000 sentences\n", - "2019-05-29 18:05:35,740 - INFO - Finished creating iterator ...\n", - "2019-05-29 18:05:35,747 - INFO - Found 19966 words in source : \n", - "2019-05-29 18:05:35,753 - INFO - Found 30004 target words in task snli \n", - "2019-05-29 18:05:35,758 - INFO - Model Parameters : \n", - "2019-05-29 18:05:35,763 - INFO - Task : multi-seq2seq-nli \n", - "2019-05-29 18:05:35,768 - INFO - Source Word Embedding Dim : 512\n", - "2019-05-29 18:05:35,772 - INFO - Target Word Embedding Dim : 512\n", - "2019-05-29 18:05:35,777 - INFO - Source RNN Hidden Dim : 2048\n", - "2019-05-29 18:05:35,781 - INFO - Target RNN Hidden Dim : 2048\n", - "2019-05-29 18:05:35,788 - INFO - Source RNN Bidirectional : True\n", - "2019-05-29 18:05:35,792 - INFO - Batch Size : 48 \n", - "2019-05-29 18:05:35,806 - INFO - Optimizer : adam \n", - "2019-05-29 18:05:35,844 - INFO - Learning Rate : 0.000100 \n", - "2019-05-29 18:05:35,849 - INFO - Found 19966 words in src \n", - "2019-05-29 18:05:35,854 - INFO - Found 30004 words in trg \n", - "/azureml-envs/azureml_4737f522821717a6daa7464dfd956f84/lib/python3.6/site-packages/torch/nn/modules/rnn.py:46: UserWarning: dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1, but got dropout=0.3 and num_layers=1\n", - " \"num_layers={}\".format(dropout, num_layers))\n", - "2019-05-29 18:05:40,676 - INFO - MultitaskModel(\n", - " (src_embedding): Embedding(19966, 512, padding_idx=1)\n", - " (encoder): GRU(512, 1024, batch_first=True, dropout=0.3, bidirectional=True)\n", - " (enc_drp): Dropout(p=0.3)\n", - " (trg_embedding): ModuleList(\n", - " (0): Embedding(30004, 512, padding_idx=1)\n", - " )\n", - " (decoders): ModuleList(\n", - " (0): ConditionalGRU(\n", - " (input_weights): Linear(in_features=512, out_features=6144, bias=True)\n", - " (hidden_weights): Linear(in_features=2048, out_features=6144, bias=True)\n", - " (peep_weights): Linear(in_features=2048, out_features=6144, bias=True)\n", - " )\n", - " )\n", - " (decoder2vocab): ModuleList(\n", - " (0): Linear(in_features=2048, out_features=30004, bias=True)\n", - " )\n", - " (nli_decoder): Sequential(\n", - " (0): Dropout(p=0.3)\n", - " (1): Linear(in_features=8192, out_features=512, bias=True)\n", - " (2): ReLU()\n", - " (3): Linear(in_features=512, out_features=3, bias=True)\n", - " )\n", - ")\n", - "2019-05-29 18:05:40,715 - INFO - Could not find model checkpoint, starting afresh\n", - "2019-05-29 18:05:40,720 - INFO - Commencing Training ...\n", - "train.py:245: UserWarning: torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_.\n", - " torch.nn.utils.clip_grad_norm(model.parameters(), 1.)\n", - "2019-05-29 18:05:40,930 - INFO - ############################\n", - "2019-05-29 18:05:40,960 - INFO - ##### Evaluating model #####\n", - "2019-05-29 18:05:40,968 - INFO - ############################\n", - "/azureml-envs/azureml_4737f522821717a6daa7464dfd956f84/lib/python3.6/site-packages/torch/nn/functional.py:1332: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n", - " warnings.warn(\"nn.functional.sigmoid is deprecated. 
Use torch.sigmoid instead.\")\n", - "/azureml-envs/azureml_4737f522821717a6daa7464dfd956f84/lib/python3.6/site-packages/torch/nn/functional.py:1320: UserWarning: nn.functional.tanh is deprecated. Use torch.tanh instead.\n", - " warnings.warn(\"nn.functional.tanh is deprecated. Use torch.tanh instead.\")\n", - "2019-05-29 18:06:57,575 - INFO - snli Validation Loss : 10.312\n", - "2019-05-29 18:06:57,602 - INFO - Evaluating on NLI\n", - "train.py:390: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", - " class_preds = F.softmax(class_logits).data.cpu().numpy().argmax(\n", - "2019-05-29 18:07:04,824 - INFO - NLI Dev Acc : 0.32930\n", - "train.py:412: UserWarning: Implicit dimension choice for softmax has been deprecated. Change the call to include dim=X as an argument.\n", - " class_preds = F.softmax(class_logits).data.cpu().numpy().argmax(\n", - "2019-05-29 18:07:11,986 - INFO - NLI Test Acc : 0.32736\n", - "2019-05-29 18:07:12,004 - INFO - ******************************************************\n", - "/mnt/batch/tasks/shared/LS_root/jobs/maidaptest/azureml/pytorch-gensen_1559153095_0e7f4645/mounts/workspaceblobstore/azureml/pytorch-gensen_1559153095_0e7f4645/utils.py:310: UserWarning: volatile was removed and now has no effect. Use `with torch.no_grad():` instead.\n", - " torch.LongTensor(sorted_src_lens), volatile=True\n", - "train.py:316: UserWarning: torch.nn.utils.clip_grad_norm is now deprecated in favor of torch.nn.utils.clip_grad_norm_.\n", - " torch.nn.utils.clip_grad_norm(model.parameters(), 1.)\n", - "2019-05-29 18:09:30,890 - INFO - Seq2Seq Examples Processed : 9600 snli Loss : 5.71808 Num snli minibatches : 180\n", - "2019-05-29 18:09:30,946 - INFO - Round: 200 NLI Epoch : 0 NLI Examples Processed : 1008 NLI Loss : 1.12384\n", - "2019-05-29 18:09:30,996 - INFO - Average time per mininbatch : 0.69195\n", - "2019-05-29 18:09:31,032 - INFO - ******************************************************\n", - "2019-05-29 18:11:39,438 - INFO - Seq2Seq Examples Processed : 19200 snli Loss : 4.66382 Num snli minibatches : 180\n", - "2019-05-29 18:11:39,514 - INFO - Round: 400 NLI Epoch : 0 NLI Examples Processed : 1968 NLI Loss : 1.08484\n", - "2019-05-29 18:11:39,521 - INFO - Average time per mininbatch : 0.64192\n", - "2019-05-29 18:11:39,532 - INFO - ******************************************************\n", - "2019-05-29 18:13:46,059 - INFO - Seq2Seq Examples Processed : 28800 snli Loss : 4.33601 Num snli minibatches : 180\n", - "2019-05-29 18:13:46,068 - INFO - Round: 600 NLI Epoch : 0 NLI Examples Processed : 2928 NLI Loss : 1.08472\n", - "2019-05-29 18:13:46,073 - INFO - Average time per mininbatch : 0.63259\n", - "2019-05-29 18:13:46,079 - INFO - ******************************************************\n", - "2019-05-29 18:15:46,846 - INFO - Seq2Seq Examples Processed : 38400 snli Loss : 4.09014 Num snli minibatches : 180\n", - "2019-05-29 18:15:46,914 - INFO - Round: 800 NLI Epoch : 0 NLI Examples Processed : 3888 NLI Loss : 1.04902\n", - "2019-05-29 18:15:46,921 - INFO - Average time per mininbatch : 0.60379\n", - "2019-05-29 18:15:46,928 - INFO - ******************************************************\n", - "2019-05-29 18:17:46,728 - INFO - Seq2Seq Examples Processed : 48000 snli Loss : 3.93451 Num snli minibatches : 180\n", - "2019-05-29 18:17:46,761 - INFO - Round: 1000 NLI Epoch : 0 NLI Examples Processed : 4848 NLI Loss : 1.04646\n", - "2019-05-29 18:17:46,797 - INFO - Average time per mininbatch : 
0.59882\n", - "2019-05-29 18:17:46,806 - INFO - ******************************************************\n", - "2019-05-29 18:19:45,026 - INFO - Seq2Seq Examples Processed : 57600 snli Loss : 3.80465 Num snli minibatches : 180\n", - "2019-05-29 18:19:45,059 - INFO - Round: 1200 NLI Epoch : 0 NLI Examples Processed : 5808 NLI Loss : 1.03508\n", - "2019-05-29 18:19:45,066 - INFO - Average time per mininbatch : 0.59106\n", - "2019-05-29 18:19:45,101 - INFO - ******************************************************\n", - "2019-05-29 18:21:41,592 - INFO - Seq2Seq Examples Processed : 67200 snli Loss : 3.69689 Num snli minibatches : 180\n", - "2019-05-29 18:21:41,609 - INFO - Round: 1400 NLI Epoch : 0 NLI Examples Processed : 6768 NLI Loss : 1.01259\n", - "2019-05-29 18:21:41,615 - INFO - Average time per mininbatch : 0.58242\n", - "2019-05-29 18:21:41,621 - INFO - ******************************************************\n", - "2019-05-29 18:23:37,792 - INFO - Seq2Seq Examples Processed : 76800 snli Loss : 3.63456 Num snli minibatches : 180\n", - "2019-05-29 18:23:37,801 - INFO - Round: 1600 NLI Epoch : 0 NLI Examples Processed : 7728 NLI Loss : 1.01536\n", - "2019-05-29 18:23:37,807 - INFO - Average time per mininbatch : 0.58082\n", - "2019-05-29 18:23:37,813 - INFO - ******************************************************\n", - "2019-05-29 18:25:31,047 - INFO - Seq2Seq Examples Processed : 86400 snli Loss : 3.66306 Num snli minibatches : 180\n", - "2019-05-29 18:25:31,251 - INFO - Round: 1800 NLI Epoch : 0 NLI Examples Processed : 8688 NLI Loss : 0.98221\n", - "2019-05-29 18:25:31,264 - INFO - Average time per mininbatch : 0.56610\n", - "2019-05-29 18:25:31,270 - INFO - ******************************************************\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2019-05-29 18:27:25,602 - INFO - Seq2Seq Examples Processed : 96000 snli Loss : 3.44516 Num snli minibatches : 180\n", - "2019-05-29 18:27:25,708 - INFO - Round: 2000 NLI Epoch : 0 NLI Examples Processed : 9648 NLI Loss : 0.99616\n", - "2019-05-29 18:27:25,714 - INFO - Average time per mininbatch : 0.57163\n", - "2019-05-29 18:27:25,728 - INFO - ******************************************************\n", - "2019-05-29 18:29:17,873 - INFO - Seq2Seq Examples Processed : 105600 snli Loss : 3.59119 Num snli minibatches : 180\n", - "2019-05-29 18:29:17,894 - INFO - Round: 2200 NLI Epoch : 0 NLI Examples Processed : 10608 NLI Loss : 0.99196\n", - "2019-05-29 18:29:17,899 - INFO - Average time per mininbatch : 0.56068\n", - "2019-05-29 18:29:17,951 - INFO - ******************************************************\n", - "2019-05-29 18:31:09,519 - INFO - Seq2Seq Examples Processed : 115200 snli Loss : 3.35993 Num snli minibatches : 180\n", - "2019-05-29 18:31:09,529 - INFO - Round: 2400 NLI Epoch : 0 NLI Examples Processed : 11568 NLI Loss : 0.98847\n", - "2019-05-29 18:31:09,538 - INFO - Average time per mininbatch : 0.55776\n", - "2019-05-29 18:31:09,554 - INFO - ******************************************************\n", - "2019-05-29 18:33:01,925 - INFO - Seq2Seq Examples Processed : 124800 snli Loss : 3.48944 Num snli minibatches : 180\n", - "2019-05-29 18:33:01,960 - INFO - Round: 2600 NLI Epoch : 0 NLI Examples Processed : 12528 NLI Loss : 0.98408\n", - "2019-05-29 18:33:01,966 - INFO - Average time per mininbatch : 0.56180\n", - "2019-05-29 18:33:01,973 - INFO - ******************************************************\n", - "2019-05-29 18:34:50,796 - INFO - Seq2Seq Examples Processed : 134400 snli Loss : 
3.44262 Num snli minibatches : 180\n", - "2019-05-29 18:34:50,847 - INFO - Round: 2800 NLI Epoch : 0 NLI Examples Processed : 13488 NLI Loss : 0.91956\n", - "2019-05-29 18:34:50,852 - INFO - Average time per mininbatch : 0.54408\n", - "2019-05-29 18:34:50,858 - INFO - ******************************************************\n", - "2019-05-29 18:36:42,178 - INFO - Seq2Seq Examples Processed : 144000 snli Loss : 3.24111 Num snli minibatches : 180\n", - "2019-05-29 18:36:42,187 - INFO - Round: 3000 NLI Epoch : 0 NLI Examples Processed : 14448 NLI Loss : 0.94733\n", - "2019-05-29 18:36:42,193 - INFO - Average time per mininbatch : 0.55657\n", - "2019-05-29 18:36:42,201 - INFO - ******************************************************\n", - "2019-05-29 18:38:31,432 - INFO - Seq2Seq Examples Processed : 153600 snli Loss : 3.39076 Num snli minibatches : 180\n", - "2019-05-29 18:38:31,441 - INFO - Round: 3200 NLI Epoch : 0 NLI Examples Processed : 15408 NLI Loss : 0.96010\n", - "2019-05-29 18:38:31,447 - INFO - Average time per mininbatch : 0.54612\n", - "2019-05-29 18:38:31,453 - INFO - ******************************************************\n", - "2019-05-29 18:40:20,337 - INFO - Seq2Seq Examples Processed : 163200 snli Loss : 3.34304 Num snli minibatches : 180\n", - "2019-05-29 18:40:20,352 - INFO - Round: 3400 NLI Epoch : 0 NLI Examples Processed : 16368 NLI Loss : 0.90768\n", - "2019-05-29 18:40:20,359 - INFO - Average time per mininbatch : 0.54439\n", - "2019-05-29 18:40:20,365 - INFO - ******************************************************\n", - "2019-05-29 18:42:09,816 - INFO - Seq2Seq Examples Processed : 172800 snli Loss : 3.15766 Num snli minibatches : 180\n", - "2019-05-29 18:42:09,828 - INFO - Round: 3600 NLI Epoch : 0 NLI Examples Processed : 17328 NLI Loss : 0.90791\n", - "2019-05-29 18:42:09,834 - INFO - Average time per mininbatch : 0.54722\n", - "2019-05-29 18:42:09,840 - INFO - ******************************************************\n", - "2019-05-29 18:43:56,758 - INFO - Seq2Seq Examples Processed : 182400 snli Loss : 3.26524 Num snli minibatches : 180\n", - "2019-05-29 18:43:56,786 - INFO - Round: 3800 NLI Epoch : 0 NLI Examples Processed : 18288 NLI Loss : 0.92823\n", - "2019-05-29 18:43:56,795 - INFO - Average time per mininbatch : 0.53455\n", - "2019-05-29 18:43:56,802 - INFO - ******************************************************\n", - "2019-05-29 18:45:44,253 - INFO - Seq2Seq Examples Processed : 192000 snli Loss : 3.32298 Num snli minibatches : 180\n", - "2019-05-29 18:45:44,336 - INFO - Round: 4000 NLI Epoch : 0 NLI Examples Processed : 19248 NLI Loss : 0.85748\n", - "2019-05-29 18:45:44,343 - INFO - Average time per mininbatch : 0.53723\n", - "2019-05-29 18:45:44,349 - INFO - ******************************************************\n", - "2019-05-29 18:47:30,880 - INFO - Seq2Seq Examples Processed : 201600 snli Loss : 3.16485 Num snli minibatches : 180\n", - "2019-05-29 18:47:30,896 - INFO - Round: 4200 NLI Epoch : 0 NLI Examples Processed : 20208 NLI Loss : 0.90056\n", - "2019-05-29 18:47:30,901 - INFO - Average time per mininbatch : 0.53256\n", - "2019-05-29 18:47:30,907 - INFO - ******************************************************\n", - "2019-05-29 18:49:18,536 - INFO - Seq2Seq Examples Processed : 211200 snli Loss : 3.05513 Num snli minibatches : 180\n", - "2019-05-29 18:49:18,567 - INFO - Round: 4400 NLI Epoch : 0 NLI Examples Processed : 21168 NLI Loss : 0.86934\n", - "2019-05-29 18:49:18,579 - INFO - Average time per mininbatch : 0.53807\n", - "2019-05-29 
18:49:18,584 - INFO - ******************************************************\n", - "2019-05-29 18:51:04,240 - INFO - Seq2Seq Examples Processed : 220800 snli Loss : 3.24391 Num snli minibatches : 180\n", - "2019-05-29 18:51:04,358 - INFO - Round: 4600 NLI Epoch : 0 NLI Examples Processed : 22128 NLI Loss : 0.84865\n", - "2019-05-29 18:51:04,365 - INFO - Average time per mininbatch : 0.52824\n", - "2019-05-29 18:51:04,414 - INFO - ******************************************************\n", - "2019-05-29 18:52:50,226 - INFO - Seq2Seq Examples Processed : 230400 snli Loss : 3.25106 Num snli minibatches : 180\n", - "2019-05-29 18:52:50,268 - INFO - Round: 4800 NLI Epoch : 0 NLI Examples Processed : 23088 NLI Loss : 0.87072\n", - "2019-05-29 18:52:50,276 - INFO - Average time per mininbatch : 0.52897\n", - "2019-05-29 18:52:50,292 - INFO - ******************************************************\n", - "2019-05-29 18:54:35,638 - INFO - Seq2Seq Examples Processed : 240000 snli Loss : 3.06245 Num snli minibatches : 180\n", - "2019-05-29 18:54:35,664 - INFO - Round: 5000 NLI Epoch : 0 NLI Examples Processed : 24048 NLI Loss : 0.84427\n", - "2019-05-29 18:54:35,670 - INFO - Average time per mininbatch : 0.52669\n", - "2019-05-29 18:54:35,677 - INFO - ******************************************************\n", - "2019-05-29 18:56:21,116 - INFO - Seq2Seq Examples Processed : 249600 snli Loss : 2.99545 Num snli minibatches : 180\n", - "2019-05-29 18:56:21,167 - INFO - Round: 5200 NLI Epoch : 0 NLI Examples Processed : 25008 NLI Loss : 0.89375\n", - "2019-05-29 18:56:21,172 - INFO - Average time per mininbatch : 0.52716\n", - "2019-05-29 18:56:21,178 - INFO - ******************************************************\n", - "2019-05-29 18:58:03,355 - INFO - Seq2Seq Examples Processed : 259200 snli Loss : 3.12514 Num snli minibatches : 180\n", - "2019-05-29 18:58:03,377 - INFO - Round: 5400 NLI Epoch : 0 NLI Examples Processed : 25968 NLI Loss : 0.84170\n", - "2019-05-29 18:58:03,383 - INFO - Average time per mininbatch : 0.51073\n", - "2019-05-29 18:58:03,389 - INFO - ******************************************************\n", - "2019-05-29 18:59:45,907 - INFO - Seq2Seq Examples Processed : 268800 snli Loss : 3.13727 Num snli minibatches : 180\n", - "2019-05-29 18:59:45,929 - INFO - Round: 5600 NLI Epoch : 0 NLI Examples Processed : 26928 NLI Loss : 0.85129\n", - "2019-05-29 18:59:45,947 - INFO - Average time per mininbatch : 0.51247\n", - "2019-05-29 18:59:45,953 - INFO - ******************************************************\n", - "2019-05-29 19:01:28,814 - INFO - Seq2Seq Examples Processed : 278400 snli Loss : 3.08039 Num snli minibatches : 180\n", - "2019-05-29 19:01:28,851 - INFO - Round: 5800 NLI Epoch : 0 NLI Examples Processed : 27888 NLI Loss : 0.86816\n", - "2019-05-29 19:01:28,857 - INFO - Average time per mininbatch : 0.51426\n", - "2019-05-29 19:01:28,863 - INFO - ******************************************************\n", - "2019-05-29 19:03:09,420 - INFO - Seq2Seq Examples Processed : 288000 snli Loss : 2.87723 Num snli minibatches : 180\n", - "2019-05-29 19:03:09,452 - INFO - Round: 6000 NLI Epoch : 0 NLI Examples Processed : 28848 NLI Loss : 0.84560\n", - "2019-05-29 19:03:09,458 - INFO - Average time per mininbatch : 0.50258\n", - "2019-05-29 19:03:09,464 - INFO - ******************************************************\n", - "2019-05-29 19:04:52,208 - INFO - Seq2Seq Examples Processed : 297600 snli Loss : 2.91778 Num snli minibatches : 180\n", - "2019-05-29 19:04:52,251 - INFO - Round: 6200 NLI 
Epoch : 0 NLI Examples Processed : 29808 NLI Loss : 0.81515\n", - "2019-05-29 19:04:52,257 - INFO - Average time per mininbatch : 0.51368\n", - "2019-05-29 19:04:52,264 - INFO - ******************************************************\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2019-05-29 19:06:32,652 - INFO - Seq2Seq Examples Processed : 307200 snli Loss : 3.07575 Num snli minibatches : 180\n", - "2019-05-29 19:06:32,708 - INFO - Round: 6400 NLI Epoch : 0 NLI Examples Processed : 30768 NLI Loss : 0.78086\n", - "2019-05-29 19:06:32,715 - INFO - Average time per mininbatch : 0.50191\n", - "2019-05-29 19:06:32,722 - INFO - ******************************************************\n", - "2019-05-29 19:08:12,195 - INFO - Seq2Seq Examples Processed : 316800 snli Loss : 3.01557 Num snli minibatches : 180\n", - "2019-05-29 19:08:12,203 - INFO - Round: 6600 NLI Epoch : 0 NLI Examples Processed : 31728 NLI Loss : 0.85198\n", - "2019-05-29 19:08:12,225 - INFO - Average time per mininbatch : 0.49733\n", - "2019-05-29 19:08:12,255 - INFO - ******************************************************\n", - "2019-05-29 19:09:50,561 - INFO - Seq2Seq Examples Processed : 326400 snli Loss : 3.01893 Num snli minibatches : 180\n", - "2019-05-29 19:09:50,631 - INFO - Round: 6800 NLI Epoch : 0 NLI Examples Processed : 32688 NLI Loss : 0.79130\n", - "2019-05-29 19:09:50,637 - INFO - Average time per mininbatch : 0.49149\n", - "2019-05-29 19:09:50,642 - INFO - ******************************************************\n", - "2019-05-29 19:11:31,019 - INFO - Seq2Seq Examples Processed : 336000 snli Loss : 2.80379 Num snli minibatches : 180\n", - "2019-05-29 19:11:31,070 - INFO - Round: 7000 NLI Epoch : 0 NLI Examples Processed : 33648 NLI Loss : 0.84067\n", - "2019-05-29 19:11:31,077 - INFO - Average time per mininbatch : 0.50185\n", - "2019-05-29 19:11:31,085 - INFO - ******************************************************\n", - "2019-05-29 19:13:10,680 - INFO - Seq2Seq Examples Processed : 345600 snli Loss : 2.75945 Num snli minibatches : 180\n", - "2019-05-29 19:13:10,708 - INFO - Round: 7200 NLI Epoch : 0 NLI Examples Processed : 34608 NLI Loss : 0.81463\n", - "2019-05-29 19:13:10,714 - INFO - Average time per mininbatch : 0.49794\n", - "2019-05-29 19:13:10,719 - INFO - ******************************************************\n", - "2019-05-29 19:14:48,873 - INFO - Seq2Seq Examples Processed : 355200 snli Loss : 2.88910 Num snli minibatches : 180\n", - "2019-05-29 19:14:49,171 - INFO - Round: 7400 NLI Epoch : 0 NLI Examples Processed : 35568 NLI Loss : 0.82873\n", - "2019-05-29 19:14:49,181 - INFO - Average time per mininbatch : 0.49074\n", - "2019-05-29 19:14:49,186 - INFO - ******************************************************\n", - "2019-05-29 19:16:26,470 - INFO - Seq2Seq Examples Processed : 364800 snli Loss : 3.01164 Num snli minibatches : 180\n", - "2019-05-29 19:16:26,532 - INFO - Round: 7600 NLI Epoch : 0 NLI Examples Processed : 36528 NLI Loss : 0.79707\n", - "2019-05-29 19:16:26,538 - INFO - Average time per mininbatch : 0.48639\n", - "2019-05-29 19:16:26,547 - INFO - ******************************************************\n", - "2019-05-29 19:18:04,600 - INFO - Seq2Seq Examples Processed : 374400 snli Loss : 2.97503 Num snli minibatches : 180\n", - "2019-05-29 19:18:04,627 - INFO - Round: 7800 NLI Epoch : 0 NLI Examples Processed : 37488 NLI Loss : 0.81165\n", - "2019-05-29 19:18:04,633 - INFO - Average time per mininbatch : 0.49024\n", - "2019-05-29 19:18:04,638 - INFO - 
******************************************************\n", - "2019-05-29 19:19:42,343 - INFO - Seq2Seq Examples Processed : 384000 snli Loss : 2.95044 Num snli minibatches : 180\n", - "2019-05-29 19:19:42,383 - INFO - Round: 8000 NLI Epoch : 0 NLI Examples Processed : 38448 NLI Loss : 0.77696\n", - "2019-05-29 19:19:42,388 - INFO - Average time per mininbatch : 0.48847\n", - "2019-05-29 19:19:42,393 - INFO - ******************************************************\n", - "2019-05-29 19:21:20,043 - INFO - Seq2Seq Examples Processed : 393600 snli Loss : 2.77445 Num snli minibatches : 180\n", - "2019-05-29 19:21:20,059 - INFO - Round: 8200 NLI Epoch : 0 NLI Examples Processed : 39408 NLI Loss : 0.83046\n", - "2019-05-29 19:21:20,094 - INFO - Average time per mininbatch : 0.48821\n", - "2019-05-29 19:21:20,099 - INFO - ******************************************************\n", - "2019-05-29 19:22:57,666 - INFO - Seq2Seq Examples Processed : 403200 snli Loss : 2.74390 Num snli minibatches : 180\n", - "2019-05-29 19:22:57,687 - INFO - Round: 8400 NLI Epoch : 0 NLI Examples Processed : 40368 NLI Loss : 0.79176\n", - "2019-05-29 19:22:57,694 - INFO - Average time per mininbatch : 0.48760\n", - "2019-05-29 19:22:57,700 - INFO - ******************************************************\n", - "2019-05-29 19:24:36,064 - INFO - Seq2Seq Examples Processed : 412800 snli Loss : 2.70470 Num snli minibatches : 180\n", - "2019-05-29 19:24:36,084 - INFO - Round: 8600 NLI Epoch : 0 NLI Examples Processed : 41328 NLI Loss : 0.78364\n", - "2019-05-29 19:24:36,104 - INFO - Average time per mininbatch : 0.49178\n", - "2019-05-29 19:24:36,110 - INFO - ******************************************************\n", - "2019-05-29 19:26:12,557 - INFO - Seq2Seq Examples Processed : 422400 snli Loss : 2.89966 Num snli minibatches : 180\n", - "2019-05-29 19:26:12,568 - INFO - Round: 8800 NLI Epoch : 0 NLI Examples Processed : 42288 NLI Loss : 0.79969\n", - "2019-05-29 19:26:12,573 - INFO - Average time per mininbatch : 0.48220\n", - "2019-05-29 19:26:12,578 - INFO - ******************************************************\n", - "2019-05-29 19:27:48,184 - INFO - Seq2Seq Examples Processed : 432000 snli Loss : 2.89469 Num snli minibatches : 180\n", - "2019-05-29 19:27:48,205 - INFO - Round: 9000 NLI Epoch : 0 NLI Examples Processed : 43248 NLI Loss : 0.81662\n", - "2019-05-29 19:27:48,211 - INFO - Average time per mininbatch : 0.47799\n", - "2019-05-29 19:27:48,216 - INFO - ******************************************************\n", - "2019-05-29 19:29:24,910 - INFO - Seq2Seq Examples Processed : 441600 snli Loss : 2.87607 Num snli minibatches : 180\n", - "2019-05-29 19:29:25,067 - INFO - Round: 9200 NLI Epoch : 0 NLI Examples Processed : 44208 NLI Loss : 0.77653\n", - "2019-05-29 19:29:25,079 - INFO - Average time per mininbatch : 0.48344\n", - "2019-05-29 19:29:25,085 - INFO - ******************************************************\n", - "2019-05-29 19:31:00,670 - INFO - Seq2Seq Examples Processed : 451200 snli Loss : 2.85335 Num snli minibatches : 180\n", - "2019-05-29 19:31:00,678 - INFO - Round: 9400 NLI Epoch : 0 NLI Examples Processed : 45168 NLI Loss : 0.75379\n", - "2019-05-29 19:31:00,686 - INFO - Average time per mininbatch : 0.47779\n", - "2019-05-29 19:31:00,693 - INFO - ******************************************************\n", - "2019-05-29 19:32:37,261 - INFO - Seq2Seq Examples Processed : 460800 snli Loss : 2.80405 Num snli minibatches : 180\n", - "2019-05-29 19:32:37,285 - INFO - Round: 9600 NLI Epoch : 0 NLI Examples 
Processed : 46128 NLI Loss : 0.77045\n", - "2019-05-29 19:32:37,291 - INFO - Average time per mininbatch : 0.48279\n", - "2019-05-29 19:32:37,327 - INFO - ******************************************************\n", - "2019-05-29 19:34:13,404 - INFO - Seq2Seq Examples Processed : 470400 snli Loss : 2.63074 Num snli minibatches : 180\n", - "2019-05-29 19:34:13,452 - INFO - Round: 9800 NLI Epoch : 0 NLI Examples Processed : 47088 NLI Loss : 0.79249\n", - "2019-05-29 19:34:13,480 - INFO - Average time per mininbatch : 0.48036\n", - "2019-05-29 19:34:13,485 - INFO - ******************************************************\n", - "2019-05-29 19:35:50,016 - INFO - Seq2Seq Examples Processed : 480000 snli Loss : 2.62458 Num snli minibatches : 180\n", - "2019-05-29 19:35:50,054 - INFO - Round: 10000 NLI Epoch : 0 NLI Examples Processed : 48048 NLI Loss : 0.82520\n", - "2019-05-29 19:35:50,060 - INFO - Average time per mininbatch : 0.48262\n", - "2019-05-29 19:35:50,065 - INFO - ******************************************************\n", - "2019-05-29 19:37:25,555 - INFO - Seq2Seq Examples Processed : 489600 snli Loss : 2.61139 Num snli minibatches : 180\n", - "2019-05-29 19:37:25,621 - INFO - Round: 10200 NLI Epoch : 0 NLI Examples Processed : 49008 NLI Loss : 0.79771\n", - "2019-05-29 19:37:25,636 - INFO - Average time per mininbatch : 0.47742\n", - "2019-05-29 19:37:25,672 - INFO - ******************************************************\n", - "2019-05-29 19:39:00,465 - INFO - Seq2Seq Examples Processed : 499200 snli Loss : 2.85354 Num snli minibatches : 180\n", - "2019-05-29 19:39:00,485 - INFO - Round: 10400 NLI Epoch : 0 NLI Examples Processed : 49968 NLI Loss : 0.75161\n", - "2019-05-29 19:39:00,515 - INFO - Average time per mininbatch : 0.47392\n", - "2019-05-29 19:39:00,521 - INFO - ******************************************************\n", - "2019-05-29 19:40:35,133 - INFO - Seq2Seq Examples Processed : 508800 snli Loss : 2.81737 Num snli minibatches : 180\n", - "2019-05-29 19:40:35,153 - INFO - Round: 10600 NLI Epoch : 0 NLI Examples Processed : 50928 NLI Loss : 0.76678\n", - "2019-05-29 19:40:35,183 - INFO - Average time per mininbatch : 0.47303\n", - "2019-05-29 19:40:35,191 - INFO - ******************************************************\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "2019-05-29 19:42:10,143 - INFO - Seq2Seq Examples Processed : 518400 snli Loss : 2.81220 Num snli minibatches : 180\n", - "2019-05-29 19:42:10,181 - INFO - Round: 10800 NLI Epoch : 0 NLI Examples Processed : 51888 NLI Loss : 0.80043\n", - "2019-05-29 19:42:10,186 - INFO - Average time per mininbatch : 0.47473\n", - "2019-05-29 19:42:10,191 - INFO - ******************************************************\n", - "2019-05-29 19:43:45,532 - INFO - Seq2Seq Examples Processed : 528000 snli Loss : 2.79347 Num snli minibatches : 180\n", - "2019-05-29 19:43:45,556 - INFO - Round: 11000 NLI Epoch : 0 NLI Examples Processed : 52848 NLI Loss : 0.76758\n", - "2019-05-29 19:43:45,561 - INFO - Average time per mininbatch : 0.47667\n", - "2019-05-29 19:43:45,566 - INFO - ******************************************************\n", - "2019-05-29 19:45:19,931 - INFO - Seq2Seq Examples Processed : 537600 snli Loss : 2.77058 Num snli minibatches : 180\n", - "2019-05-29 19:45:19,940 - INFO - Round: 11200 NLI Epoch : 0 NLI Examples Processed : 53808 NLI Loss : 0.75474\n", - "2019-05-29 19:45:19,947 - INFO - Average time per mininbatch : 0.47179\n", - "2019-05-29 19:45:19,952 - INFO - 
******************************************************\n", - "2019-05-29 19:46:54,122 - INFO - Seq2Seq Examples Processed : 547200 snli Loss : 2.57868 Num snli minibatches : 180\n", - "2019-05-29 19:46:54,202 - INFO - Round: 11400 NLI Epoch : 0 NLI Examples Processed : 54768 NLI Loss : 0.72065\n", - "2019-05-29 19:46:54,207 - INFO - Average time per mininbatch : 0.47081\n", - "2019-05-29 19:46:54,212 - INFO - ******************************************************\n", - "2019-05-29 19:48:27,836 - INFO - Seq2Seq Examples Processed : 556800 snli Loss : 2.53883 Num snli minibatches : 180\n", - "2019-05-29 19:48:27,866 - INFO - Round: 11600 NLI Epoch : 0 NLI Examples Processed : 55728 NLI Loss : 0.74271\n", - "2019-05-29 19:48:27,872 - INFO - Average time per mininbatch : 0.46808\n", - "2019-05-29 19:48:27,901 - INFO - ******************************************************\n", - "2019-05-29 19:50:04,260 - INFO - Seq2Seq Examples Processed : 566400 snli Loss : 2.56377 Num snli minibatches : 180\n", - "2019-05-29 19:50:04,275 - INFO - Round: 11800 NLI Epoch : 0 NLI Examples Processed : 56688 NLI Loss : 0.70260\n", - "2019-05-29 19:50:04,280 - INFO - Average time per mininbatch : 0.48162\n", - "2019-05-29 19:50:04,285 - INFO - ******************************************************\n", - "2019-05-29 19:51:39,188 - INFO - Seq2Seq Examples Processed : 576000 snli Loss : 2.57128 Num snli minibatches : 180\n", - "2019-05-29 19:51:39,237 - INFO - Round: 12000 NLI Epoch : 0 NLI Examples Processed : 57648 NLI Loss : 0.75922\n", - "2019-05-29 19:51:39,246 - INFO - Average time per mininbatch : 0.47448\n", - "2019-05-29 19:51:39,280 - INFO - ******************************************************\n", - "2019-05-29 19:53:11,436 - INFO - Seq2Seq Examples Processed : 585600 snli Loss : 2.81611 Num snli minibatches : 180\n", - "2019-05-29 19:53:11,510 - INFO - Round: 12200 NLI Epoch : 0 NLI Examples Processed : 58608 NLI Loss : 0.76561\n", - "2019-05-29 19:53:11,545 - INFO - Average time per mininbatch : 0.46075\n", - "2019-05-29 19:53:11,554 - INFO - ******************************************************\n", - "2019-05-29 19:54:42,824 - INFO - Seq2Seq Examples Processed : 595200 snli Loss : 2.78098 Num snli minibatches : 180\n", - "2019-05-29 19:54:42,867 - INFO - Round: 12400 NLI Epoch : 0 NLI Examples Processed : 59568 NLI Loss : 0.73193\n", - "2019-05-29 19:54:42,875 - INFO - Average time per mininbatch : 0.45632\n", - "2019-05-29 19:54:42,881 - INFO - ******************************************************\n", - "2019-05-29 19:56:14,538 - INFO - Seq2Seq Examples Processed : 604800 snli Loss : 2.75304 Num snli minibatches : 180\n", - "2019-05-29 19:56:14,596 - INFO - Round: 12600 NLI Epoch : 0 NLI Examples Processed : 60528 NLI Loss : 0.75785\n", - "2019-05-29 19:56:14,602 - INFO - Average time per mininbatch : 0.45825\n", - "2019-05-29 19:56:14,609 - INFO - ******************************************************\n", - "2019-05-29 19:57:47,344 - INFO - Seq2Seq Examples Processed : 614400 snli Loss : 2.76325 Num snli minibatches : 180\n", - "2019-05-29 19:57:47,364 - INFO - Round: 12800 NLI Epoch : 0 NLI Examples Processed : 61488 NLI Loss : 0.75488\n", - "2019-05-29 19:57:47,412 - INFO - Average time per mininbatch : 0.46364\n", - "2019-05-29 19:57:47,418 - INFO - ******************************************************\n", - "2019-05-29 19:59:19,095 - INFO - Seq2Seq Examples Processed : 624000 snli Loss : 2.70555 Num snli minibatches : 180\n", - "2019-05-29 19:59:19,135 - INFO - Round: 13000 NLI Epoch : 0 NLI 
Examples Processed : 62448 NLI Loss : 0.73895\n", - "2019-05-29 19:59:19,145 - INFO - Average time per mininbatch : 0.45835\n", - "2019-05-29 19:59:19,150 - INFO - ******************************************************\n", - "2019-05-29 20:00:51,249 - INFO - Seq2Seq Examples Processed : 633600 snli Loss : 2.60308 Num snli minibatches : 180\n", - "2019-05-29 20:00:51,287 - INFO - Round: 13200 NLI Epoch : 0 NLI Examples Processed : 63408 NLI Loss : 0.74067\n", - "2019-05-29 20:00:51,297 - INFO - Average time per mininbatch : 0.46046\n", - "2019-05-29 20:00:51,303 - INFO - ******************************************************\n", - "2019-05-29 20:02:22,456 - INFO - Seq2Seq Examples Processed : 643200 snli Loss : 2.48930 Num snli minibatches : 180\n", - "2019-05-29 20:02:22,536 - INFO - Round: 13400 NLI Epoch : 0 NLI Examples Processed : 64368 NLI Loss : 0.76212\n", - "2019-05-29 20:02:22,542 - INFO - Average time per mininbatch : 0.45573\n", - "2019-05-29 20:02:22,570 - INFO - ******************************************************\n", - "2019-05-29 20:03:54,309 - INFO - Seq2Seq Examples Processed : 652800 snli Loss : 2.49109 Num snli minibatches : 180\n", - "2019-05-29 20:03:54,334 - INFO - Round: 13600 NLI Epoch : 0 NLI Examples Processed : 65328 NLI Loss : 0.72918\n", - "2019-05-29 20:03:54,342 - INFO - Average time per mininbatch : 0.45853\n", - "2019-05-29 20:03:54,361 - INFO - ******************************************************\n", - "2019-05-29 20:05:26,473 - INFO - Seq2Seq Examples Processed : 662400 snli Loss : 2.49999 Num snli minibatches : 180\n", - "2019-05-29 20:05:26,500 - INFO - Round: 13800 NLI Epoch : 0 NLI Examples Processed : 66288 NLI Loss : 0.71488\n", - "2019-05-29 20:05:26,531 - INFO - Average time per mininbatch : 0.46042\n", - "2019-05-29 20:05:26,539 - INFO - ******************************************************\n", - "2019-05-29 20:06:58,271 - INFO - Seq2Seq Examples Processed : 672000 snli Loss : 2.56470 Num snli minibatches : 180\n", - "2019-05-29 20:06:58,336 - INFO - Round: 14000 NLI Epoch : 0 NLI Examples Processed : 67248 NLI Loss : 0.72321\n", - "2019-05-29 20:06:58,342 - INFO - Average time per mininbatch : 0.45860\n", - "2019-05-29 20:06:58,348 - INFO - ******************************************************\n", - "2019-05-29 20:08:29,144 - INFO - Seq2Seq Examples Processed : 681600 snli Loss : 2.72915 Num snli minibatches : 180\n", - "2019-05-29 20:08:29,198 - INFO - Round: 14200 NLI Epoch : 0 NLI Examples Processed : 68208 NLI Loss : 0.73935\n", - "2019-05-29 20:08:29,204 - INFO - Average time per mininbatch : 0.45394\n", - "2019-05-29 20:08:29,210 - INFO - ******************************************************\n", - "2019-05-29 20:09:58,806 - INFO - Seq2Seq Examples Processed : 691200 snli Loss : 2.68844 Num snli minibatches : 180\n", - "2019-05-29 20:09:58,846 - INFO - Round: 14400 NLI Epoch : 0 NLI Examples Processed : 69168 NLI Loss : 0.74668\n", - "2019-05-29 20:09:58,852 - INFO - Average time per mininbatch : 0.44793\n", - "2019-05-29 20:09:58,859 - INFO - ******************************************************\n", - "2019-05-29 20:11:29,986 - INFO - Seq2Seq Examples Processed : 700800 snli Loss : 2.69640 Num snli minibatches : 180\n", - "2019-05-29 20:11:30,011 - INFO - Round: 14600 NLI Epoch : 0 NLI Examples Processed : 70128 NLI Loss : 0.71737\n", - "2019-05-29 20:11:30,017 - INFO - Average time per mininbatch : 0.45559\n", - "2019-05-29 20:11:30,022 - INFO - ******************************************************\n", - "\n", - "Execution 
Summary\n", - "=================\n", - "RunId: pytorch-gensen_1559153095_0e7f4645\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "{'runId': 'pytorch-gensen_1559153095_0e7f4645',\n", - " 'target': 'gpucluster',\n", - " 'status': 'CancelRequested',\n", - " 'startTimeUtc': '2019-05-29T18:05:02.390551Z',\n", - " 'properties': {'azureml.runsource': 'experiment',\n", - " 'AzureML.DerivedImageName': 'azureml/azureml_f6cd7804b6a4e89cea33d34d8659fed9',\n", - " 'ContentSnapshotId': 'f0eb2538-559b-4051-9d66-5a6a79570c3d',\n", - " 'azureml.git.repository_uri': 'https://github.com/Microsoft/NLP.git',\n", - " 'azureml.git.branch': 'liqun-first-pull',\n", - " 'azureml.git.commit': 'ba716d109a6db89aa94d95255afe7f972a97f0b8',\n", - " 'azureml.git.dirty': 'True',\n", - " 'azureml.git.build_id': None,\n", - " 'azureml.git.build_uri': None,\n", - " 'mlflow.source.git.branch': 'liqun-first-pull',\n", - " 'mlflow.source.git.commit': 'ba716d109a6db89aa94d95255afe7f972a97f0b8',\n", - " 'mlflow.source.git.repoURL': 'https://github.com/Microsoft/NLP.git'},\n", - " 'runDefinition': {'script': 'train.py',\n", - " 'arguments': ['--config',\n", - " 'sample_config.json',\n", - " '--data_folder',\n", - " '$AZUREML_DATAREFERENCE_gensen'],\n", - " 'sourceDirectoryDataStore': 'workspaceblobstore',\n", - " 'framework': 'Python',\n", - " 'communicator': 'Mpi',\n", - " 'target': 'gpucluster',\n", - " 'dataReferences': {'gensen': {'dataStoreName': 'gensen',\n", - " 'mode': 'Mount',\n", - " 'pathOnDataStore': None,\n", - " 'pathOnCompute': None,\n", - " 'overwrite': False},\n", - " 'workspaceblobstore': {'dataStoreName': 'workspaceblobstore',\n", - " 'mode': 'Mount',\n", - " 'pathOnDataStore': None,\n", - " 'pathOnCompute': None,\n", - " 'overwrite': False}},\n", - " 'jobName': None,\n", - " 'maxRunDurationSeconds': None,\n", - " 'nodeCount': 4,\n", - " 'environment': {'name': 'Experiment pytorch-gensen Environment',\n", - " 'version': 'Autosave_2019-05-29T17:23:26Z_8a3fa4ff',\n", - " 'python': {'interpreterPath': 'python',\n", - " 'userManagedDependencies': False,\n", - " 'condaDependencies': {'name': 'project_environment',\n", - " 'dependencies': ['python=3.6.2',\n", - " {'pip': ['azureml-defaults',\n", - " 'torch==1.0.0',\n", - " 'torchvision==0.2.1',\n", - " 'horovod==0.15.2']},\n", - " 'scikit-learn=0.20.3']},\n", - " 'baseCondaEnvironment': None},\n", - " 'environmentVariables': {'EXAMPLE_ENV_VAR': 'EXAMPLE_VALUE',\n", - " 'NCCL_SOCKET_IFNAME': '^docker0'},\n", - " 'docker': {'baseImage': 'mcr.microsoft.com/azureml/base-gpu:intelmpi2018.3-cuda9.0-cudnn7-ubuntu16.04',\n", - " 'enabled': True,\n", - " 'sharedVolumes': True,\n", - " 'gpuSupport': True,\n", - " 'shmSize': '1g',\n", - " 'arguments': [],\n", - " 'baseImageRegistry': {'address': None,\n", - " 'username': None,\n", - " 'password': None}},\n", - " 'spark': {'repositories': ['https://mmlspark.azureedge.net/maven'],\n", - " 'packages': [{'group': 'com.microsoft.ml.spark',\n", - " 'artifact': 'mmlspark_2.11',\n", - " 'version': '0.12'}],\n", - " 'precachePackages': True}},\n", - " 'history': {'outputCollection': True,\n", - " 'directoriesToWatch': ['logs'],\n", - " 'snapshotProject': True},\n", - " 'spark': {'configuration': {'spark.app.name': 'Azure ML Experiment',\n", - " 'spark.yarn.maxAppAttempts': '1'}},\n", - " 'amlCompute': {'name': None,\n", - " 'vmSize': None,\n", - " 'vmPriority': None,\n", - " 'retainCluster': False,\n", - " 'clusterMaxNodeCount': 4},\n", - " 'tensorflow': {'workerCount': 1, 'parameterServerCount': 1},\n", - " 'mpi': 
{'processCountPerNode': 1},\n", - " 'hdi': {'yarnDeployMode': 'Cluster'},\n", - " 'containerInstance': {'region': None, 'cpuCores': 2, 'memoryGb': 3.5},\n", - " 'exposedPorts': None},\n", - " 'logFiles': {'azureml-logs/80_driver_log_rank_0.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_0.txt?sv=2018-03-28&sr=b&sig=DQm40ZucopOZIMdeEOgfpLYIopsnzDl0fQVKokQcOaw%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", - " 'azureml-logs/80_driver_log_rank_1.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_1.txt?sv=2018-03-28&sr=b&sig=yGX4ZaTAWOu8XsikG3oZ9ZFFJycb%2FrrmPU%2FbDWfQs%2FY%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", - " 'azureml-logs/80_driver_log_rank_2.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_2.txt?sv=2018-03-28&sr=b&sig=9zNw%2BZ94ncqQY6%2BzZWJiJJBT%2F3blXF6mTDohsPkvOl4%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", - " 'azureml-logs/80_driver_log_rank_3.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/azureml-logs/80_driver_log_rank_3.txt?sv=2018-03-28&sr=b&sig=vowPfbhv6HR8QFeFKJJFy6afd9h5Dt5YS18r2I5Xfzs%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r',\n", - " 'logs/azureml/azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.pytorch-gensen_1559153095_0e7f4645/logs/azureml/azureml.log?sv=2018-03-28&sr=b&sig=KXjFQzVr00dx7PF7vL2gKOszt0Qvbj7H6%2F9eWP2FEMg%3D&st=2019-05-29T20%3A01%3A43Z&se=2019-05-30T04%3A11%3A43Z&sp=r'}}" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ "run.wait_for_completion(show_output=True) # this provides a verbose log" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Cancel the job**\n", + "\n", + "It's better to cancel the job manually to make sure you do not waste resources." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " ```python\n", + "# Cancel the job with id.\n", + "job_id = \"pytorch-gensen_1555533596_d9cc75fe\"\n", + "run = get_run(experiment, job_id)\n", + "\n", + "# Cancel jobs.\n", + "run.cancel()\n", + " ```" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1490,8 +1138,6 @@ "metadata": {}, "outputs": [], "source": [ - "from azureml.train.hyperdrive import RandomParameterSampling, BanditPolicy, HyperDriveRunConfig, uniform, PrimaryMetricGoal\n", - "\n", "param_sampling = RandomParameterSampling( {\n", " 'learning_rate': uniform(0.0001, 0.001)\n", " }\n", @@ -1533,17 +1179,39 @@ "You can monitor the progress of the runs with the following Jupyter widget. " ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Interpret the Tuning Results**\n", + "\n", + "The chart shows 4 different threads running in parallel with different learning rate, and the number of total runs is 8. By comparing the 'Best Metric' which is `best_val_loss` in our case, we can pick the best learning rate. 
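For reference, here is a sketch of how such a sweep can be wired up end to end. It reuses the `estimator` and `experiment` objects defined earlier in this notebook; the Bandit early-termination settings and the exact run counts are illustrative assumptions rather than values read from the notebook, and whatever is passed as `primary_metric_name` must match a metric the training script actually logs.

```python
from azureml.train.hyperdrive import (BanditPolicy, HyperDriveRunConfig,
                                      PrimaryMetricGoal, RandomParameterSampling,
                                      uniform)

# Sample the learning rate at random, as in the cell above.
param_sampling = RandomParameterSampling({'learning_rate': uniform(0.0001, 0.001)})

# Stop clearly under-performing child runs early (settings are illustrative).
early_termination_policy = BanditPolicy(slack_factor=0.15, evaluation_interval=2)

hyperdrive_run_config = HyperDriveRunConfig(
    estimator=estimator,                    # the PyTorch estimator defined earlier
    hyperparameter_sampling=param_sampling,
    policy=early_termination_policy,
    primary_metric_name='best_val_loss',    # the metric compared across child runs
    primary_metric_goal=PrimaryMetricGoal.MINIMIZE,
    max_total_runs=8,                       # 8 child runs in total
    max_concurrent_runs=4)                  # 4 of them running in parallel

hyperdrive_run = experiment.submit(hyperdrive_run_config)
```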
The HyperDrive run automatically shows the tracking charts (example in the following) to let users understand the tuning process.\n", + "![Tuning](https://nlpbp.blob.core.windows.net/images/tuning.PNG)\n", + "\n", + "**From the results in section [2.3.5 Monitor your run](#2.3.5-Monitor-your-run), the `best_val_loss` for 1 node is 4.81, but with tuning we can easily achieve better performance around 4.65.**" + ] + }, { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": false + }, "outputs": [], "source": [ - "from azureml.widgets import RunDetails\n", - "\n", "RunDetails(hyperdrive_run).show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see the experiment progress from this notebook by using `azureml.widgets.RunDetails(hd_run).show()` or check from the Azure portal with the url link you can get by running `hd_run.get_portal_url()`.\n", + "To load an existing Hyperdrive run, use `hd_run = hd.HyperDriveRun(exp, , hyperdrive_run_config=hd_run_config)`. You also can cancel a run with `hd_run.cancel()`.\n", + "![](https://nlpbp.blob.core.windows.net/images/tuning1.PNG)\n", + "![](https://nlpbp.blob.core.windows.net/images/tuning2.PNG)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1551,13 +1219,51 @@ "**Cancel the hyper drive run to save the resources**" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + " ```python\n", + "# Cancel the hyper drive\n", + "hyperdrive_run.cancel()\n", + " ```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.3 Find the Best Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once all the runs complete, we can find the run that produced the model with the lowest loss." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "best_run = hyperdrive_run.get_best_run_by_primary_metric()\n", + "best_run_metrics = best_run.get_metrics()\n", + "print(best_run)" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "hyperdrive_run.cancel()" + "print('Best Run is:\\n Validation loss: {0:.5f} \\n Learning rate: {1:.5f} \\n'.format(\n", + " best_run_metrics['best_val_loss'][-1],\n", + " best_run_metrics['lr'])\n", + " )" ] }, { @@ -1569,7 +1275,8 @@ "1. Subramanian, Sandeep and Trischler, Adam and Bengio, Yoshua and Pal, Christopher J, [*Learning general purpose distributed sentence representations via large scale multi-task learning*](https://arxiv.org/abs/1804.00079), ICLR, 2018.\n", "2. A. Conneau, D. Kiela, [*SentEval: An Evaluation Toolkit for Universal Sentence Representations*](https://arxiv.org/abs/1803.05449).\n", "3. Semantic textual similarity. url: http://nlpprogress.com/english/semantic_textual_similarity.html\n", - "4. Minh-Thang Luong, Quoc V Le, Ilya Sutskever, Oriol Vinyals, and Lukasz Kaiser. [*Multi-task sequence to sequence learning*](https://arxiv.org/abs/1511.06114), 2015." + "4. Minh-Thang Luong, Quoc V Le, Ilya Sutskever, Oriol Vinyals, and Lukasz Kaiser. [*Multi-task sequence to sequence learning*](https://arxiv.org/abs/1511.06114), 2015.\n", + "5. Bryan McCann, James Bradbury, Caiming Xiong, and Richard Socher. [*Learned in translation: Contextualized word vectors](https://arxiv.org/abs/1708.00107), 2017. 
" ] } ], @@ -1580,9 +1287,9 @@ } ], "kernelspec": { - "display_name": "Python 3", + "display_name": "Python NLP CPU", "language": "python", - "name": "python3" + "name": "nlp_cpu" }, "language_info": { "codemirror_mode": { diff --git a/scenarios/sentence_similarity/gensen_config.json b/scenarios/sentence_similarity/gensen_config.json index f7e47a76c..54aff67b7 100644 --- a/scenarios/sentence_similarity/gensen_config.json +++ b/scenarios/sentence_similarity/gensen_config.json @@ -15,20 +15,20 @@ }, "data": {"paths": [ { - "train_src": "data/processed/snli_1.0_train.txt.s1.tok", - "train_trg": "data/processed/snli_1.0_train.txt.s2.tok", - "val_src": "data/processed/snli_1.0_dev.txt.s1.tok", - "val_trg": "data/processed/snli_1.0_dev.txt.s1.tok", + "train_src": "snli_1.0_train.txt.s1.tok", + "train_trg": "snli_1.0_train.txt.s2.tok", + "val_src": "snli_1.0_dev.txt.s1.tok", + "val_trg": "snli_1.0_dev.txt.s1.tok", "taskname": "snli" } ], "max_src_length": 90, "max_trg_length": 90, "task": "multi-seq2seq-nli", - "save_dir": "data/models/example", - "nli_train": "data/processed/snli_1.0_train.txt.clean.noblank", - "nli_dev": "data/processed/snli_1.0_dev.txt.clean.noblank", - "nli_test": "data/processed/snli_1.0_test.txt.clean.noblank" + "save_dir": "models/", + "nli_train": "snli_1.0_train.txt.clean.noblank", + "nli_dev": "snli_1.0_dev.txt.clean.noblank", + "nli_test": "snli_1.0_test.txt.clean.noblank" }, "model": { "dim_src": 2048, diff --git a/scenarios/sentence_similarity/gensen_train.py b/scenarios/sentence_similarity/gensen_train.py index 7b704a157..2028207fe 100644 --- a/scenarios/sentence_similarity/gensen_train.py +++ b/scenarios/sentence_similarity/gensen_train.py @@ -15,20 +15,20 @@ This training process is based on GPU only. """ -import logging import argparse -import os import json +import logging +import os import time +import horovod.torch as hvd +import mlflow import numpy as np import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as f import torch.optim as optim -from azureml.core.run import Run -import horovod.torch as hvd from utils_nlp.gensen.multi_task_model import MultitaskModel from utils_nlp.gensen.utils import ( @@ -37,9 +37,6 @@ compute_validation_loss, ) -# get the Azure ML run object -run = Run.get_context() - cudnn.benchmark = True hvd.init() @@ -172,7 +169,7 @@ def evaluate( # Horovod: print output only on first rank. if hvd.rank() == 0: # log the best val accuracy to AML run - run.log("Best Validation Loss", np.float(validation_loss)) + logging.info("Best Validation Loss: %f", np.float(validation_loss)) # If the validation loss is small enough, and it starts to go up. # Should stop training. @@ -182,7 +179,6 @@ def evaluate( min_val_loss_epoch = monitor_epoch model_state = model.state_dict() - run.log("Validation Loss", validation_loss) print(monitor_epoch, min_val_loss_epoch, min_val_loss) logging.info( "Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: " @@ -275,15 +271,12 @@ def train(config, data_folder, learning_rate=0.0001): config(dict): Loaded json file as a python object. data_folder(str): Path to the folder containing the data. learning_rate(float): Learning rate for the model. 
- """ owd = os.getcwd() + os.chdir(data_folder) - try: + with mlflow.start_run(): save_dir = config["data"]["save_dir"] - - os.chdir(data_folder) - if not os.path.exists("./log"): os.makedirs("./log") @@ -396,6 +389,8 @@ def train(config, data_folder, learning_rate=0.0001): min_val_loss = 10000000 min_val_loss_epoch = -1 rng_num_tasks = len(tasknames) - 1 if paired_tasks else len(tasknames) + print(os.environ) + mlflow.log_param("Learning Rate", learning_rate) logging.info("Commencing Training ...") start = time.time() while True: @@ -420,9 +415,6 @@ def train(config, data_folder, learning_rate=0.0001): torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) optimizer.step() - # For AML. - run.log("loss", loss.item()) - nli_mbatch_ctr += batch_size * n_gpus if nli_mbatch_ctr >= len(nli_iterator.train_lines): nli_mbatch_ctr = 0 @@ -532,21 +524,24 @@ def train(config, data_folder, learning_rate=0.0001): len(task_losses[idx]), ) ) - run.log("Task Loss", np.mean(task_losses[idx])) + mlflow.log_metric( + "Validation Loss", + np.mean(task_losses[idx]), + step=monitor_epoch, + ) logging.info( "Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI " "Loss : %.5f " % (nli_ctr, nli_epoch, nli_mbatch_ctr, np.mean(nli_losses)) ) - run.log("NLI Loss", np.mean(nli_losses)) + mlflow.log_metric( + "NLI Loss", np.mean(nli_losses), step=nli_epoch + ) logging.info( "Average time per mininbatch : %.5f" % (np.mean(mbatch_times)) ) - run.log( - "Average time per mininbatch : ", np.mean(mbatch_times) - ) task_losses = [[] for _ in tasknames] mbatch_times = [] nli_losses = [] @@ -581,8 +576,7 @@ def train(config, data_folder, learning_rate=0.0001): updates += batch_size * n_gpus nli_ctr += 1 logging.info("Updates: %d" % updates) - finally: - os.chdir(owd) + os.chdir(owd) def read_config(json_file): diff --git a/tests/unit/test_word_embeddings.py b/tests/unit/test_word_embeddings.py index c1eb0e32d..6ac9391c9 100644 --- a/tests/unit/test_word_embeddings.py +++ b/tests/unit/test_word_embeddings.py @@ -38,6 +38,7 @@ def test_load_pretrained_vectors_word2vec(): assert isinstance(load_word2vec(dir_path), Word2VecKeyedVectors) + def test_load_pretrained_vectors_glove(): dir_path = "temp_data/" file_path = os.path.join( @@ -58,7 +59,8 @@ def test_load_pretrained_vectors_glove(): def test_load_pretrained_vectors_fasttext(): dir_path = "temp_data/" - file_path = os.path.join(os.path.join(dir_path, "fastText"), "wiki.simple.bin") + file_path = os.path.join(os.path.join(dir_path, "fastText"), + "wiki.simple.bin") assert isinstance(load_fasttext(dir_path), FastText) diff --git a/tools/generate_conda_file.py b/tools/generate_conda_file.py index da0a591a4..372eb6e1a 100644 --- a/tools/generate_conda_file.py +++ b/tools/generate_conda_file.py @@ -54,9 +54,7 @@ } PIP_BASE = { - "azureml-sdk[notebooks,tensorboard]": ( - "azureml-sdk[notebooks,tensorboard]==1.0.33" - ), + "azureml-sdk[notebooks,tensorboard]": "azureml-sdk[notebooks,tensorboard]==1.0.43", "azureml-dataprep": "azureml-dataprep==1.1.4", "black": "black>=18.6b4", "papermill": "papermill==0.18.2", @@ -74,6 +72,7 @@ "nltk": "nltk>=3.4", "pytorch-pretrained-bert": "pytorch-pretrained-bert>=0.6", "seqeval": "seqeval>=0.0.12", + "azureml-mlflow": "azureml-mlflow>=1.0.41", } PIP_GPU = {"horovod": "horovod>=0.16.1"} From 8140f67d9abe3e5188a9763569cf13ef3004a244 Mon Sep 17 00:00:00 2001 From: Abhiram E Date: Thu, 20 Jun 2019 20:24:41 -0400 Subject: [PATCH 028/108] Fixed documentation to get rid of AzureML logging --- .../sentence_similarity/gensen_aml_deep_dive.ipynb | 14 
+++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb index 4f51a3b39..5ab45d0e7 100644 --- a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb +++ b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb @@ -836,19 +836,11 @@ "metadata": {}, "source": [ "### 2.3.1 Prepare Training Script\n", - "Now you will need to create your training script. In this tutorial, the script for distributed training of GENSEN is already provided for you at `train.py`. In practice, you should be able to take any custom PyTorch training script as is and run it with Azure ML without having to modify your code.\n", + "Now you will need to create your training script. In this tutorial, the script for distributed training of GENSEN is already provided for you at `gensen_train.py`. \n", "\n", - "However, if you would like to use Azure ML's [metric logging](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#logging) capabilities, you will have to add a small amount of Azure ML logic inside your training script. In this example, at each logging interval, we will log the loss for that minibatch to our Azure ML run.\n", + "In this example, we use MLflow to log your metrics. We also use the [AzureML-Mlflow](https://pypi.org/project/azureml-mlflow/) package to further log these metrics to the Azure Portal. This is done with no change to the provided training script!\n", "\n", - "To do so, in `train.py`, we will first access the Azure ML `Run` object within the script:\n", - "```Python\n", - "from azureml.core.run import Run\n", - "run = Run.get_context()\n", - "```\n", - "Later within the script, we log the loss metric to our run:\n", - "```Python\n", - "run.log('loss', loss.item())\n", - "```" + "In this example the script provided logs the loss for that minibatch to our Azure ML portal." ] }, { From c53edc41b32e65bd50fedc48effb3dbec24ee093 Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Fri, 21 Jun 2019 10:50:20 -0400 Subject: [PATCH 029/108] Added training and prediction time to notebook. --- .../entailment_xnli_multilingual.ipynb | 118 ++++++++++-------- utils_nlp/bert/sequence_classification.py | 8 +- utils_nlp/dataset/xnli.py | 3 +- 3 files changed, 73 insertions(+), 56 deletions(-) diff --git a/scenarios/entailment/entailment_xnli_multilingual.ipynb b/scenarios/entailment/entailment_xnli_multilingual.ipynb index 335710642..0816e8a47 100644 --- a/scenarios/entailment/entailment_xnli_multilingual.ipynb +++ b/scenarios/entailment/entailment_xnli_multilingual.ipynb @@ -40,14 +40,16 @@ "\n", "from utils_nlp.bert.sequence_classification import BERTSequenceClassifier\n", "from utils_nlp.bert.common import Language, Tokenizer\n", - "from utils_nlp.dataset.xnli import load_pandas_df" + "from utils_nlp.dataset.xnli import load_pandas_df\n", + "from utils_nlp.common.timer import Timer" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Configurations" + "## Configurations\n", + "Note that the running time shown in this notebook are on a Standard_NC12 Azure Deep Learning Virtual Machine with two NVIDIA Tesla K80 GPUs. If you want to run through the notebook quickly, you can change the `TRAIN_DATA_USED_PERCENT` to a small number, e.g. 0.01. 
" ] }, { @@ -56,6 +58,8 @@ "metadata": {}, "outputs": [], "source": [ + "TRAIN_DATA_USED_PERCENT = 1.0\n", + "\n", "# set random seeds\n", "RANDOM_SEED = 42\n", "random.seed(RANDOM_SEED)\n", @@ -165,20 +169,9 @@ "metadata": {}, "outputs": [], "source": [ - "# train_df_chinese = train_df_chinese.loc[:1000]\n", - "# dev_df_chinese = dev_df_chinese.loc[:1000]\n", - "# test_df_chinese = test_df_chinese.loc[:1000]\n", - "\n", - "# train_df_hindi = train_df_hindi.loc[:1000]\n", - "# dev_df_hindi = dev_df_hindi.loc[:1000]\n", - "# test_df_hindi = test_df_hindi.loc[:1000]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Note that the texts are convereted to Unicode which can be processed by BERT models. " + "train_data_used_count = round(TRAIN_DATA_USED_PERCENT * train_df_chinese.shape[0])\n", + "train_df_chinese = train_df_chinese.loc[:train_data_used_count]\n", + "train_df_hindi = train_df_hindi.loc[:train_data_used_count]" ] }, { @@ -206,8 +199,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 392702/392702 [02:27<00:00, 2667.38it/s]\n", - "100%|██████████| 5010/5010 [00:01<00:00, 3054.76it/s]\n" + "100%|██████████| 392702/392702 [02:26<00:00, 2682.67it/s]\n", + "100%|██████████| 5010/5010 [00:01<00:00, 3122.04it/s]\n" ] } ], @@ -309,20 +302,23 @@ "epoch:2/2; batch:7369->8596/12271; loss:0.265789\n", "epoch:2/2; batch:8597->9824/12271; loss:0.328964\n", "epoch:2/2; batch:9825->11052/12271; loss:0.436310\n", - "epoch:2/2; batch:11053->12271/12271; loss:0.374193\n" + "epoch:2/2; batch:11053->12271/12271; loss:0.374193\n", + "Training time : 8.050 hrs\n" ] } ], "source": [ - "classifier_chinese.fit(token_ids=train_token_ids_chinese,\n", - " input_mask=train_input_mask_chinese,\n", - " token_type_ids=train_token_type_ids_chinese,\n", - " labels=train_labels_chinese,\n", - " num_gpus=NUM_GPUS,\n", - " num_epochs=NUM_EPOCHS,\n", - " batch_size=BATCH_SIZE,\n", - " lr=LEARNING_RATE,\n", - " warmup_proportion=WARMUP_PROPORTION)" + "with Timer() as t:\n", + " classifier_chinese.fit(token_ids=train_token_ids_chinese,\n", + " input_mask=train_input_mask_chinese,\n", + " token_type_ids=train_token_type_ids_chinese,\n", + " labels=train_labels_chinese,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)\n", + "print(\"Training time : {:.3f} hrs\".format(t.interval / 3600))" ] }, { @@ -341,15 +337,31 @@ "name": "stderr", "output_type": "stream", "text": [ - "5024it [00:53, 101.94it/s] \n" + "5024it [00:54, 101.88it/s] " + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prediction time : 0.015 hrs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" ] } ], "source": [ - "predictions_chinese = classifier_chinese.predict(token_ids=test_token_ids_chinese,\n", - " input_mask=test_input_mask_chinese,\n", - " token_type_ids=test_token_type_ids_chinese,\n", - " batch_size=BATCH_SIZE)" + "with Timer() as t:\n", + " predictions_chinese = classifier_chinese.predict(token_ids=test_token_ids_chinese,\n", + " input_mask=test_input_mask_chinese,\n", + " token_type_ids=test_token_type_ids_chinese,\n", + " batch_size=BATCH_SIZE)\n", + "print(\"Prediction time : {:.3f} hrs\".format(t.interval / 3600))" ] }, { @@ -411,8 +423,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████| 392702/392702 [03:46<00:00, 1736.64it/s]\n", - "100%|██████████| 5010/5010 [00:02<00:00, 1933.92it/s]\n" + 
"100%|██████████| 392702/392702 [03:48<00:00, 1719.84it/s]\n", + "100%|██████████| 5010/5010 [00:02<00:00, 1916.46it/s]\n" ] } ], @@ -467,7 +479,8 @@ "epoch:2/2; batch:7369->8596/12271; loss:0.864921\n", "epoch:2/2; batch:8597->9824/12271; loss:0.518601\n", "epoch:2/2; batch:9825->11052/12271; loss:0.395920\n", - "epoch:2/2; batch:11053->12271/12271; loss:0.685858\n" + "epoch:2/2; batch:11053->12271/12271; loss:0.685858\n", + "Training time : 9.520 hrs\n" ] } ], @@ -475,15 +488,17 @@ "classifier_multi = BERTSequenceClassifier(language=LANGUAGE_MULTI,\n", " num_labels=num_labels_hindi,\n", " cache_dir=CACHE_DIR)\n", - "classifier_multi.fit(token_ids=train_token_ids_hindi,\n", - " input_mask=train_input_mask_hindi,\n", - " token_type_ids=train_token_type_ids_hindi,\n", - " labels=train_labels_hindi,\n", - " num_gpus=NUM_GPUS,\n", - " num_epochs=NUM_EPOCHS,\n", - " batch_size=BATCH_SIZE,\n", - " lr=LEARNING_RATE,\n", - " warmup_proportion=WARMUP_PROPORTION)" + "with Timer() as t:\n", + " classifier_multi.fit(token_ids=train_token_ids_hindi,\n", + " input_mask=train_input_mask_hindi,\n", + " token_type_ids=train_token_type_ids_hindi,\n", + " labels=train_labels_hindi,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)\n", + "print(\"Training time : {:.3f} hrs\".format(t.interval / 3600))" ] }, { @@ -502,13 +517,14 @@ "name": "stderr", "output_type": "stream", "text": [ - "5024it [01:02, 86.65it/s] " + "5024it [01:02, 87.10it/s] " ] }, { "name": "stdout", "output_type": "stream", "text": [ + "Prediction time : 0.017 hrs\n", " precision recall f1-score support\n", "\n", "contradiction 0.69 0.72 0.70 1670\n", @@ -530,10 +546,12 @@ } ], "source": [ - "predictions_hindi = classifier_multi.predict(token_ids=test_token_ids_hindi,\n", - " input_mask=test_input_mask_hindi,\n", - " token_type_ids=test_token_type_ids_hindi,\n", - " batch_size=BATCH_SIZE)\n", + "with Timer() as t:\n", + " predictions_hindi = classifier_multi.predict(token_ids=test_token_ids_hindi,\n", + " input_mask=test_input_mask_hindi,\n", + " token_type_ids=test_token_type_ids_hindi,\n", + " batch_size=BATCH_SIZE)\n", + "print(\"Prediction time : {:.3f} hrs\".format(t.interval / 3600))\n", "predictions_hindi= label_encoder_hindi.inverse_transform(predictions_hindi)\n", "print(classification_report(test_df_hindi[LABEL_COL], predictions_hindi))" ] diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index 9c0a5a4c7..5cb31fc93 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -102,9 +102,9 @@ def fit( }, ] - num_train_optimization_steps = ( - int(len(token_ids) / batch_size) * num_epochs - ) + num_examples = len(token_ids) + num_batches = int(num_examples / batch_size) + num_train_optimization_steps = num_batches * num_epochs if warmup_proportion is None: opt = BertAdam(optimizer_grouped_parameters, lr=lr) @@ -121,8 +121,6 @@ def fit( # train self.model.train() # training mode - num_examples = len(token_ids) - num_batches = int(num_examples / batch_size) token_type_ids_batch = None for epoch in range(num_epochs): diff --git a/utils_nlp/dataset/xnli.py b/utils_nlp/dataset/xnli.py index c838b1517..a233c9e7b 100644 --- a/utils_nlp/dataset/xnli.py +++ b/utils_nlp/dataset/xnli.py @@ -17,7 +17,8 @@ def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): - """Downloads and extracts the dataset files + """Downloads and 
extracts the dataset files. + Args: local_cache_path (str, optional): Path to store the data. Defaults to "./". From b0bfcba2eaff7425beef24fa8bf1e38cc0031c6e Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Fri, 21 Jun 2019 10:56:47 -0400 Subject: [PATCH 030/108] Added placeholder for token type ids. --- scenarios/text_classification/tc_dac_bert_ar.ipynb | 7 ++++--- scenarios/text_classification/tc_mnli_bert.ipynb | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/scenarios/text_classification/tc_dac_bert_ar.ipynb b/scenarios/text_classification/tc_dac_bert_ar.ipynb index f7a56b655..3c77a31e1 100644 --- a/scenarios/text_classification/tc_dac_bert_ar.ipynb +++ b/scenarios/text_classification/tc_dac_bert_ar.ipynb @@ -340,6 +340,7 @@ "- Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", "- Pad or truncate the token lists to the specified max length\n", "- Return mask lists that indicate paddings' positions\n", + "- Return token type id lists that indicate which sentence the tokens belong to (not needed for one-sequence classification)\n", "\n", "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" ] @@ -350,10 +351,10 @@ "metadata": {}, "outputs": [], "source": [ - "tokens_train, mask_train = tokenizer.preprocess_classification_tokens(\n", + "tokens_train, mask_train, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_train, MAX_LEN\n", ")\n", - "tokens_test, mask_test = tokenizer.preprocess_classification_tokens(\n", + "tokens_test, mask_test, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_test, MAX_LEN\n", ")" ] @@ -511,7 +512,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.6.5" } }, "nbformat": 4, diff --git a/scenarios/text_classification/tc_mnli_bert.ipynb b/scenarios/text_classification/tc_mnli_bert.ipynb index c7c2b0344..acc15bf23 100644 --- a/scenarios/text_classification/tc_mnli_bert.ipynb +++ b/scenarios/text_classification/tc_mnli_bert.ipynb @@ -275,6 +275,7 @@ "- Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", "- Pad or truncate the token lists to the specified max length\n", "- Return mask lists that indicate paddings' positions\n", + "- Return token type id lists that indicate which sentence the tokens belong to (not needed for one-sequence classification)\n", "\n", "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" ] @@ -285,10 +286,10 @@ "metadata": {}, "outputs": [], "source": [ - "tokens_train, mask_train = tokenizer.preprocess_classification_tokens(\n", + "tokens_train, mask_train, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_train, MAX_LEN\n", ")\n", - "tokens_test, mask_test = tokenizer.preprocess_classification_tokens(\n", + "tokens_test, mask_test, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_test, MAX_LEN\n", ")" ] @@ -446,7 +447,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.6.5" } }, "nbformat": 4, From 136cadf0fe8920b07f15c5f27e482cae4b7db94e Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 13:16:43 -0400 Subject: [PATCH 031/108] lm name changes --- utils_nlp/bert/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index 6d6cf1019..590137a7c 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -24,8 +24,8 @@ class Language(Enum): ENGLISHCASED = "bert-base-cased" ENGLISHLARGE = "bert-large-uncased" ENGLISHLARGECASED = "bert-large-cased" - ENGLISHLARGEWW = "bert-large-uncased-whole-word-masking" - ENGLISHLARGECASEDWW = "bert-large-cased-whole-word-masking" + ENGLISHLARGEWWM = "bert-large-uncased-whole-word-masking" + ENGLISHLARGECASEDWWM = "bert-large-cased-whole-word-masking" CHINESE = "bert-base-chinese" MULTILINGUAL = "bert-base-multilingual-cased" From aec2ffbfe718188322ea228f90caddfaf9f584c8 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 13:17:04 -0400 Subject: [PATCH 032/108] add namedtuple preds output --- utils_nlp/bert/sequence_classification.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index c34e2f2ef..df94497a9 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -5,6 +5,7 @@ # https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/run_classifier.py import random +from collections import namedtuple import numpy as np import torch @@ -168,11 +169,15 @@ def predict( If None is specified, all available GPUs will be used. Defaults to None. batch_size (int, optional): Scoring batch size. Defaults to 32. - probabilities: If true, the predicted probability distribution - is returned; otherwise, the predicted classes are returned. + class_proba (bool, str, optional): + If True, the predicted probability distribution is returned; + If False, the predicted classes are returned; + If "both", both the predicted classes and probabilities + are returned. Defaults to False. Returns: - [ndarray]: Predicted classes. + ndarray, namedtuple(ndarray, ndarray): Predicted classes, + probabilities, or both. """ device = get_device("cpu" if num_gpus == 0 else "gpu") @@ -204,7 +209,12 @@ def predict( preds = np.concatenate(preds) - if probabilities: + if class_proba == "both": + return namedtuple("Predictions", "probabilities classes")( + nn.Softmax(dim=1)(torch.Tensor(preds)).numpy(), + preds.argmax(axis=1), + ) + elif class_proba: return nn.Softmax(dim=1)(torch.Tensor(preds)).numpy() else: return preds.argmax(axis=1) From 8b56eec5cdcfb4549c38182391c5df12de102ee4 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 14:24:40 -0400 Subject: [PATCH 033/108] updated defaults for predict's output --- utils_nlp/bert/sequence_classification.py | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index df94497a9..57e4201a9 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -158,7 +158,7 @@ def predict( input_mask, num_gpus=None, batch_size=32, - probabilities=False, + return_proba=False, ): """Scores the given dataset and returns the predicted classes. @@ -169,15 +169,12 @@ def predict( If None is specified, all available GPUs will be used. Defaults to None. batch_size (int, optional): Scoring batch size. Defaults to 32. - class_proba (bool, str, optional): - If True, the predicted probability distribution is returned; - If False, the predicted classes are returned; - If "both", both the predicted classes and probabilities - are returned. 
- Defaults to False. + return_proba (bool, optional): + If True, the predicted probability distribution + is also returned. Defaults to False. Returns: - ndarray, namedtuple(ndarray, ndarray): Predicted classes, - probabilities, or both. + 1darray, namedtuple(1darray, ndarray): Predicted classes or + (classes, probabilities) if return_proba is True. """ device = get_device("cpu" if num_gpus == 0 else "gpu") @@ -209,12 +206,10 @@ def predict( preds = np.concatenate(preds) - if class_proba == "both": - return namedtuple("Predictions", "probabilities classes")( - nn.Softmax(dim=1)(torch.Tensor(preds)).numpy(), + if return_proba: + return namedtuple("Predictions", "classes probabilities")( preds.argmax(axis=1), + nn.Softmax(dim=1)(torch.Tensor(preds)).numpy(), ) - elif class_proba: - return nn.Softmax(dim=1)(torch.Tensor(preds)).numpy() else: return preds.argmax(axis=1) From 65f1c82a81be401fae7160c06bcc9686321f95ad Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 14:41:39 -0400 Subject: [PATCH 034/108] arg name change --- utils_nlp/bert/sequence_classification.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index 57e4201a9..10356d879 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -169,12 +169,12 @@ def predict( If None is specified, all available GPUs will be used. Defaults to None. batch_size (int, optional): Scoring batch size. Defaults to 32. - return_proba (bool, optional): + probabilities (bool, optional): If True, the predicted probability distribution is also returned. Defaults to False. Returns: 1darray, namedtuple(1darray, ndarray): Predicted classes or - (classes, probabilities) if return_proba is True. + (classes, probabilities) if probabilities is True. """ device = get_device("cpu" if num_gpus == 0 else "gpu") @@ -206,7 +206,7 @@ def predict( preds = np.concatenate(preds) - if return_proba: + if probabilities: return namedtuple("Predictions", "classes probabilities")( preds.argmax(axis=1), nn.Softmax(dim=1)(torch.Tensor(preds)).numpy(), From 399707e74761dc9ebf2a6ae47cc17903862def64 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 14:43:09 -0400 Subject: [PATCH 035/108] meh --- utils_nlp/bert/sequence_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index 10356d879..7874cb8c2 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -158,7 +158,7 @@ def predict( input_mask, num_gpus=None, batch_size=32, - return_proba=False, + probabilities=False, ): """Scores the given dataset and returns the predicted classes. From fb3f7ddef6fdbe03c36d0b69d9eff434f9114fd0 Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Fri, 21 Jun 2019 14:48:10 -0400 Subject: [PATCH 036/108] Moved _truncate_seq_pair outside of if else block. --- utils_nlp/bert/common.py | 81 +++++++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 34 deletions(-) diff --git a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index 7e289e657..4c0fdb779 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -53,15 +53,20 @@ def __init__( def tokenize(self, text): """Tokenizes a list of documents using a BERT tokenizer Args: - text (list): List of strings (one sequence) or tuples (two sequences). 
+ text (list): List of strings (one sequence) or + tuples (two sequences). Returns: - [list]: List of lists. Each sublist contains WordPiece tokens of the input sequence(s). + [list]: List of lists. Each sublist contains WordPiece tokens + of the input sequence(s). """ if isinstance(text[0], str): return [self.tokenizer.tokenize(x) for x in tqdm(text)] else: - return [[self.tokenizer.tokenize(x) for x in sentences] for sentences in tqdm(text)] + return [ + [self.tokenizer.tokenize(x) for x in sentences] + for sentences in tqdm(text) + ] def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): """Preprocessing of input tokens: @@ -75,7 +80,7 @@ def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): max_len (int, optional): Maximum number of tokens (documents will be truncated or padded). Defaults to 512. - Returns: + Returns: tuple: A tuple containing the following three lists list of preprocesssed token lists list of input mask lists @@ -89,50 +94,58 @@ def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): ) max_len = BERT_MAX_LEN + def _truncate_seq_pair(tokens_a, tokens_b, max_length): + """Truncates a sequence pair in place to the maximum length.""" + # This is a simple heuristic which will always truncate the longer + # sequence one token at a time. This makes more sense than + # truncating an equal percent of tokens from each, since if one + # sequence is very short then each token that's truncated likely + # contains more information than a longer sequence. + while True: + total_length = len(tokens_a) + len(tokens_b) + if total_length <= max_length: + break + if len(tokens_a) > len(tokens_b): + tokens_a.pop() + else: + tokens_b.pop() + + tokens_a.append("[SEP]") + tokens_b.append("[SEP]") + + return [tokens_a, tokens_b] + if isinstance(tokens[0], str): tokens = [x[0 : max_len - 2] + ["[SEP]"] for x in tokens] token_type_ids = None else: - - def _truncate_seq_pair(tokens_a, tokens_b, max_length): - """Truncates a sequence pair in place to the maximum length.""" - # This is a simple heuristic which will always truncate the longer sequence - # one token at a time. This makes more sense than truncating an equal percent - # of tokens from each, since if one sequence is very short then each token - # that's truncated likely contains more information than a longer sequence. - while True: - total_length = len(tokens_a) + len(tokens_b) - if total_length <= max_length: - break - if len(tokens_a) > len(tokens_b): - tokens_a.pop() - else: - tokens_b.pop() - - tokens_a.append("[SEP]") - tokens_b.append("[SEP]") - - return [tokens_a, tokens_b] - # print(tokens[:2]) # get tokens for each sentence [[t00, t01, ...] [t10, t11,... ]] - tokens = [_truncate_seq_pair(sentence[0], sentence[1], max_len - 3) # [CLS] + 2x [SEP] - for sentence in tokens] + tokens = [ + _truncate_seq_pair(sentence[0], sentence[1], max_len - 3) + for sentence in tokens + ] - # construct token_type_ids [[0, 0, 0, 0, ... 0, 1, 1, 1, ... 1], [0, 0, 0, ..., 1, 1, ] + # construct token_type_ids + # [[0, 0, 0, 0, ... 0, 1, 1, 1, ... 
1], [0, 0, 0, ..., 1, 1, ] token_type_ids = [ [[i] * len(sentence) for i, sentence in enumerate(example)] for example in tokens ] # merge sentences - tokens = [[token for sentence in example for token in sentence] - for example in tokens] + tokens = [ + [token for sentence in example for token in sentence] + for example in tokens + ] # prefix with [0] for [CLS] - token_type_ids = [[0] + [i for sentence in example for i in sentence] - for example in token_type_ids] + token_type_ids = [ + [0] + [i for sentence in example for i in sentence] + for example in token_type_ids + ] # pad sequence - token_type_ids = [x + [0] * (max_len - len(x)) - for x in token_type_ids] + token_type_ids = [ + x + [0] * (max_len - len(x)) for x in token_type_ids + ] tokens = [["[CLS]"] + x for x in tokens] # convert tokens to indices From ad3c8850ef37354bac0048ba8f0d6af005722eff Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 15:22:15 -0400 Subject: [PATCH 037/108] change line length --- .flake8 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.flake8 b/.flake8 index 562be066d..4d86469bd 100644 --- a/.flake8 +++ b/.flake8 @@ -13,4 +13,4 @@ # F821 undefined name 'get_ipython' --> from generated python files using nbconvert ignore = E203, E266, W503, F403, F405, E402, E731, F821 -max-line-length = 79 +max-line-length = 100 From 0bae4ff59c288a26c600c472b863b95d883d2063 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 16:18:48 -0400 Subject: [PATCH 038/108] add csv loader test --- tests/unit/test_data_loaders.py | 43 +++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 tests/unit/test_data_loaders.py diff --git a/tests/unit/test_data_loaders.py b/tests/unit/test_data_loaders.py new file mode 100644 index 000000000..6aca5e6e4 --- /dev/null +++ b/tests/unit/test_data_loaders.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import random + +import numpy as np +import pytest + +from utils_nlp.dataset.data_loaders import DaskCSVLoader + +UNIF1 = {"a": 0, "b": 10, "n": 1000} + + +@pytest.fixture() +def csv_file(tmpdir): + f = tmpdir.mkdir("test_loaders").join("tl_data.csv") + for i in range(1000): + f.write( + "\n".join( + [ + "{},{}".format( + random.randint(0, 1), + random.randint(UNIF1["a"], UNIF1["b"]), + ) + for x in range(UNIF1["n"]) + ] + ) + ) + return str(f) + + +def test_dask_csv_loader(csv_file): + num_batches = 500 + batch_size = 10 + + loader = DaskCSVLoader(csv_file, header=None) + sample = [] + for batch in loader.get_random_batches(num_batches, batch_size): + sample.append(list(batch.iloc[:, 1])) + sample = np.concatenate(sample) + print(sample.mean()) + assert sample.mean().round() == UNIF1["a"] + UNIF1["b"] / 2 + assert len(sample) == num_batches * batch_size From ed4e09b9b3219b6f1ca4fdc6b829068552e75547 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 16:33:37 -0400 Subject: [PATCH 039/108] edits to loader test --- tests/unit/test_data_loaders.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/unit/test_data_loaders.py b/tests/unit/test_data_loaders.py index 6aca5e6e4..aeda8bb00 100644 --- a/tests/unit/test_data_loaders.py +++ b/tests/unit/test_data_loaders.py @@ -31,13 +31,18 @@ def csv_file(tmpdir): def test_dask_csv_loader(csv_file): num_batches = 500 - batch_size = 10 + batch_size = 12 + num_partitions = 4 + + loader = DaskCSVLoader( + csv_file, header=None, block_size=5 * int(UNIF1["n"] / num_partitions) + ) - loader = DaskCSVLoader(csv_file, header=None) sample = [] for batch in loader.get_random_batches(num_batches, batch_size): sample.append(list(batch.iloc[:, 1])) sample = np.concatenate(sample) - print(sample.mean()) + + assert loader.df.npartitions == num_partitions assert sample.mean().round() == UNIF1["a"] + UNIF1["b"] / 2 - assert len(sample) == num_batches * batch_size + assert len(sample) <= num_batches * batch_size From 80487c804b1af31931bf54359a22ec41bb57b501 Mon Sep 17 00:00:00 2001 From: Abhiram E Date: Fri, 21 Jun 2019 17:12:34 -0400 Subject: [PATCH 040/108] Code changes based on code review comments. --- .gitignore | 3 - .../gensen_aml_deep_dive.ipynb | 71 ++- scenarios/sentence_similarity/gensen_train.py | 569 +++++++++--------- tools/generate_conda_file.py | 2 +- 4 files changed, 354 insertions(+), 291 deletions(-) diff --git a/.gitignore b/.gitignore index 18e4a9716..a7102e9f9 100644 --- a/.gitignore +++ b/.gitignore @@ -128,6 +128,3 @@ nlp_*.yaml data/ sentence-similarity/data/ -#Scripts -utils_nlp/gensen/gensen_config.json -utils_nlp/gensen/gensen_train.py diff --git a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb index 5ab45d0e7..7e40b298d 100644 --- a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb +++ b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb @@ -127,11 +127,18 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 1, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Failure while loading azureml_run_type_providers. 
Failed to load entrypoint hyperdrive = azureml.train.hyperdrive:HyperDriveRun._from_run_dto with exception module 'azureml.train.hyperdrive' has no attribute 'HyperDriveRun'.\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -188,7 +195,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 2, "metadata": { "scrolled": true }, @@ -242,7 +249,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -769,7 +776,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -777,7 +784,7 @@ "output_type": "stream", "text": [ "Found existing compute target.\n", - "{'currentNodeCount': 2, 'targetNodeCount': 2, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 2, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-20T22:33:59.775000+00:00', 'errors': None, 'creationTime': '2019-06-19T02:57:39.833104+00:00', 'modifiedTime': '2019-06-19T02:58:11.339451+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + "{'currentNodeCount': 2, 'targetNodeCount': 2, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 2, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-21T20:14:04.778000+00:00', 'errors': None, 'creationTime': '2019-06-19T02:57:39.833104+00:00', 'modifiedTime': '2019-06-19T02:58:11.339451+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] } ], @@ -814,7 +821,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -838,7 +845,7 @@ "### 2.3.1 Prepare Training Script\n", "Now you will need to create your training script. In this tutorial, the script for distributed training of GENSEN is already provided for you at `gensen_train.py`. \n", "\n", - "In this example, we use MLflow to log your metrics. We also use the [AzureML-Mlflow](https://pypi.org/project/azureml-mlflow/) package to further log these metrics to the Azure Portal. This is done with no change to the provided training script!\n", + "In this example, we use MLflow to log your metrics. We also use the [Azure ML-Mlflow](https://pypi.org/project/azureml-mlflow/) package to log these metrics to the Azure Portal. This is done with no change to the provided training script!\n", "\n", "In this example the script provided logs the loss for that minibatch to our Azure ML portal." 
] @@ -864,7 +871,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -873,14 +880,12 @@ "'../../utils_nlp/gensen/gensen_config.json'" ] }, - "execution_count": 26, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "import shutil\n", - "\n", "gensen_folder = os.path.join(project_folder,'utils_nlp/gensen/')\n", "shutil.copy('gensen_train.py', gensen_folder)\n", "shutil.copy('gensen_config.json', gensen_folder)" @@ -896,7 +901,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -916,7 +921,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -974,17 +979,24 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 9, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Submitting E:\\Projects\\NLP-BP\\temp\\nlp directory for run. The size of the directory >= 25 MB, so it can take a few minutes.\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ "Run(Experiment: pytorch-gensen,\n", - "Id: pytorch-gensen_1561070750_49a97f3c,\n", + "Id: pytorch-gensen_1561150688_f84eab04,\n", "Type: azureml.scriptrun,\n", "Status: Queued)\n" ] @@ -1105,6 +1117,35 @@ " ```" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.3.6 Clean up after training\n", + "\n", + "We finally delete the training script `gensen_train.py` and config file `gensen_config.json` from the project directory." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "gensen_train = os.path.join(project_folder,'utils_nlp/gensen/gensen_train.py')\n", + "gensen_config = os.path.join(project_folder,'utils_nlp/gensen/gensen_config.json')\n", + "\n", + "if os.path.isfile(gensen_train):\n", + " os.remove(gensen_train)\n", + "else:\n", + " print(\"Error: %s file not found\" % gensen_train)\n", + " \n", + "if os.path.isfile(gensen_config):\n", + " os.remove(gensen_config)\n", + "else:\n", + " print(\"Error: %s file not found\" % gensen_config)" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/scenarios/sentence_similarity/gensen_train.py b/scenarios/sentence_similarity/gensen_train.py index 2028207fe..253c75caa 100644 --- a/scenarios/sentence_similarity/gensen_train.py +++ b/scenarios/sentence_similarity/gensen_train.py @@ -38,6 +38,7 @@ ) cudnn.benchmark = True +logger = logging.getLogger(__name__) hvd.init() if torch.cuda.is_available(): @@ -169,7 +170,9 @@ def evaluate( # Horovod: print output only on first rank. if hvd.rank() == 0: # log the best val accuracy to AML run - logging.info("Best Validation Loss: %f", np.float(validation_loss)) + logging.info( + "Best Validation Loss: {}".format(np.float(validation_loss)) + ) # If the validation loss is small enough, and it starts to go up. # Should stop training. 
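The hunks below finish replacing the Azure ML `Run` logging calls in `gensen_train.py` with MLflow ones. A minimal sketch of the resulting logging pattern is shown here; the metric and parameter names match the diff, but the numeric values are illustrative only, and the portal integration relies on the azureml-mlflow plugin described in the notebook text above.

```python
# Minimal sketch (not part of the patch) of the MLflow pattern adopted here.
# Metric/parameter names follow the diff; the values are made up. When run
# inside an Azure ML job with azureml-mlflow installed, the same calls also
# surface in the Azure ML portal, per the notebook text above.
import mlflow

with mlflow.start_run():
    mlflow.log_param("learning_rate", 0.0001)
    for epoch, val_loss in enumerate([4.9, 4.8, 4.7], start=1):
        mlflow.log_metric("validation_loss", val_loss, step=epoch)
    mlflow.log_metric("nli_loss", 0.95, step=1)
```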
@@ -179,7 +182,6 @@ def evaluate( min_val_loss_epoch = monitor_epoch model_state = model.state_dict() - print(monitor_epoch, min_val_loss_epoch, min_val_loss) logging.info( "Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: " "%d Loss : %.3f " @@ -275,308 +277,331 @@ def train(config, data_folder, learning_rate=0.0001): owd = os.getcwd() os.chdir(data_folder) - with mlflow.start_run(): - save_dir = config["data"]["save_dir"] - if not os.path.exists("./log"): - os.makedirs("./log") - - os.makedirs(save_dir, exist_ok=True) - - setup_logging(config) - - batch_size = config["training"]["batch_size"] - src_vocab_size = config["model"]["n_words_src"] - trg_vocab_size = config["model"]["n_words_trg"] - max_len_src = config["data"]["max_src_length"] - max_len_trg = config["data"]["max_trg_length"] - model_state = {} - - train_src = [item["train_src"] for item in config["data"]["paths"]] - train_trg = [item["train_trg"] for item in config["data"]["paths"]] - tasknames = [item["taskname"] for item in config["data"]["paths"]] - - # Keep track of indicies to train forward and backward jointly - if ( - "skipthought_next" in tasknames - and "skipthought_previous" in tasknames - ): - skipthought_idx = tasknames.index("skipthought_next") - skipthought_backward_idx = tasknames.index("skipthought_previous") - paired_tasks = { - skipthought_idx: skipthought_backward_idx, - skipthought_backward_idx: skipthought_idx, - } - else: - paired_tasks = None - skipthought_idx = None - skipthought_backward_idx = None - - train_iterator = BufferedDataIterator( - train_src, - train_trg, - src_vocab_size, - trg_vocab_size, - tasknames, - save_dir, - buffer_size=1e6, - lowercase=True, - seed=(hvd.rank() + 1) * 12345, - ) + try: + with mlflow.start_run(): + save_dir = config["data"]["save_dir"] + if not os.path.exists("./log"): + os.makedirs("./log") - nli_iterator = NLIIterator( - train=config["data"]["nli_train"], - dev=config["data"]["nli_dev"], - test=config["data"]["nli_test"], - vocab_size=-1, - vocab=os.path.join(save_dir, "src_vocab.pkl"), - seed=(hvd.rank() + 1) * 12345, - ) + os.makedirs(save_dir, exist_ok=True) - src_vocab_size = len(train_iterator.src[0]["word2id"]) - trg_vocab_size = len(train_iterator.trg[0]["word2id"]) + setup_logging(config) - # Logging set up. 
- logging.info("Finished creating iterator ...") - log_config(config) - logging.info( - "Found %d words in source : " - % (len(train_iterator.src[0]["id2word"])) - ) - for idx, taskname in enumerate(tasknames): - logging.info( - "Found %d target words in task %s " - % (len(train_iterator.trg[idx]["id2word"]), taskname) - ) - logging.info("Found %d words in src " % src_vocab_size) - logging.info("Found %d words in trg " % trg_vocab_size) - - weight_mask = torch.ones(trg_vocab_size).cuda() - weight_mask[train_iterator.trg[0]["word2id"][""]] = 0 - loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda() - nli_criterion = nn.CrossEntropyLoss().cuda() - - model = MultitaskModel( - src_emb_dim=config["model"]["dim_word_src"], - trg_emb_dim=config["model"]["dim_word_trg"], - src_vocab_size=src_vocab_size, - trg_vocab_size=trg_vocab_size, - src_hidden_dim=config["model"]["dim_src"], - trg_hidden_dim=config["model"]["dim_trg"], - bidirectional=config["model"]["bidirectional"], - pad_token_src=train_iterator.src[0]["word2id"][""], - pad_token_trg=train_iterator.trg[0]["word2id"][""], - nlayers_src=config["model"]["n_layers_src"], - dropout=config["model"]["dropout"], - num_tasks=len(train_iterator.src), - paired_tasks=paired_tasks, - ).cuda() - - optimizer = setup_horovod(model, learning_rate=learning_rate) - logging.info(model) - - n_gpus = config["training"]["n_gpus"] - model = torch.nn.DataParallel(model, device_ids=range(n_gpus)) - - task_losses = [[] for _ in tasknames] - task_idxs = [0 for _ in tasknames] - nli_losses = [] - updates = 0 - nli_ctr = 0 - nli_epoch = 0 - monitor_epoch = 0 - nli_mbatch_ctr = 0 - mbatch_times = [] - min_val_loss = 10000000 - min_val_loss_epoch = -1 - rng_num_tasks = len(tasknames) - 1 if paired_tasks else len(tasknames) - print(os.environ) - mlflow.log_param("Learning Rate", learning_rate) - logging.info("Commencing Training ...") - start = time.time() - while True: - # Train NLI once every 10 minibatches of other tasks - if nli_ctr % 10 == 0: - minibatch = nli_iterator.get_parallel_minibatch( - nli_mbatch_ctr, batch_size * n_gpus - ) - optimizer.zero_grad() - class_logits = model( - minibatch, -1, return_hidden=False, paired_trg=None - ) + batch_size = config["training"]["batch_size"] + src_vocab_size = config["model"]["n_words_src"] + trg_vocab_size = config["model"]["n_words_trg"] + max_len_src = config["data"]["max_src_length"] + max_len_trg = config["data"]["max_trg_length"] + model_state = {} + + train_src = [item["train_src"] for item in config["data"]["paths"]] + train_trg = [item["train_trg"] for item in config["data"]["paths"]] + tasknames = [item["taskname"] for item in config["data"]["paths"]] - loss = nli_criterion( - class_logits.contiguous().view(-1, class_logits.size(1)), - minibatch["labels"].contiguous().view(-1), + # Keep track of indicies to train forward and backward jointly + if ( + "skipthought_next" in tasknames + and "skipthought_previous" in tasknames + ): + skipthought_idx = tasknames.index("skipthought_next") + skipthought_backward_idx = tasknames.index( + "skipthought_previous" ) + paired_tasks = { + skipthought_idx: skipthought_backward_idx, + skipthought_backward_idx: skipthought_idx, + } + else: + paired_tasks = None + skipthought_idx = None + skipthought_backward_idx = None + + train_iterator = BufferedDataIterator( + train_src, + train_trg, + src_vocab_size, + trg_vocab_size, + tasknames, + save_dir, + buffer_size=1e6, + lowercase=True, + seed=(hvd.rank() + 1) * 12345, + ) - # nli_losses.append(loss.data[0]) - 
nli_losses.append(loss.item()) - loss.backward() - torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) - optimizer.step() + nli_iterator = NLIIterator( + train=config["data"]["nli_train"], + dev=config["data"]["nli_dev"], + test=config["data"]["nli_test"], + vocab_size=-1, + vocab=os.path.join(save_dir, "src_vocab.pkl"), + seed=(hvd.rank() + 1) * 12345, + ) - nli_mbatch_ctr += batch_size * n_gpus - if nli_mbatch_ctr >= len(nli_iterator.train_lines): - nli_mbatch_ctr = 0 - nli_epoch += 1 - else: - # Sample a random task - task_idx = np.random.randint(low=0, high=rng_num_tasks) - - # Get a minibatch corresponding to the sampled task - minibatch = train_iterator.get_parallel_minibatch( - task_idx, - task_idxs[task_idx], - batch_size * n_gpus, - max_len_src, - max_len_trg, - ) + src_vocab_size = len(train_iterator.src[0]["word2id"]) + trg_vocab_size = len(train_iterator.trg[0]["word2id"]) - """Increment pointer into task and if current buffer is - exhausted, fetch new buffer. """ - task_idxs[task_idx] += batch_size * n_gpus - if task_idxs[task_idx] >= train_iterator.buffer_size: - train_iterator.fetch_buffer(task_idx) - task_idxs[task_idx] = 0 - - if task_idx == skipthought_idx: - minibatch_back = train_iterator.get_parallel_minibatch( - skipthought_backward_idx, - task_idxs[skipthought_backward_idx], - batch_size * n_gpus, - max_len_src, - max_len_trg, + # Logging set up. + logging.info("Finished creating iterator ...") + log_config(config) + logging.info( + "Found %d words in source : " + % (len(train_iterator.src[0]["id2word"])) + ) + for idx, taskname in enumerate(tasknames): + logging.info( + "Found %d target words in task %s " + % (len(train_iterator.trg[idx]["id2word"]), taskname) + ) + logging.info("Found %d words in src " % src_vocab_size) + logging.info("Found %d words in trg " % trg_vocab_size) + + weight_mask = torch.ones(trg_vocab_size).cuda() + weight_mask[train_iterator.trg[0]["word2id"][""]] = 0 + loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda() + nli_criterion = nn.CrossEntropyLoss().cuda() + + model = MultitaskModel( + src_emb_dim=config["model"]["dim_word_src"], + trg_emb_dim=config["model"]["dim_word_trg"], + src_vocab_size=src_vocab_size, + trg_vocab_size=trg_vocab_size, + src_hidden_dim=config["model"]["dim_src"], + trg_hidden_dim=config["model"]["dim_trg"], + bidirectional=config["model"]["bidirectional"], + pad_token_src=train_iterator.src[0]["word2id"][""], + pad_token_trg=train_iterator.trg[0]["word2id"][""], + nlayers_src=config["model"]["n_layers_src"], + dropout=config["model"]["dropout"], + num_tasks=len(train_iterator.src), + paired_tasks=paired_tasks, + ).cuda() + + optimizer = setup_horovod(model, learning_rate=learning_rate) + logging.info(model) + + n_gpus = config["training"]["n_gpus"] + model = torch.nn.DataParallel(model, device_ids=range(n_gpus)) + + task_losses = [[] for _ in tasknames] + task_idxs = [0 for _ in tasknames] + nli_losses = [] + updates = 0 + nli_ctr = 0 + nli_epoch = 0 + monitor_epoch = 0 + nli_mbatch_ctr = 0 + mbatch_times = [] + min_val_loss = 10000000 + min_val_loss_epoch = -1 + rng_num_tasks = ( + len(tasknames) - 1 if paired_tasks else len(tasknames) + ) + logging.info("OS Environ: \n {} \n\n".format(os.environ)) + mlflow.log_param("learning_rate", learning_rate) + logging.info("Commencing Training ...") + start = time.time() + while True: + batch_start_time = time.time() + # Train NLI once every 10 minibatches of other tasks + if nli_ctr % 10 == 0: + minibatch = nli_iterator.get_parallel_minibatch( + nli_mbatch_ctr, 
batch_size * n_gpus ) - task_idxs[skipthought_backward_idx] += batch_size * n_gpus - if ( - task_idxs[skipthought_backward_idx] - >= train_iterator.buffer_size - ): - train_iterator.fetch_buffer(skipthought_backward_idx) - task_idxs[skipthought_backward_idx] = 0 - optimizer.zero_grad() - decoder_logit, decoder_logit_2 = model( - minibatch, - task_idx, - paired_trg=minibatch_back["input_trg"], - ) - - loss_f = loss_criterion( - decoder_logit.contiguous().view( - -1, decoder_logit.size(2) - ), - minibatch["output_trg"].contiguous().view(-1), + class_logits = model( + minibatch, -1, return_hidden=False, paired_trg=None ) - loss_b = loss_criterion( - decoder_logit_2.contiguous().view( - -1, decoder_logit_2.size(2) + loss = nli_criterion( + class_logits.contiguous().view( + -1, class_logits.size(1) ), - minibatch_back["output_trg"].contiguous().view(-1), + minibatch["labels"].contiguous().view(-1), ) - task_losses[task_idx].append(loss_f.data[0]) - task_losses[skipthought_backward_idx].append( - loss_b.data[0] - ) - loss = loss_f + loss_b + # nli_losses.append(loss.data[0]) + nli_losses.append(loss.item()) + loss.backward() + torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) + optimizer.step() + nli_mbatch_ctr += batch_size * n_gpus + if nli_mbatch_ctr >= len(nli_iterator.train_lines): + nli_mbatch_ctr = 0 + nli_epoch += 1 else: - optimizer.zero_grad() - decoder_logit = model(minibatch, task_idx) + # Sample a random task + task_idx = np.random.randint(low=0, high=rng_num_tasks) - loss = loss_criterion( - decoder_logit.contiguous().view( - -1, decoder_logit.size(2) - ), - minibatch["output_trg"].contiguous().view(-1), + # Get a minibatch corresponding to the sampled task + minibatch = train_iterator.get_parallel_minibatch( + task_idx, + task_idxs[task_idx], + batch_size * n_gpus, + max_len_src, + max_len_trg, ) - task_losses[task_idx].append(loss.item()) + """Increment pointer into task and if current buffer is + exhausted, fetch new buffer. """ + task_idxs[task_idx] += batch_size * n_gpus + if task_idxs[task_idx] >= train_iterator.buffer_size: + train_iterator.fetch_buffer(task_idx) + task_idxs[task_idx] = 0 + + if task_idx == skipthought_idx: + minibatch_back = train_iterator.get_parallel_minibatch( + skipthought_backward_idx, + task_idxs[skipthought_backward_idx], + batch_size * n_gpus, + max_len_src, + max_len_trg, + ) + task_idxs[skipthought_backward_idx] += ( + batch_size * n_gpus + ) + if ( + task_idxs[skipthought_backward_idx] + >= train_iterator.buffer_size + ): + train_iterator.fetch_buffer( + skipthought_backward_idx + ) + task_idxs[skipthought_backward_idx] = 0 + + optimizer.zero_grad() + decoder_logit, decoder_logit_2 = model( + minibatch, + task_idx, + paired_trg=minibatch_back["input_trg"], + ) - loss.backward() - # For distributed optimizer need to sync before gradient - # clipping. 
- optimizer.synchronize() + loss_f = loss_criterion( + decoder_logit.contiguous().view( + -1, decoder_logit.size(2) + ), + minibatch["output_trg"].contiguous().view(-1), + ) - torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) - optimizer.step() + loss_b = loss_criterion( + decoder_logit_2.contiguous().view( + -1, decoder_logit_2.size(2) + ), + minibatch_back["output_trg"].contiguous().view(-1), + ) - end = time.time() - mbatch_times.append(end - start) + task_losses[task_idx].append(loss_f.data[0]) + task_losses[skipthought_backward_idx].append( + loss_b.data[0] + ) + loss = loss_f + loss_b + + else: + optimizer.zero_grad() + decoder_logit = model(minibatch, task_idx) + + loss = loss_criterion( + decoder_logit.contiguous().view( + -1, decoder_logit.size(2) + ), + minibatch["output_trg"].contiguous().view(-1), + ) + + task_losses[task_idx].append(loss.item()) + + loss.backward() + # For distributed optimizer need to sync before gradient + # clipping. + optimizer.synchronize() + + torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) + optimizer.step() + + end = time.time() + mbatch_times.append(end - batch_start_time) + + # Validations + if ( + updates % config["management"]["monitor_loss"] == 0 + and updates != 0 + ): + monitor_epoch += 1 + for idx, task in enumerate(tasknames): + logging.info( + "Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s " + "minibatches : %d" + % ( + updates, + task, + np.mean(task_losses[idx]), + task, + len(task_losses[idx]), + ) + ) + mlflow.log_metric( + "validation_loss", + np.mean(task_losses[idx]), + step=monitor_epoch, + ) - # Validations - if ( - updates % config["management"]["monitor_loss"] == 0 - and updates != 0 - ): - monitor_epoch += 1 - for idx, task in enumerate(tasknames): logging.info( - "Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s " - "minibatches : %d" + "Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI " + "Loss : %.5f " % ( - updates, - task, - np.mean(task_losses[idx]), - task, - len(task_losses[idx]), + nli_ctr, + nli_epoch, + nli_mbatch_ctr, + np.mean(nli_losses), ) ) mlflow.log_metric( - "Validation Loss", - np.mean(task_losses[idx]), - step=monitor_epoch, + "nli_loss", np.mean(nli_losses), step=nli_epoch ) - logging.info( - "Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI " - "Loss : %.5f " - % (nli_ctr, nli_epoch, nli_mbatch_ctr, np.mean(nli_losses)) - ) - mlflow.log_metric( - "NLI Loss", np.mean(nli_losses), step=nli_epoch - ) - logging.info( - "Average time per mininbatch : %.5f" - % (np.mean(mbatch_times)) - ) - task_losses = [[] for _ in tasknames] - mbatch_times = [] - nli_losses = [] - - # For validate and break if done. 
- logging.info("############################") - logging.info("##### Evaluating model #####") - logging.info("############################") - training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate( - config=config, - train_iterator=train_iterator, - model=model, - loss_criterion=loss_criterion, - monitor_epoch=monitor_epoch, - min_val_loss=min_val_loss, - min_val_loss_epoch=min_val_loss_epoch, - save_dir=save_dir, - starting_time=start, - model_state=model_state, - ) - if training_complete: - break - - logging.info("Evaluating on NLI") - evaluate_nli( - nli_iterator=nli_iterator, - model=model, - n_gpus=n_gpus, - batch_size=batch_size, - ) + logging.info( + "Average time per mininbatch : %.5f" + % (np.mean(mbatch_times)) + ) + mlflow.log_metric( + "minibatch_avg_duration", np.mean(mbatch_times) + ) + + task_losses = [[] for _ in tasknames] + mbatch_times = [] + nli_losses = [] + + # For validate and break if done. + logging.info("############################") + logging.info("##### Evaluating model #####") + logging.info("############################") + training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate( + config=config, + train_iterator=train_iterator, + model=model, + loss_criterion=loss_criterion, + monitor_epoch=monitor_epoch, + min_val_loss=min_val_loss, + min_val_loss_epoch=min_val_loss_epoch, + save_dir=save_dir, + starting_time=start, + model_state=model_state, + ) + if training_complete: + break + + logging.info("Evaluating on NLI") + evaluate_nli( + nli_iterator=nli_iterator, + model=model, + n_gpus=n_gpus, + batch_size=batch_size, + ) - updates += batch_size * n_gpus - nli_ctr += 1 - logging.info("Updates: %d" % updates) - os.chdir(owd) + updates += batch_size * n_gpus + nli_ctr += 1 + logging.info("Updates: %d" % updates) + finally: + os.chdir(owd) def read_config(json_file): diff --git a/tools/generate_conda_file.py b/tools/generate_conda_file.py index 372eb6e1a..7753e2ec7 100644 --- a/tools/generate_conda_file.py +++ b/tools/generate_conda_file.py @@ -72,7 +72,7 @@ "nltk": "nltk>=3.4", "pytorch-pretrained-bert": "pytorch-pretrained-bert>=0.6", "seqeval": "seqeval>=0.0.12", - "azureml-mlflow": "azureml-mlflow>=1.0.41", + "azureml-mlflow": "azureml-mlflow>=1.0.43.1", } PIP_GPU = {"horovod": "horovod>=0.16.1"} From 9c3a95159cb6d2cafc61d6f5aa989d190f7a7902 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Fri, 21 Jun 2019 17:37:03 -0400 Subject: [PATCH 041/108] add sequential loader test --- tests/unit/test_data_loaders.py | 22 ++++++++++++++++++++-- utils_nlp/dataset/data_loaders.py | 26 ++++++++++++++------------ 2 files changed, 34 insertions(+), 14 deletions(-) diff --git a/tests/unit/test_data_loaders.py b/tests/unit/test_data_loaders.py index aeda8bb00..0a535c3e7 100644 --- a/tests/unit/test_data_loaders.py +++ b/tests/unit/test_data_loaders.py @@ -8,7 +8,7 @@ from utils_nlp.dataset.data_loaders import DaskCSVLoader -UNIF1 = {"a": 0, "b": 10, "n": 1000} +UNIF1 = {"a": 0, "b": 10, "n": 1000} # some uniform distribution @pytest.fixture() @@ -29,7 +29,7 @@ def csv_file(tmpdir): return str(f) -def test_dask_csv_loader(csv_file): +def test_dask_csv_rnd_loader(csv_file): num_batches = 500 batch_size = 12 num_partitions = 4 @@ -46,3 +46,21 @@ def test_dask_csv_loader(csv_file): assert loader.df.npartitions == num_partitions assert sample.mean().round() == UNIF1["a"] + UNIF1["b"] / 2 assert len(sample) <= num_batches * batch_size + + +def test_dask_csv_seq_loader(csv_file): + batch_size = 12 + num_partitions = 4 + + loader = 
DaskCSVLoader( + csv_file, header=None, block_size=5 * int(UNIF1["n"] / num_partitions) + ) + + sample = [] + for batch in loader.get_sequential_batches(batch_size): + sample.append(list(batch.iloc[:, 1])) + sample = np.concatenate(sample) + + assert loader.df.npartitions == num_partitions + assert sample.mean().round() == UNIF1["a"] + UNIF1["b"] / 2 + assert len(sample) == UNIF1["n"] diff --git a/utils_nlp/dataset/data_loaders.py b/utils_nlp/dataset/data_loaders.py index 014a8b334..08111fd95 100644 --- a/utils_nlp/dataset/data_loaders.py +++ b/utils_nlp/dataset/data_loaders.py @@ -18,18 +18,19 @@ def __init__( random_seed=None, ): """Initializes the loader. - Args: - file_path (str): Path to delimited file. - sep (str, optional): Delimiter. Defaults to ",". - header (str, optional): Number of rows to be used as the header. - See pandas.read_csv() - Defaults to "infer". - block_size (int, optional): Size of partition in bytes. - See dask.dataframe.read_csv() - Defaults to 10e6. - random_seed (int, optional): Random seed. See random.seed(). - Defaults to None. - """ + + Args: + file_path (str): Path to delimited file. + sep (str, optional): Delimiter. Defaults to ",". + header (str, optional): Number of rows to be used as the header. + See pandas.read_csv() + Defaults to "infer". + block_size (int, optional): Size of partition in bytes. + See dask.dataframe.read_csv() + Defaults to 10e6. + random_seed (int, optional): Random seed. See random.seed(). + Defaults to None. + """ self.df = dd.read_csv( file_path, sep=sep, header=header, blocksize=block_size @@ -62,6 +63,7 @@ def get_sequential_batches(self, batch_size): """Creates a sequential generator. Batches returned are pandas dataframes of length=batch_size. Note: Final batch might be of smaller size. + Args: batch_size (int): Batch size. """ From 480a08f544b81af8089a3e5829219b2d511539d0 Mon Sep 17 00:00:00 2001 From: hlums Date: Sun, 23 Jun 2019 21:51:34 +0000 Subject: [PATCH 042/108] Updated NER notebook with new tokenizer api. 
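A minimal usage sketch of the sequential loader exercised by the new unit test above. This is illustrative only and not part of the patch; the CSV path and batch size are placeholders, while DaskCSVLoader and get_sequential_batches are the utilities shown in the diff.

from utils_nlp.dataset.data_loaders import DaskCSVLoader

# Stream a delimited file in order; each batch is a pandas DataFrame.
loader = DaskCSVLoader("my_data.csv", header=None, block_size=int(10e6))

total_rows = 0
for batch in loader.get_sequential_batches(batch_size=32):
    # Batches hold at most 32 rows; only the final batch may be smaller.
    total_rows += len(batch)

print("Rows streamed:", total_rows)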
--- .../ner_wikigold_bert.ipynb | 184 +++++++++--------- utils_nlp/dataset/msra_ner.py | 44 +++++ utils_nlp/dataset/ner_utils.py | 46 +++++ 3 files changed, 183 insertions(+), 91 deletions(-) create mode 100644 utils_nlp/dataset/msra_ner.py create mode 100644 utils_nlp/dataset/ner_utils.py diff --git a/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb b/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb index 40928c806..c637a33f2 100644 --- a/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb +++ b/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb @@ -29,7 +29,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 1, "metadata": { "scrolled": true }, @@ -47,9 +47,9 @@ "if nlp_path not in sys.path:\n", " sys.path.insert(0, nlp_path)\n", "\n", - "from utils_nlp.bert.token_classification import BERTTokenClassifier, postprocess_token_labels\n", + "from utils_nlp.bert.token_classification import BERTTokenClassifier, create_label_map, postprocess_token_labels\n", "from utils_nlp.bert.common import Language, Tokenizer\n", - "from utils_nlp.dataset.wikigold import download, read_data, get_train_test_data, get_unique_labels" + "from utils_nlp.dataset.wikigold import load_train_test_dfs, get_unique_labels" ] }, { @@ -61,16 +61,14 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 2, "metadata": { "scrolled": true }, "outputs": [], "source": [ "# path configurations\n", - "DATA_DIR = \"./data\"\n", - "DATA_FILE = \"./data/wikigold.conll.txt\"\n", - "CACHE_DIR=\".\"\n", + "CACHE_DIR=\"./temp\"\n", "\n", "# set random seeds\n", "RANDOM_SEED = 100\n", @@ -82,12 +80,15 @@ "MAX_SEQ_LENGTH = 200\n", "\n", "# training configurations\n", - "DEVICE=\"gpu\"\n", "BATCH_SIZE = 16\n", "NUM_TRAIN_EPOCHS = 5\n", "\n", "# optimizer configuration\n", - "LEARNING_RATE = 3e-5" + "LEARNING_RATE = 3e-5\n", + "\n", + "# data configurations\n", + "TEXT_COL = \"sentence\"\n", + "LABELS_COL = \"labels\"" ] }, { @@ -102,18 +103,18 @@ "metadata": {}, "source": [ "### Get training and testing data\n", - "The dataset used in this notebook is the [wikigold dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). The `download` function downloads the data file to a user-specified directory. \n", + "The dataset used in this notebook is the [wikigold dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). \n", "\n", - "The helper function `get_train_test_data` splits the dataset into training and testing sets according to `test_percentage`. Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", + "The helper function `load_train_test_dfs` downloads the data file if it doesn't exist in local_cache_path. It splits the dataset into training and testing sets according to `test_percentage`. 
Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", "\n", - "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). An 'X' label is added for the trailing word pieces generated by BERT, because BERT uses WordPiece tokenizer. \n", + "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). \n", "\n", "The maximum number of words in a sentence is 144, so we set MAX_SEQ_LENGTH to 200 above, because the number of tokens will grow after WordPiece tokenization." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "metadata": { "scrolled": true }, @@ -126,7 +127,7 @@ "Maximum sequence length in testing data is: 81\n", "\n", "Unique entity labels: \n", - "['O', 'I-LOC', 'I-MISC', 'I-PER', 'I-ORG', 'X']\n", + "['O', 'I-LOC', 'I-MISC', 'I-PER', 'I-ORG']\n", "\n", "Sample sentence: \n", "Two , Samsung based , electronic cash registers were reconstructed in order to expand their functions and adapt them for networking .\n", @@ -138,44 +139,37 @@ } ], "source": [ - "download(DATA_DIR)\n", - "wikigold_text = read_data(DATA_FILE)\n", - "train_text, train_labels, test_text, test_labels = get_train_test_data(wikigold_text, \n", - " test_percentage=0.5, \n", - " random_seed=RANDOM_SEED)\n", + "train_df, test_df = load_train_test_dfs(local_cache_path=CACHE_DIR, test_percentage=0.5,random_seed=RANDOM_SEED)\n", "label_list = get_unique_labels()\n", "print('\\nUnique entity labels: \\n{}\\n'.format(label_list))\n", - "print('Sample sentence: \\n{}\\n'.format(train_text[0]))\n", - "print('Sample sentence labels: \\n{}\\n'.format(train_labels[0]))" + "print('Sample sentence: \\n{}\\n'.format(train_df[TEXT_COL][0]))\n", + "print('Sample sentence labels: \\n{}\\n'.format(train_df[LABELS_COL][0]))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Tokenization and Preprocessing\n", - "The `preprocess_ner_tokens` method of the `Tokenizer` class converts raw string data to numerical features, involving the following steps:\n", - "1. WordPiece tokenization.\n", - "2. Convert tokens and labels to numerical values, i.e. token ids and label ids.\n", - "3. Sequence padding or truncation according to the `max_seq_length` configuration." + "### Tokenization and Preprocessing\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Create a dictionary that maps labels to numerical values**" + "**Create a dictionary that maps labels to numerical values** \n", + "Note there is an argument called trailing_piece_tag. BERT uses a WordPiece tokenizer which breaks down some words into multiple tokens, e.g. \"criticize\" is tokenized into \"critic\" and \"##ize\". Since the input data only come with one token label for \"criticize\", within Tokenizer.prerocess_ner_tokens, the original token label is assigned to the first token \"critic\" and the second token \"##ize\" is labeled as \"X\". By default, trailing_piece_tag is set to \"X\". 
If \"X\" already exists in your data, you can set trailing_piece_tag to another value that doesn't exist in your data." ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "metadata": { "scrolled": true }, "outputs": [], "source": [ - "label_map = {label: i for i, label in enumerate(label_list)}" + "label_map = create_label_map(label_list, trailing_piece_tag=\"X\")" ] }, { @@ -187,7 +181,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "metadata": { "scrolled": true }, @@ -202,29 +196,32 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Create numerical features** \n", - "Note there is an argument called `trailing_piece_tag`. BERT uses a WordPiece tokenizer which breaks down some words into multiple tokens, e.g. \"playing\" is tokenized into \"play\" and \"##ing\". Since the input data only come with one token label for \"playing\", within `prerocess_ner_tokens`, the original token label is assigned to the first token \"play\" and the second token \"##ing\" is labeled as \"X\". By default, `trailing_piece_tag` is set to \"X\". If \"X\" already exists in your data, you can set `trailing_piece_tag` to another value that doesn't exist in your data. " + "**Tokenize and preprocess text** \n", + "The `tokenize_preprocess_ner_text` method of the `Tokenizer` class converts text and labels in strings to numerical features, involving the following steps:\n", + "1. WordPiece tokenization.\n", + "2. Convert tokens and labels to numerical values, i.e. token ids and label ids.\n", + "3. Sequence padding or truncation according to the `max_seq_length` configuration." ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": { - "scrolled": false + "scrolled": true }, "outputs": [], "source": [ "train_token_ids, train_input_mask, train_trailing_token_mask, train_label_ids = \\\n", - " tokenizer.preprocess_ner_tokens(text=train_text,\n", + " tokenizer.tokenize_preprocess_ner_text(text=train_df[TEXT_COL],\n", " label_map=label_map,\n", " max_len=MAX_SEQ_LENGTH,\n", - " labels=train_labels,\n", + " labels=train_df[LABELS_COL],\n", " trailing_piece_tag=\"X\")\n", "test_token_ids, test_input_mask, test_trailing_token_mask, test_label_ids = \\\n", - " tokenizer.preprocess_ner_tokens(text=test_text,\n", + " tokenizer.tokenize_preprocess_ner_text(text=test_df[TEXT_COL],\n", " label_map=label_map,\n", " max_len=MAX_SEQ_LENGTH,\n", - " labels=test_labels,\n", + " labels=test_df[LABELS_COL],\n", " trailing_piece_tag=\"X\")" ] }, @@ -232,16 +229,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "`Tokenizer.preprocess_ner_tokens` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", + "`Tokenizer.tokenize_preprocess_ner_text` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", "1. token ids: list of numerical values each corresponds to a token.\n", "2. attention mask: list of 1s and 0s, 1 for input tokens and 0 for padded tokens, so that padded tokens are not attended to. \n", - "3. trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ing. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", + "3. 
trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ize. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", "4. label ids: list of numerical values each corresponds to an entity label, if `labels` is provided." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "metadata": { "scrolled": true }, @@ -290,7 +287,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 8, "metadata": { "scrolled": true }, @@ -310,7 +307,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 9, "metadata": { "scrolled": false }, @@ -336,10 +333,10 @@ "output_type": "stream", "text": [ "\n", - "Iteration: 40%|███▉ | 23/58 [00:30<00:45, 1.31s/it]\u001b[A\n", - "Iteration: 40%|███▉ | 23/58 [00:49<00:45, 1.31s/it]\u001b[A\n", - "Iteration: 81%|████████ | 47/58 [01:01<00:14, 1.31s/it]\u001b[A\n", - "Epoch: 20%|██ | 1/5 [01:15<05:01, 75.44s/it]0s/it]\u001b[A\n", + "Iteration: 43%|████▎ | 25/58 [00:30<00:40, 1.22s/it]\u001b[A\n", + "Iteration: 43%|████▎ | 25/58 [00:49<00:40, 1.22s/it]\u001b[A\n", + "Iteration: 86%|████████▌ | 50/58 [01:00<00:09, 1.22s/it]\u001b[A\n", + "Epoch: 20%|██ | 1/5 [01:10<04:42, 70.55s/it]1s/it]\u001b[A\n", "Iteration: 0%| | 0/58 [00:00 1]) + ) + labels_list.append([t[1] for t in s_split_split if len(t) > 1]) + if len(s_split_split) > max_seq_len: + max_seq_len = len(s_split_split) + print( + "Maximum sequence length in {0} data is: {1}".format( + data_type, max_seq_len + ) + ) + return sentence_list, labels_list From 044af9efefabb2edeb31e96d3a1e90f15f9c6db4 Mon Sep 17 00:00:00 2001 From: hlums Date: Sun, 23 Jun 2019 21:52:25 +0000 Subject: [PATCH 043/108] Updated ner token preprocessing for Chinese text. --- tests/unit/test_bert_common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/unit/test_bert_common.py b/tests/unit/test_bert_common.py index f611b5ece..e73dfc826 100644 --- a/tests/unit/test_bert_common.py +++ b/tests/unit/test_bert_common.py @@ -13,7 +13,7 @@ def test_tokenizer_preprocess_ner_tokens(ner_test_data): tokenizer = Tokenizer(language=Language.ENGLISHCASED, to_lower=False) # test providing labels - preprocessed_tokens = tokenizer.preprocess_ner_tokens( + preprocessed_tokens = tokenizer.tokenize_preprocess_ner_text( text=ner_test_data["INPUT_TEXT"], labels=ner_test_data["INPUT_LABELS"], label_map=ner_test_data["LABEL_MAP"], @@ -28,7 +28,7 @@ def test_tokenizer_preprocess_ner_tokens(ner_test_data): assert preprocessed_tokens[3] == ner_test_data["EXPECTED_LABEL_IDS"] # test not providing labels - preprocessed_tokens = tokenizer.preprocess_ner_tokens( + preprocessed_tokens = tokenizer.tokenize_preprocess_ner_text( text=ner_test_data["INPUT_TEXT"], label_map=ner_test_data["LABEL_MAP"], max_len=20, From 7b827d6d5bfed4a28c3dfcc914ed62c0ad0dc397 Mon Sep 17 00:00:00 2001 From: hlums Date: Sun, 23 Jun 2019 21:53:06 +0000 Subject: [PATCH 044/108] Updated ner token preprocessing for Chinese text. --- utils_nlp/bert/common.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index 52dcdb2d5..8cfd8ecef 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -1,11 +1,12 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. 
-from pytorch_pretrained_bert.tokenization import BertTokenizer from enum import Enum import warnings import torch +from pytorch_pretrained_bert.tokenization import BertTokenizer + from torch.utils.data import ( DataLoader, RandomSampler, @@ -87,7 +88,7 @@ def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): input_mask = [[min(1, x) for x in y] for y in tokens] return tokens, input_mask - def preprocess_ner_tokens( + def tokenize_preprocess_ner_text( self, text, max_len=BERT_MAX_LEN, @@ -96,7 +97,7 @@ def preprocess_ner_tokens( trailing_piece_tag="X", ): """ - Preprocesses input text, involving the following steps + Tokenize and preprocesses input text, involving the following steps 0. Tokenize input text. 1. Convert string tokens to token ids. 2. Convert input labels to label ids, if labels and label_map are @@ -118,8 +119,8 @@ def preprocess_ner_tokens( labels (which may be string type) to integers. Default value is None. trailing_piece_tag (str, optional): Tag used to label trailing - word pieces. For example, "playing" is broken into "play" - and "##ing", "play" preserves its original label and "##ing" + word pieces. For example, "criticize" is broken into "critic" + and "##ize", "critic" preserves its original label and "##ize" is labeled as trailing_piece_tag. Default value is "X". Returns: @@ -134,7 +135,7 @@ def preprocess_ner_tokens( 3. trailing_token_mask: List of lists. Each sublist is a boolean list, True for the first word piece of each original word, False for the trailing word pieces, - e.g. "##ing". This mask is useful for removing the + e.g. "##ize". This mask is useful for removing the predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. @@ -142,6 +143,8 @@ def preprocess_ner_tokens( each sublist contains token labels of a input sentence/paragraph, if labels is provided. """ + text = [self.tokenizer.basic_tokenizer._tokenize_chinese_chars(t) + for t in text] if max_len > BERT_MAX_LEN: warnings.warn( "setting max_len to max allowed tokens: {}".format( @@ -162,7 +165,7 @@ def preprocess_ner_tokens( trailing_token_mask_all = [] for t, t_labels in zip(text, labels): new_labels = [] - tokens = [] + new_tokens = [] if label_available: for word, tag in zip(t.split(), t_labels): sub_words = self.tokenizer.tokenize(word) @@ -170,7 +173,7 @@ def preprocess_ner_tokens( if count > 0: tag = trailing_piece_tag new_labels.append(tag) - tokens.append(sub_word) + new_tokens.append(sub_word) else: for word in t.split(): sub_words = self.tokenizer.tokenize(word) @@ -180,12 +183,12 @@ def preprocess_ner_tokens( else: tag = "O" new_labels.append(tag) - tokens.append(sub_word) + new_tokens.append(sub_word) - if len(tokens) > max_len: - tokens = tokens[:max_len] + if len(new_tokens) > max_len: + new_tokens = new_tokens[:max_len] new_labels = new_labels[:max_len] - input_ids = self.tokenizer.convert_tokens_to_ids(tokens) + input_ids = self.tokenizer.convert_tokens_to_ids(new_tokens) # The mask has 1 for real tokens and 0 for padding tokens. # Only real tokens are attended to. From 7c35f670d2154c8eb5f286996f5bb9cbe160a3e6 Mon Sep 17 00:00:00 2001 From: hlums Date: Sun, 23 Jun 2019 21:55:51 +0000 Subject: [PATCH 045/108] Added probabilities output to BERT token classifier. 
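A small usage sketch of the renamed tokenize_preprocess_ner_text method changed above. Illustrative only, not part of the patch: the sentence, labels, and label map are made-up examples, and the English cased model is assumed.

from utils_nlp.bert.common import Language, Tokenizer

tokenizer = Tokenizer(language=Language.ENGLISHCASED, to_lower=False)
label_map = {"O": 0, "I-PER": 1, "X": 2}  # "X" marks trailing word pieces

token_ids, input_mask, trailing_mask, label_ids = \
    tokenizer.tokenize_preprocess_ner_text(
        text=["Jane criticizes the report"],
        labels=[["I-PER", "O", "O", "O"]],
        label_map=label_map,
        max_len=16,
        trailing_piece_tag="X",
    )

# Outputs are padded to max_len; trailing_mask is True only on the first
# word piece of each original word, False on pieces such as "##ize".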
--- tests/unit/test_bert_token_classification.py | 9 ++++ utils_nlp/bert/token_classification.py | 53 ++++++++++++++++---- 2 files changed, 52 insertions(+), 10 deletions(-) diff --git a/tests/unit/test_bert_token_classification.py b/tests/unit/test_bert_token_classification.py index 070c37739..ec4c4a961 100644 --- a/tests/unit/test_bert_token_classification.py +++ b/tests/unit/test_bert_token_classification.py @@ -44,6 +44,15 @@ def test_token_classifier_fit_predict(tmp_path, ner_test_data): labels=ner_test_data["INPUT_LABEL_IDS"], ) + # test output probabilities + predictions = token_classifier.predict( + token_ids=ner_test_data["INPUT_TOKEN_IDS"], + input_mask=ner_test_data["INPUT_MASK"], + labels=ner_test_data["INPUT_LABEL_IDS"], + probabilities=True, + ) + assert len(predictions.classes) == predictions.probabilities.shape[0] + def test_postprocess_token_labels(ner_test_data): labels_no_padding = postprocess_token_labels( diff --git a/utils_nlp/bert/token_classification.py b/utils_nlp/bert/token_classification.py index 3182684b4..ecce743ae 100644 --- a/utils_nlp/bert/token_classification.py +++ b/utils_nlp/bert/token_classification.py @@ -7,6 +7,7 @@ import numpy as np from tqdm import tqdm, trange +from collections import namedtuple import torch import torch.nn as nn @@ -14,7 +15,7 @@ from pytorch_pretrained_bert.optimization import BertAdam from pytorch_pretrained_bert.modeling import BertForTokenClassification -from .common import Language, create_data_loader +from utils_nlp.bert.common import Language, create_data_loader from utils_nlp.pytorch.device_utils import get_device, move_to_device @@ -192,8 +193,16 @@ def fit( train_loss = tr_loss / nb_tr_steps print("Train loss: {}".format(train_loss)) + torch.cuda.empty_cache() + def predict( - self, token_ids, input_mask, labels=None, batch_size=32, num_gpus=None + self, + token_ids, + input_mask, + labels=None, + batch_size=32, + num_gpus=None, + probabilities=False, ): """ Predict token labels on the testing data. @@ -215,7 +224,12 @@ def predict( If None, all available GPUs will be used. Defaults to None. Returns: - list: List of lists of predicted token labels. + list or namedtuple(list, ndarray): List of lists of predicted + token labels or ([token labels], probabilities) if + probabilities is True. The probabilities output is an n x m + array, where n is the size of the testing data and m is the + number of tokens in each input sublist. The probability + values are the softmax probability of the predicted class. 
""" test_dataloader = create_data_loader( input_ids=token_ids, @@ -228,7 +242,6 @@ def predict( self.model = move_to_device(self.model, device, num_gpus) self.model.eval() - predictions = [] eval_loss = 0 nb_eval_steps = 0 for step, batch in enumerate( @@ -255,16 +268,36 @@ def predict( eval_loss += tmp_eval_loss.mean().item() - logits = logits.detach().cpu().numpy() - predictions.extend([list(p) for p in np.argmax(logits, axis=2)]) + logits = logits.detach().cpu() + + if step == 0: + logits_all = logits + else: + logits_all = np.append(logits_all, logits, axis=0) nb_eval_steps += 1 + predictions = [list(p) for p in np.argmax(logits_all, axis=2)] + if true_label_available: validation_loss = eval_loss / nb_eval_steps print("Evaluation loss: {}".format(validation_loss)) - return predictions + if probabilities: + return namedtuple("Predictions", "classes probabilities")( + predictions, + np.max(nn.Softmax(dim=2)(torch.Tensor(logits_all)).numpy(), 2), + ) + else: + return predictions + + +def create_label_map(label_list, trailing_piece_tag="X"): + if trailing_piece_tag not in label_list: + label_list.append(trailing_piece_tag) + label_map = {label: i for i, label in enumerate(label_list)} + + return label_map def postprocess_token_labels( @@ -294,13 +327,13 @@ def postprocess_token_labels( original labels. Default value is None. remove_trailing_word_pieces (bool, optional): Whether to remove predicted labels of trailing word pieces generated by WordPiece - tokenizer. For example, "playing" is broken into "play" and - "##ing". After removing predicted label for "##ing", + tokenizer. For example, "criticize" is broken into "critic" and + "##ize". After removing predicted label for "##ize", the predicted label for "play" is assigned to the original word "playing". Default value is False. trailing_token_mask (list, optional): list of boolean values, True for the first word piece of each original word, False for trailing - word pieces, e.g. ##ing. If remove_trailing_word_pieces is + word pieces, e.g. ##ize. If remove_trailing_word_pieces is True, this mask is used to remove the predicted labels on trailing word pieces, so that each original word in the input text has a unique predicted label. From aeb9486a6afa4798029a07071edec90f58b0b06c Mon Sep 17 00:00:00 2001 From: hlums Date: Sun, 23 Jun 2019 21:57:04 +0000 Subject: [PATCH 046/108] Updated wikigold utils to be consistent with other datasets. 
--- tests/unit/test_dataset.py | 22 ++++------- utils_nlp/dataset/wikigold.py | 69 ++++++++++++++++------------------- 2 files changed, 38 insertions(+), 53 deletions(-) diff --git a/tests/unit/test_dataset.py b/tests/unit/test_dataset.py index 2592fbbd3..91ca57a0b 100755 --- a/tests/unit/test_dataset.py +++ b/tests/unit/test_dataset.py @@ -30,27 +30,19 @@ def test_load_pandas_df_msrpc(): def test_wikigold(tmp_path): - wg_text_length = 318333 wg_sentence_count = 1841 wg_test_percentage = 0.5 wg_test_sentence_count = round(wg_sentence_count * wg_test_percentage) wg_train_sentence_count = wg_sentence_count - wg_test_sentence_count - # test download downloaded_file = os.path.join(tmp_path, "wikigold.conll.txt") assert not os.path.exists(downloaded_file) - wg.download(dir_path=tmp_path) - assert os.path.exists(downloaded_file) - - # test read_data - wg_text = wg.read_data(downloaded_file) - assert len(wg_text) == wg_text_length - # test get_train_test_data - train_text, train_labels, test_text, test_labels = wg.get_train_test_data( - wg_text, test_percentage=wg_test_percentage + train_df, test_df = wg.load_train_test_dfs( + tmp_path, test_percentage=wg_test_percentage ) - assert len(train_text) == wg_train_sentence_count - assert len(train_labels) == wg_train_sentence_count - assert len(test_text) == wg_test_sentence_count - assert len(test_labels) == wg_test_sentence_count + + assert os.path.exists(downloaded_file) + + assert train_df.shape == (wg_train_sentence_count, 2) + assert test_df.shape == (wg_test_sentence_count, 2) diff --git a/utils_nlp/dataset/wikigold.py b/utils_nlp/dataset/wikigold.py index 8f32bad27..740440831 100644 --- a/utils_nlp/dataset/wikigold.py +++ b/utils_nlp/dataset/wikigold.py @@ -2,6 +2,9 @@ # Licensed under the MIT License. import random +import os +import pandas as pd + from utils_nlp.dataset.url_utils import maybe_download URL = ( @@ -10,48 +13,35 @@ ) -def download(dir_path="."): - """Download the wikigold data file to dir_path if it doesn't exist yet.""" - file_name = URL.split("/")[-1] - maybe_download(URL, file_name, dir_path) - - -def read_data(data_file): +def load_train_test_dfs( + local_cache_path="./", test_percentage=0.5, random_seed=None +): """ - Read the wikigold dataset into a string of text. + Get the training and testing data frames based on test_percentage. Args: - data_file (str): data file path, including the file name. - - Returns: - str: One string containing the wikigold dataset. - """ - with open(data_file, "r", encoding="utf8") as file: - text = file.read() - - return text - - -def get_train_test_data(text, test_percentage=0.5, random_seed=None): - """ - Get the training and testing data based on test_percentage. - - Args: - text (str): One string containing the wikigold dataset. + local_cache_path (str): Path to store the data. If the data file + doesn't exist in this path, it's downloaded. test_percentage (float, optional): Percentage of data ot use for testing. Since this is a small dataset, the default testing percentage is set to 0.5 random_seed (float, optional): Random seed used to shuffle the data. Returns: - tuple: A tuple containing four lists: - train_sentence_list: List of training sentence strings. - train_labels_list: List of lists. Each sublist contains the - entity labels of the words in the training sentence. - test_sentence_list: List of testing sentence strings. - test_labels_list: List of lists. Each sublist contains the - entity labels of the word in the testing sentence. 
+ tuple: (train_pandas_df, test_pandas_df), each data frame contains + two columns + "sentence": sentences in strings. + "labels": list of entity labels of the words in the sentence. + """ + file_name = URL.split("/")[-1] + maybe_download(URL, file_name, local_cache_path) + + data_file = os.path.join(local_cache_path, file_name) + + with open(data_file, "r", encoding="utf8") as file: + text = file.read() + # Input data are separated by empty lines text_split = text.split("\n\n") # Remove empty line at EOF @@ -94,14 +84,17 @@ def _get_sentence_and_labels(text_list, data_type): test_text_split, "testing" ) - return ( - train_sentence_list, - train_labels_list, - test_sentence_list, - test_labels_list, + train_df = pd.DataFrame( + {"sentence": train_sentence_list, "labels": train_labels_list} ) + test_df = pd.DataFrame( + {"sentence": test_sentence_list, "labels": test_labels_list} + ) + + return (train_df, test_df) + def get_unique_labels(): """Get the unique labels in the wikigold dataset.""" - return ["O", "I-LOC", "I-MISC", "I-PER", "I-ORG", "X"] + return ["O", "I-LOC", "I-MISC", "I-PER", "I-ORG"] From f4aa61520b58d8716968e6bd8c9b10d80ba46cb9 Mon Sep 17 00:00:00 2001 From: hlums Date: Sun, 23 Jun 2019 22:00:20 +0000 Subject: [PATCH 047/108] Removed MSRA NER utils temporarily. --- utils_nlp/dataset/msra_ner.py | 44 -------------------------------- utils_nlp/dataset/ner_utils.py | 46 ---------------------------------- 2 files changed, 90 deletions(-) delete mode 100644 utils_nlp/dataset/msra_ner.py delete mode 100644 utils_nlp/dataset/ner_utils.py diff --git a/utils_nlp/dataset/msra_ner.py b/utils_nlp/dataset/msra_ner.py deleted file mode 100644 index cfd79c9d6..000000000 --- a/utils_nlp/dataset/msra_ner.py +++ /dev/null @@ -1,44 +0,0 @@ -import os -import pandas as pd -from utils_nlp.dataset.ner_utils import get_sentence_and_labels - - -FILES = { - "train": "/MSRA/msra-bakeoff3-training-utf8.2col", - "test": "/MSRA/bakeoff3_goldstandard.txt", -} -ENCODINGS = {"train": "utf8", "test": "gbk"} - - -def load_pandas_df(local_cache_path="./", file_split="test"): - file_path = os.path.join(local_cache_path, FILES[file_split]) - encoding = ENCODINGS[file_split] - - with open(file_path, encoding=encoding) as file_path: - text = file_path.read() - - # Add line break after punctuations indicating end of sentence in Chinese - text = text.replace("。 0", "。 0\n") - text = text.replace("? 0", "? 0\n") - text = text.replace("! 0", "! 0\n") - - text_list = text.split("\n\n") - - # Remove empty line at EOF - text_list = text_list[:-1] - - sentence_list, labels_list = get_sentence_and_labels( - text_list, file_split) - - labels_list = [ - ["O" if label == "0" else label for label in labels] - for labels in labels_list - ] - - df = pd.DataFrame({"sentence": sentence_list, "labels": labels_list}) - - return df - - -def get_unique_labels(): - return ["O", "B-LOC", "B-ORG", "B-PER", "I-LOC", "I-ORG", "I-PER", "X"] diff --git a/utils_nlp/dataset/ner_utils.py b/utils_nlp/dataset/ner_utils.py deleted file mode 100644 index 1fc54ea02..000000000 --- a/utils_nlp/dataset/ner_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -def get_sentence_and_labels(text, data_type=""): - """ - Helper function converting data in conll format to sentence and list - of token labels. - - Args: - text (str): Text string in conll format, e.g. - "Amy B-PER - ADAMS I-PER - works O - at O - the O - University B-ORG - of I-ORG - Minnesota I-ORG - . O" - data_type (str, optional): String that briefly describes the data, - e.g. 
"train" - Returns: - tuple: - (list of sentences, list of token label lists) - """ - text_list = text.split("\n\n") - if text_list[-1] in (" ", ""): - text_list = text_list[:-1] - - max_seq_len = 0 - sentence_list = [] - labels_list = [] - for s in text_list: - # split each sentence string into "word label" pairs - s_split = s.split("\n") - # split "word label" pairs - s_split_split = [t.split() for t in s_split] - sentence_list.append( - " ".join([t[0] for t in s_split_split if len(t) > 1]) - ) - labels_list.append([t[1] for t in s_split_split if len(t) > 1]) - if len(s_split_split) > max_seq_len: - max_seq_len = len(s_split_split) - print( - "Maximum sequence length in {0} data is: {1}".format( - data_type, max_seq_len - ) - ) - return sentence_list, labels_list From 291d250b455251cfcea70e62a65299dd68640af6 Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Sun, 23 Jun 2019 18:44:42 -0400 Subject: [PATCH 048/108] Added NOTICE.txt file with huggingface BERT. --- NOTICE.txt | 224 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 224 insertions(+) create mode 100644 NOTICE.txt diff --git a/NOTICE.txt b/NOTICE.txt new file mode 100644 index 000000000..11dc6f43c --- /dev/null +++ b/NOTICE.txt @@ -0,0 +1,224 @@ +NOTICES AND INFORMATION +Do Not Translate or Localize + +This software incorporates material from third parties. Microsoft makes certain +open source code available at https://3rdpartysource.microsoft.com, or you may +send a check or money order for US $5.00, including the product name, the open +source component name, and version number, to: + +Source Code Compliance Team +Microsoft Corporation +One Microsoft Way +Redmond, WA 98052 +USA + +Notwithstanding any other terms, you may reverse engineer this software to the +extent required to debug changes to any libraries licensed under the GNU Lesser +General Public License. + +------------Attribution Starts Here---------------------------------------------- +Component: https://github.com/huggingface/pytorch-pretrained-BERT + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +------------End of Attribution --------------------------------------------------- \ No newline at end of file From 6189915eece588289abcc47fe4d40055b326ff45 Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Sun, 23 Jun 2019 18:47:57 -0400 Subject: [PATCH 049/108] Added notebook for Chinese NER. 
--- .../ner_msra_bert_chinese.ipynb | 848 ++++++++++++++++++ utils_nlp/dataset/msra_ner.py | 46 + utils_nlp/dataset/ner_utils.py | 46 + 3 files changed, 940 insertions(+) create mode 100644 scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb create mode 100644 utils_nlp/dataset/msra_ner.py create mode 100644 utils_nlp/dataset/ner_utils.py diff --git a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb new file mode 100644 index 000000000..c11b54a6e --- /dev/null +++ b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb @@ -0,0 +1,848 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "*Copyright (c) Microsoft Corporation. All rights reserved.* \n", + "*Licensed under the MIT License.*\n", + "# Named Entity Recognition Using BERT on Chinese\n", + "## Summary\n", + "This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task on Chinese text. Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, and model evaluation. \n", + "\n", + "[BERT (Bidirectional Transformers for Language Understanding)](https://arxiv.org/pdf/1810.04805.pdf) is a powerful pre-trained lanaguage model that can be used for multiple NLP tasks, including text classification, question answering, named entity recognition, etc. It's able to achieve state of the art performance with only a few epochs of fine tuning on task specific datasets. \n", + "The figure below illustrates how BERT can be fine tuned for NER tasks. The input data is a list of tokens representing a sentence. In the training data, each token has an entity label. After fine tuning, the model predicts an entity label for each token in a given testing sentence. 
\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Required packages\n", + "* pytorch\n", + "* pytorch-pretrained-bert\n", + "* pandas\n", + "* seqeval" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "import sys\n", + "import os\n", + "import random\n", + "from seqeval.metrics import f1_score, classification_report\n", + "\n", + "import torch\n", + "from pytorch_pretrained_bert.tokenization import BertTokenizer\n", + "\n", + "nlp_path = os.path.abspath('../../')\n", + "if nlp_path not in sys.path:\n", + " sys.path.insert(0, nlp_path)\n", + "\n", + "from utils_nlp.bert.token_classification import BERTTokenClassifier, postprocess_token_labels, create_label_map\n", + "from utils_nlp.bert.common import Language, Tokenizer\n", + "from utils_nlp.dataset.msra_ner import load_pandas_df, get_unique_labels" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configurations" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "# path configurations\n", + "DATA_DIR = \"./data\"\n", + "TRAIN_DATA_FILE = \"../MSRA/msra-bakeoff3-training-utf8.2col\"\n", + "Test_DATA_FILE = \"../MSRA/bakeoff3_goldstandard.txt\"\n", + "CACHE_DIR=\"./temp\"\n", + "\n", + "# set random seeds\n", + "RANDOM_SEED = 100\n", + "torch.manual_seed(RANDOM_SEED)\n", + "\n", + "# model configurations\n", + "LANGUAGE = Language.CHINESE\n", + "DO_LOWER_CASE = True\n", + "MAX_SEQ_LENGTH = 200\n", + "\n", + "# training configurations\n", + "BATCH_SIZE = 16\n", + "NUM_TRAIN_EPOCHS = 1\n", + "\n", + "# optimizer configuration\n", + "LEARNING_RATE = 3e-5\n", + "WARMUP_PROPORTION = 0.1\n", + "\n", + "TEXT_COL = \"sentence\"\n", + "LABEL_COL = \"labels\"\n", + "\n", + "CACHE_DIR = \"../../../\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Preprocess Data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get training and testing data\n", + "The dataset used in this notebook is the [wikigold dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). The `download` function downloads the data file to a user-specified directory. \n", + "\n", + "The helper function `get_train_test_data` splits the dataset into training and testing sets according to `test_percentage`. Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", + "\n", + "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). An 'X' label is added for the trailing word pieces generated by BERT, because BERT uses WordPiece tokenizer. \n", + "\n", + "The maximum number of words in a sentence is 144, so we set MAX_SEQ_LENGTH to 200 above, because the number of tokens will grow after WordPiece tokenization." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Maximum sequence length in train data is: 746\n", + "Maximum sequence length in test data is: 439\n", + "Number of sentences in training data: 45000\n", + "Number of sentences in testing data: 3940\n", + "Unique labels: ['O', 'B-LOC', 'B-ORG', 'B-PER', 'I-LOC', 'I-ORG', 'I-PER']\n" + ] + } + ], + "source": [ + "train_df = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"train\")\n", + "test_df = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"test\")\n", + "label_list = get_unique_labels()\n", + "print(\"Number of sentences in training data: {}\".format(train_df.shape[0]))\n", + "print(\"Number of sentences in testing data: {}\".format(test_df.shape[0]))\n", + "print(\"Unique labels: {}\".format(label_list))" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
sentencelabels
0当 希 望 工 程 救 助 的 百 万 儿 童 成 长 起 来 , 科 教 兴 国 蔚 然 ...[O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...
1藏 书 本 来 就 是 所 有 传 统 收 藏 门 类 中 的 第 一 大 户 , 只 是 ...[O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...
2因 有 关 日 寇 在 京 掠 夺 文 物 详 情 , 藏 界 较 为 重 视 , 也 是 ...[O, O, O, B-LOC, O, O, B-LOC, O, O, O, O, O, O...
3我 们 藏 有 一 册 1 9 4 5 年 6 月 油 印 的 《 北 京 文 物 保 存 ...[O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...
4以 家 乡 的 历 史 文 献 、 特 定 历 史 时 期 书 刊 、 某 一 名 家 或 ...[O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...
\n", + "
" + ], + "text/plain": [ + " sentence \\\n", + "0 当 希 望 工 程 救 助 的 百 万 儿 童 成 长 起 来 , 科 教 兴 国 蔚 然 ... \n", + "1 藏 书 本 来 就 是 所 有 传 统 收 藏 门 类 中 的 第 一 大 户 , 只 是 ... \n", + "2 因 有 关 日 寇 在 京 掠 夺 文 物 详 情 , 藏 界 较 为 重 视 , 也 是 ... \n", + "3 我 们 藏 有 一 册 1 9 4 5 年 6 月 油 印 的 《 北 京 文 物 保 存 ... \n", + "4 以 家 乡 的 历 史 文 献 、 特 定 历 史 时 期 书 刊 、 某 一 名 家 或 ... \n", + "\n", + " labels \n", + "0 [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ... \n", + "1 [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ... \n", + "2 [O, O, O, B-LOC, O, O, B-LOC, O, O, O, O, O, O... \n", + "3 [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ... \n", + "4 [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ... " + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train_df.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tokenization and Preprocessing\n", + "The `preprocess_ner_tokens` method of the `Tokenizer` class converts raw string data to numerical features, involving the following steps:\n", + "1. WordPiece tokenization.\n", + "2. Convert tokens and labels to numerical values, i.e. token ids and label ids.\n", + "3. Sequence padding or truncation according to the `max_seq_length` configuration." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Create a dictionary that maps labels to numerical values**" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "label_map = create_label_map(label_list)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Tokenize input text**" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "tokenizer = Tokenizer(language=LANGUAGE, \n", + " to_lower=DO_LOWER_CASE, \n", + " cache_dir=CACHE_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Create numerical features** " + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "train_token_ids, train_input_mask, train_trailing_token_mask, train_label_ids = \\\n", + " tokenizer.tokenize_preprocess_ner_text(text=train_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=train_df[LABEL_COL])\n", + "test_token_ids, test_input_mask, test_trailing_token_mask, test_label_ids = \\\n", + " tokenizer.tokenize_preprocess_ner_text(text=test_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=test_df[LABEL_COL])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`Tokenizer.preprocess_ner_tokens` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", + "1. token ids: list of numerical values each corresponds to a token.\n", + "2. attention mask: list of 1s and 0s, 1 for input tokens and 0 for padded tokens, so that padded tokens are not attended to. \n", + "3. trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ing. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", + "4. label ids: list of numerical values each corresponds to an entity label, if `labels` is provided." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sample token ids:\n", + "[2496, 2361, 3307, 2339, 4923, 3131, 1221, 4638, 4636, 674, 1036, 4997, 2768, 7270, 6629, 3341, 8024, 4906, 3136, 1069, 1744, 5917, 4197, 2768, 7599, 3198, 8024, 791, 1921, 3300, 3119, 5966, 817, 966, 4638, 741, 872, 3766, 743, 8024, 3209, 3189, 2218, 1373, 872, 2637, 679, 2496, 1159, 8013, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n", + "\n", + "Sample attention mask:\n", + "[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n", + "\n", + "Sample trailing token mask:\n", + "[True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]\n", + 
"\n", + "Sample label ids:\n", + "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n", + "\n" + ] + } + ], + "source": [ + "print(\"Sample token ids:\\n{}\\n\".format(train_token_ids[0]))\n", + "print(\"Sample attention mask:\\n{}\\n\".format(train_input_mask[0]))\n", + "print(\"Sample trailing token mask:\\n{}\\n\".format(train_trailing_token_mask[0]))\n", + "print(\"Sample label ids:\\n{}\\n\".format(train_label_ids[0]))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create Token Classifier\n", + "The value of the `language` argument determines which BERT model is used:\n", + "* Language.ENGLISH: \"bert-base-uncased\"\n", + "* Language.ENGLISHCASED: \"bert-base-cased\"\n", + "* Language.ENGLISHLARGE: \"bert-large-uncased\"\n", + "* Language.ENGLISHLARGECASED: \"bert-large-cased\"\n", + "* Language.CHINESE: \"bert-base-chinese\"\n", + "* Language.MULTILINGUAL: \"bert-base-multilingual-cased\"\n", + "\n", + "Here we use the base, uncased pretrained model." + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "token_classifier = BERTTokenClassifier(language=LANGUAGE,\n", + " num_labels=len(label_list),\n", + " cache_dir=CACHE_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Train Model" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "t_total value of -1 results in schedule not being applied\n", + "Epoch: 0%| | 0/1 [00:00 1]) + ) + labels_list.append([t[1] for t in s_split_split if len(t) > 1]) + if len(s_split_split) > max_seq_len: + max_seq_len = len(s_split_split) + print( + "Maximum sequence length in {0} data is: {1}".format( + data_type, max_seq_len + ) + ) + return sentence_list, labels_list From ab2672a579bce31376da3b4b0090c059138e47a9 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Mon, 24 Jun 2019 10:56:23 -0400 Subject: [PATCH 050/108] missing changes --- README.md | 15 - scenarios/README.md | 36 ++ scenarios/data_prep/README.md | 4 +- scenarios/data_prep/stsbenchmark.ipynb | 417 ++----------- .../entailment_xnli_multilingual.ipynb | 581 ++++++++++++++++++ scenarios/sentence_similarity/README.md | 24 +- scenarios/text_classification/README.md | 3 + .../text_classification/tc_dac_bert_ar.ipynb | 7 +- .../text_classification/tc_mnli_bert.ipynb | 7 +- utils_nlp/dataset/preprocess.py | 68 +- utils_nlp/dataset/stsbenchmark.py | 125 ++-- utils_nlp/dataset/xnli.py | 89 ++- 12 files changed, 887 insertions(+), 489 deletions(-) create mode 100644 scenarios/README.md create mode 100644 scenarios/entailment/entailment_xnli_multilingual.ipynb diff --git a/README.md b/README.md index 7c736f652..4ad19b9a3 100755 --- a/README.md +++ b/README.md @@ -3,25 +3,10 @@ | ------ | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | master | [![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/unit-test-master?branchName=master)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=22&branchName=master) | | staging | [![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/unit-test-staging?branchName=staging)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=21&branchName=staging) | - # NLP Best Practices This repository contains examples and best practices for building NLP systems, provided as Jupyter notebooks and utility functions. The focus of the repository is on state-of-the-art methods and common scenarios that are popular among researchers and practitioners working on problems involving text and language. -The following section includes a list of the available scenarios. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities. - - -## Scenarios - - -| Scenario | Applications | Languages | Models | -|---| ------------------------ | -------------------------------------------- | ------------------- | -|[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| -|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| -|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance| -|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec
fastText
GloVe| - - ## Planning All feature planning is done via projects, milestones, and issues in this repository. diff --git a/scenarios/README.md b/scenarios/README.md new file mode 100644 index 000000000..b86aa9ff0 --- /dev/null +++ b/scenarios/README.md @@ -0,0 +1,36 @@ +# NLP Scenarios + +This folder contains examples and best practices, written in Jupyter notebooks, for building Natural Language Processing systems for different scenarios. + +## Summary + +The following summarizes each scenario of the best practice notebooks. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities. + +| Scenario | Applications | Languages | Models | +|---| ------------------------ | -------------------------------------------- | ------------------- | +|[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| +|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| +|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance| +|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec
fastText
GloVe| + +## Azure-enhanced notebooks + +Azure products and services are used in certain notebooks to enhance the efficiency of developing Natural Language systems at scale. + +To successfully run these notebooks, the users **need an Azure subscription** or can [use Azure for free](https://azure.microsoft.com/en-us/free/). + +The Azure products featured in the notebooks include: + +* [Azure Machine Learning service](https://azure.microsoft.com/en-us/services/machine-learning-service/) - Azure Machine Learning service is a cloud service used to train, deploy, automate, and manage machine learning models, all at the broad scale that the cloud provides. It is used across various notebooks for the AI model development related tasks like: + * Using Datastores + * Tracking and monitoring metrics to enhance the model creation process + * Distributed Training + * Hyperparameter tuning + * Scaling up and out on Azure Machine Learning Compute + * Deploying a web service to both Azure Container Instance and Azure Kubernetes Service + +* [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aks) - You can use Azure Machine Learning service to host your classification model in a web service deployment on Azure Kubernetes Service (AKS). AKS is good for high-scale production deployments and provides autoscaling, and fast response times. + +* [Azure Container Instance](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aci)- You can use Azure Machine Learning service to host your classification model in a web service deployment on Azure Container Instance (ACI). ACI is good for low scale, CPU-based workloads. + +There may be other Azure service or products used in the notebooks. Introduction and/or reference of those will be provided in the notebooks. diff --git a/scenarios/data_prep/README.md b/scenarios/data_prep/README.md index eaf84ad28..5e13abec5 100644 --- a/scenarios/data_prep/README.md +++ b/scenarios/data_prep/README.md @@ -25,7 +25,7 @@ STS Benchmark - sts_load.ipynb + stsbenchmark.ipynb Downloads and cleans the STS Benchmark dataset. Shows an example of tokenizing and removing stopwords using the popular spaCy library. @@ -34,7 +34,7 @@ MSR Paraphrase Corpus - msrpc_load.ipynb + msrpc.ipynb Download and clean the MSR Paraphrase corpus. diff --git a/scenarios/data_prep/stsbenchmark.ipynb b/scenarios/data_prep/stsbenchmark.ipynb index ddd649814..e76967a79 100644 --- a/scenarios/data_prep/stsbenchmark.ipynb +++ b/scenarios/data_prep/stsbenchmark.ipynb @@ -46,7 +46,7 @@ "source": [ "import sys\n", "\n", - "sys.path.append(\"../../../\") ## set the environment path\n", + "sys.path.append(\"../../\") ## set the environment path\n", "\n", "import os\n", "import azureml.dataprep as dp\n", @@ -67,7 +67,7 @@ "outputs": [], "source": [ "STS_URL = \"http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz\"\n", - "BASE_DATA_PATH = \"../../../data\"\n", + "BASE_DATA_PATH = \"../../data\"\n", "RAW_DATA_PATH = os.path.join(BASE_DATA_PATH, \"raw\")\n", "CLEAN_DATA_PATH = os.path.join(BASE_DATA_PATH, \"clean\")" ] @@ -76,14 +76,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 01 Data Download" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Make a directory for the data if it doesn't already exist, and then download." 
+ "### 01 Data Download\n", + "In this section we \n", + "* load raw data into a dataframe\n", + "* peek into the first 5 rows" ] }, { @@ -100,68 +96,21 @@ "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [], - "source": [ - "def download_sts(url, dirpath):\n", - " zipfile = maybe_download(url, work_directory=dirpath)\n", - " unzipped = stsbenchmark._extract_sts(zipfile, target_dirpath=dirpath, tmode=\"r:gz\")\n", - " return zipfile, unzipped" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "418kB [00:03, 138kB/s] " + "100%|██████████| 401/401 [00:01<00:00, 310KB/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Data downloaded to ../../../data/raw/stsbenchmark\n" + "Data downloaded to ../../data/raw/raw/stsbenchmark\n" ] }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "tarfile, datapath = download_sts(STS_URL, RAW_DATA_PATH)\n", - "print(\"Data downloaded to {}\".format(datapath))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 02 Data Understanding\n", - "In this section we \n", - "* load raw data into a dataframe\n", - "* peek into the first 10 rows" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can load the data using a `read` function that has built-in automatic filetype inference:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ { "data": { "text/html": [ @@ -183,13 +132,13 @@ " \n", " \n", " \n", - " Column1\n", - " Column2\n", - " Column3\n", - " Column4\n", - " Column5\n", - " Column6\n", - " Column7\n", + " column_0\n", + " column_1\n", + " column_2\n", + " column_3\n", + " column_4\n", + " column_5\n", + " column_6\n", " \n", " \n", " \n", @@ -198,7 +147,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 1\n", + " 0001\n", " 5.00\n", " A plane is taking off.\n", " An air plane is taking off.\n", @@ -208,7 +157,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 4\n", + " 0004\n", " 3.80\n", " A man is playing a large flute.\n", " A man is playing a flute.\n", @@ -218,7 +167,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 5\n", + " 0005\n", " 3.80\n", " A man is spreading shreded cheese on a pizza.\n", " A man is spreading shredded cheese on an uncoo...\n", @@ -228,7 +177,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 6\n", + " 0006\n", " 2.60\n", " Three men are playing chess.\n", " Two men are playing chess.\n", @@ -238,178 +187,59 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 9\n", + " 0009\n", " 4.25\n", " A man is playing the cello.\n", " A man seated is playing the cello.\n", " \n", - " \n", - " 5\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 11\n", - " 4.25\n", - " Some men are fighting.\n", - " Two men are fighting.\n", - " \n", - " \n", - " 6\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 12\n", - " 0.50\n", - " A man is smoking.\n", - " A man is skating.\n", - " \n", - " \n", - " 7\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 13\n", - " 1.60\n", - " The man is playing the piano.\n", - " The man is playing the guitar.\n", - " \n", - " \n", - " 8\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 14\n", - " 2.20\n", - " A man is playing on a guitar and singing.\n", - " A woman is playing an acoustic guitar and sing...\n", - " \n", - " \n", - " 9\n", - " main-captions\n", - " 
MSRvid\n", - " 2012test\n", - " 16\n", - " 5.00\n", - " A person is throwing a cat on to the ceiling.\n", - " A person throws a cat on the ceiling.\n", - " \n", " \n", "\n", "" ], "text/plain": [ - " Column1 Column2 Column3 Column4 Column5 \\\n", - "0 main-captions MSRvid 2012test 1 5.00 \n", - "1 main-captions MSRvid 2012test 4 3.80 \n", - "2 main-captions MSRvid 2012test 5 3.80 \n", - "3 main-captions MSRvid 2012test 6 2.60 \n", - "4 main-captions MSRvid 2012test 9 4.25 \n", - "5 main-captions MSRvid 2012test 11 4.25 \n", - "6 main-captions MSRvid 2012test 12 0.50 \n", - "7 main-captions MSRvid 2012test 13 1.60 \n", - "8 main-captions MSRvid 2012test 14 2.20 \n", - "9 main-captions MSRvid 2012test 16 5.00 \n", + " column_0 column_1 column_2 column_3 column_4 \\\n", + "0 main-captions MSRvid 2012test 0001 5.00 \n", + "1 main-captions MSRvid 2012test 0004 3.80 \n", + "2 main-captions MSRvid 2012test 0005 3.80 \n", + "3 main-captions MSRvid 2012test 0006 2.60 \n", + "4 main-captions MSRvid 2012test 0009 4.25 \n", "\n", - " Column6 \\\n", + " column_5 \\\n", "0 A plane is taking off. \n", "1 A man is playing a large flute. \n", "2 A man is spreading shreded cheese on a pizza. \n", "3 Three men are playing chess. \n", "4 A man is playing the cello. \n", - "5 Some men are fighting. \n", - "6 A man is smoking. \n", - "7 The man is playing the piano. \n", - "8 A man is playing on a guitar and singing. \n", - "9 A person is throwing a cat on to the ceiling. \n", "\n", - " Column7 \n", + " column_6 \n", "0 An air plane is taking off. \n", "1 A man is playing a flute. \n", "2 A man is spreading shredded cheese on an uncoo... \n", "3 Two men are playing chess. \n", - "4 A man seated is playing the cello. \n", - "5 Two men are fighting. \n", - "6 A man is skating. \n", - "7 The man is playing the guitar. \n", - "8 A woman is playing an acoustic guitar and sing... \n", - "9 A person throws a cat on the ceiling. " + "4 A man seated is playing the cello. " ] }, - "execution_count": 6, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "dflow = dp.auto_read_file(path=os.path.join(datapath, \"sts-train.csv\"))\n", - "dflow.head()" + "df = stsbenchmark.load_pandas_df(RAW_DATA_PATH, file_split=\"train\")\n", + "df.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `auto_read_file` function from the AzureML Data Prep module actually returns a `Dataflow` object, which you can read more about [here](https://docs.microsoft.com/en-us/python/api/azureml-dataprep/azureml.dataprep.dataflow?view=azure-dataprep-py). We can easily transfer the data into a Pandas DataFrame (as before) in a single line using the `to_pandas_dataframe` function, or we can continue manipulating the data as a Dataflow object using the AzureML Data Prep API. For the remainder of this notebook we will be doing the latter." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 03 Data Cleaning\n", + "### 02 Data Cleaning\n", "Now that we know about the general shape of the data, we can clean it so that it is ready for further preprocessing. The main operation we need for the STS Benchmark data is to drop all of columns except for the sentence pairs and scores." 
] }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "sentences = dflow.keep_columns([\"Column5\", \"Column6\", \"Column7\"]).rename_columns(\n", - " {\"Column5\": \"score\", \"Column6\": \"sentence1\", \"Column7\": \"sentence2\"}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 04 One-Shot Dataframe Loading\n", - "You can also use our STSBenchmark utils to automatically download, extract, and persist the data. You can then load the sanitized data as a pandas DataFrame in one line. " - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "418kB [00:02, 191kB/s] \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../../data/raw/stsbenchmark\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-test.csv\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-dev.csv\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-train.csv\n" - ] - } - ], - "source": [ - "# Initializing this instance runs the downloader and extractor behind the scenes\n", - "sts_train = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -489,12 +319,13 @@ "4 A man seated is playing the cello. " ] }, - "execution_count": 9, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "sts_train = stsbenchmark.clean_sts(df)\n", "sts_train.head()" ] }, @@ -502,13 +333,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 05 Make Lowercase\n", - "We start with simple standardization of the text by making all text lowercase." + "### 03 Make Lowercase\n", + "We do simple standardization of the text by making all text lowercase." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -588,7 +419,7 @@ "4 a man seated is playing the cello. " ] }, - "execution_count": 10, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -602,13 +433,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 06 Tokenize\n", + "### 04 Tokenize\n", "We tokenize the text using spaCy's non-destructive tokenizer." 
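For reference, the raw spaCy call that `to_spacy_tokens` builds on looks roughly like the sketch below; the helper applies this over the two sentence columns of the dataframe. It requires the `en_core_web_sm` model to be installed (`python -m spacy download en_core_web_sm`).

```python
import spacy

# Load the small English model and tokenize one of the sentences above.
nlp = spacy.load("en_core_web_sm")
doc = nlp("a plane is taking off.")
print([token.text for token in doc])  # ['a', 'plane', 'is', 'taking', 'off', '.']
```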
] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -680,46 +511,6 @@ " [a, man, is, playing, the, cello, .]\n", " [a, man, seated, is, playing, the, cello, .]\n", " \n", - " \n", - " 5\n", - " 4.25\n", - " some men are fighting.\n", - " two men are fighting.\n", - " [some, men, are, fighting, .]\n", - " [two, men, are, fighting, .]\n", - " \n", - " \n", - " 6\n", - " 0.50\n", - " a man is smoking.\n", - " a man is skating.\n", - " [a, man, is, smoking, .]\n", - " [a, man, is, skating, .]\n", - " \n", - " \n", - " 7\n", - " 1.60\n", - " the man is playing the piano.\n", - " the man is playing the guitar.\n", - " [the, man, is, playing, the, piano, .]\n", - " [the, man, is, playing, the, guitar, .]\n", - " \n", - " \n", - " 8\n", - " 2.20\n", - " a man is playing on a guitar and singing.\n", - " a woman is playing an acoustic guitar and sing...\n", - " [a, man, is, playing, on, a, guitar, and, sing...\n", - " [a, woman, is, playing, an, acoustic, guitar, ...\n", - " \n", - " \n", - " 9\n", - " 5.00\n", - " a person is throwing a cat on to the ceiling.\n", - " a person throws a cat on the ceiling.\n", - " [a, person, is, throwing, a, cat, on, to, the,...\n", - " [a, person, throws, a, cat, on, the, ceiling, .]\n", - " \n", " \n", "\n", "" @@ -731,11 +522,6 @@ "2 3.80 a man is spreading shreded cheese on a pizza. \n", "3 2.60 three men are playing chess. \n", "4 4.25 a man is playing the cello. \n", - "5 4.25 some men are fighting. \n", - "6 0.50 a man is smoking. \n", - "7 1.60 the man is playing the piano. \n", - "8 2.20 a man is playing on a guitar and singing. \n", - "9 5.00 a person is throwing a cat on to the ceiling. \n", "\n", " sentence2 \\\n", "0 an air plane is taking off. \n", @@ -743,11 +529,6 @@ "2 a man is spreading shredded cheese on an uncoo... \n", "3 two men are playing chess. \n", "4 a man seated is playing the cello. \n", - "5 two men are fighting. \n", - "6 a man is skating. \n", - "7 the man is playing the guitar. \n", - "8 a woman is playing an acoustic guitar and sing... \n", - "9 a person throws a cat on the ceiling. \n", "\n", " sentence1_tokens \\\n", "0 [a, plane, is, taking, off, .] \n", @@ -755,48 +536,36 @@ "2 [a, man, is, spreading, shreded, cheese, on, a... \n", "3 [three, men, are, playing, chess, .] \n", "4 [a, man, is, playing, the, cello, .] \n", - "5 [some, men, are, fighting, .] \n", - "6 [a, man, is, smoking, .] \n", - "7 [the, man, is, playing, the, piano, .] \n", - "8 [a, man, is, playing, on, a, guitar, and, sing... \n", - "9 [a, person, is, throwing, a, cat, on, to, the,... \n", "\n", " sentence2_tokens \n", "0 [an, air, plane, is, taking, off, .] \n", "1 [a, man, is, playing, a, flute, .] \n", "2 [a, man, is, spreading, shredded, cheese, on, ... \n", "3 [two, men, are, playing, chess, .] \n", - "4 [a, man, seated, is, playing, the, cello, .] \n", - "5 [two, men, are, fighting, .] \n", - "6 [a, man, is, skating, .] \n", - "7 [the, man, is, playing, the, guitar, .] \n", - "8 [a, woman, is, playing, an, acoustic, guitar, ... \n", - "9 [a, person, throws, a, cat, on, the, ceiling, .] " + "4 [a, man, seated, is, playing, the, cello, .] 
" ] }, - "execution_count": 11, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "sts_train_tok = to_spacy_tokens(\n", - " sts_train_low.head(10)\n", - ") # operating on a small slice of the data as an example\n", - "sts_train_tok.head(10)" + "sts_train_tok = to_spacy_tokens(sts_train_low.head())\n", + "sts_train_tok.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 07 Optional: Remove Stop Words\n", + "### 05 Optional: Remove Stop Words\n", "Removing stop words is another common preprocessing step for NLP tasks. We use the `rm_spacy_stopwords` utility function to do this on the dataframe. This function makes use of the spaCy language model's default set of stop words. If we need to add our own set of stop words (for example, if we are doing an NLP task for a very specific domain of content), we can do this in-line by simply providing the list as the `custom_stopwords` parameter of `rm_spacy_stopwords`." ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -880,56 +649,6 @@ " [man, playing, cello, .]\n", " [man, seated, playing, cello, .]\n", " \n", - " \n", - " 5\n", - " 4.25\n", - " some men are fighting.\n", - " two men are fighting.\n", - " [some, men, are, fighting, .]\n", - " [two, men, are, fighting, .]\n", - " [men, fighting, .]\n", - " [men, fighting, .]\n", - " \n", - " \n", - " 6\n", - " 0.50\n", - " a man is smoking.\n", - " a man is skating.\n", - " [a, man, is, smoking, .]\n", - " [a, man, is, skating, .]\n", - " [man, smoking, .]\n", - " [man, skating, .]\n", - " \n", - " \n", - " 7\n", - " 1.60\n", - " the man is playing the piano.\n", - " the man is playing the guitar.\n", - " [the, man, is, playing, the, piano, .]\n", - " [the, man, is, playing, the, guitar, .]\n", - " [man, playing, piano, .]\n", - " [man, playing, guitar, .]\n", - " \n", - " \n", - " 8\n", - " 2.20\n", - " a man is playing on a guitar and singing.\n", - " a woman is playing an acoustic guitar and sing...\n", - " [a, man, is, playing, on, a, guitar, and, sing...\n", - " [a, woman, is, playing, an, acoustic, guitar, ...\n", - " [man, playing, guitar, singing, .]\n", - " [woman, playing, acoustic, guitar, singing, .]\n", - " \n", - " \n", - " 9\n", - " 5.00\n", - " a person is throwing a cat on to the ceiling.\n", - " a person throws a cat on the ceiling.\n", - " [a, person, is, throwing, a, cat, on, to, the,...\n", - " [a, person, throws, a, cat, on, the, ceiling, .]\n", - " [person, throwing, cat, ceiling, .]\n", - " [person, throws, cat, ceiling, .]\n", - " \n", " \n", "\n", "" @@ -941,11 +660,6 @@ "2 3.80 a man is spreading shreded cheese on a pizza. \n", "3 2.60 three men are playing chess. \n", "4 4.25 a man is playing the cello. \n", - "5 4.25 some men are fighting. \n", - "6 0.50 a man is smoking. \n", - "7 1.60 the man is playing the piano. \n", - "8 2.20 a man is playing on a guitar and singing. \n", - "9 5.00 a person is throwing a cat on to the ceiling. \n", "\n", " sentence2 \\\n", "0 an air plane is taking off. \n", @@ -953,11 +667,6 @@ "2 a man is spreading shredded cheese on an uncoo... \n", "3 two men are playing chess. \n", "4 a man seated is playing the cello. \n", - "5 two men are fighting. \n", - "6 a man is skating. \n", - "7 the man is playing the guitar. \n", - "8 a woman is playing an acoustic guitar and sing... \n", - "9 a person throws a cat on the ceiling. \n", "\n", " sentence1_tokens \\\n", "0 [a, plane, is, taking, off, .] 
\n", @@ -965,11 +674,6 @@ "2 [a, man, is, spreading, shreded, cheese, on, a... \n", "3 [three, men, are, playing, chess, .] \n", "4 [a, man, is, playing, the, cello, .] \n", - "5 [some, men, are, fighting, .] \n", - "6 [a, man, is, smoking, .] \n", - "7 [the, man, is, playing, the, piano, .] \n", - "8 [a, man, is, playing, on, a, guitar, and, sing... \n", - "9 [a, person, is, throwing, a, cat, on, to, the,... \n", "\n", " sentence2_tokens \\\n", "0 [an, air, plane, is, taking, off, .] \n", @@ -977,11 +681,6 @@ "2 [a, man, is, spreading, shredded, cheese, on, ... \n", "3 [two, men, are, playing, chess, .] \n", "4 [a, man, seated, is, playing, the, cello, .] \n", - "5 [two, men, are, fighting, .] \n", - "6 [a, man, is, skating, .] \n", - "7 [the, man, is, playing, the, guitar, .] \n", - "8 [a, woman, is, playing, an, acoustic, guitar, ... \n", - "9 [a, person, throws, a, cat, on, the, ceiling, .] \n", "\n", " sentence1_tokens_rm_stopwords \\\n", "0 [plane, taking, .] \n", @@ -989,34 +688,22 @@ "2 [man, spreading, shreded, cheese, pizza, .] \n", "3 [men, playing, chess, .] \n", "4 [man, playing, cello, .] \n", - "5 [men, fighting, .] \n", - "6 [man, smoking, .] \n", - "7 [man, playing, piano, .] \n", - "8 [man, playing, guitar, singing, .] \n", - "9 [person, throwing, cat, ceiling, .] \n", "\n", " sentence2_tokens_rm_stopwords \n", "0 [air, plane, taking, .] \n", "1 [man, playing, flute, .] \n", "2 [man, spreading, shredded, cheese, uncooked, p... \n", "3 [men, playing, chess, .] \n", - "4 [man, seated, playing, cello, .] \n", - "5 [men, fighting, .] \n", - "6 [man, skating, .] \n", - "7 [man, playing, guitar, .] \n", - "8 [woman, playing, acoustic, guitar, singing, .] \n", - "9 [person, throws, cat, ceiling, .] " + "4 [man, seated, playing, cello, .] " ] }, - "execution_count": 12, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "rm_spacy_stopwords(\n", - " sts_train_tok\n", - ") # operating on a small slice of the data as an example" + "rm_spacy_stopwords(sts_train_tok).head()" ] } ], @@ -1036,7 +723,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.5" + "version": "3.6.8" } }, "nbformat": 4, diff --git a/scenarios/entailment/entailment_xnli_multilingual.ipynb b/scenarios/entailment/entailment_xnli_multilingual.ipynb new file mode 100644 index 000000000..0816e8a47 --- /dev/null +++ b/scenarios/entailment/entailment_xnli_multilingual.ipynb @@ -0,0 +1,581 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multi-lingual Inference on XNLI Dataset using BERT" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary\n", + "In this notebook, we demostrate using the [Multi-lingual BERT model](https://github.com/google-research/bert/blob/master/multilingual.md) to do language inference in Chinese and Hindi. We use the [XNLI](https://github.com/facebookresearch/XNLI) dataset and the task is to classify sentence pairs into three classes: contradiction, entailment, and neutral. \n", + "The figure below shows how [BERT](https://arxiv.org/abs/1810.04805) classifies sentence pairs. It concatenates the tokens in each sentence pairs and separates the sentences by the [SEP] token. 
A [CLS] token is prepended to the token list and used as the aggregate sequence representation for the classification task.\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "import sys\n", + "import os\n", + "import random\n", + "import numpy as np\n", + "from sklearn.metrics import classification_report\n", + "from sklearn.preprocessing import LabelEncoder\n", + "\n", + "import torch\n", + "\n", + "nlp_path = os.path.abspath('../../')\n", + "if nlp_path not in sys.path:\n", + " sys.path.insert(0, nlp_path)\n", + "\n", + "from utils_nlp.bert.sequence_classification import BERTSequenceClassifier\n", + "from utils_nlp.bert.common import Language, Tokenizer\n", + "from utils_nlp.dataset.xnli import load_pandas_df\n", + "from utils_nlp.common.timer import Timer" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configurations\n", + "Note that the running time shown in this notebook are on a Standard_NC12 Azure Deep Learning Virtual Machine with two NVIDIA Tesla K80 GPUs. If you want to run through the notebook quickly, you can change the `TRAIN_DATA_USED_PERCENT` to a small number, e.g. 0.01. " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "TRAIN_DATA_USED_PERCENT = 1.0\n", + "\n", + "# set random seeds\n", + "RANDOM_SEED = 42\n", + "random.seed(RANDOM_SEED)\n", + "np.random.seed(RANDOM_SEED)\n", + "torch.manual_seed(RANDOM_SEED)\n", + "num_cuda_devices = torch.cuda.device_count()\n", + "if num_cuda_devices > 1:\n", + " torch.cuda.manual_seed_all(RANDOM_SEED)\n", + "\n", + "# model configurations\n", + "LANGUAGE_CHINESE = Language.CHINESE\n", + "LANGUAGE_MULTI = Language.MULTILINGUAL\n", + "TO_LOWER = True\n", + "MAX_SEQ_LENGTH = 128\n", + "\n", + "# training configurations\n", + "NUM_GPUS = 2\n", + "BATCH_SIZE = 32\n", + "NUM_EPOCHS = 2\n", + "\n", + "# optimizer configurations\n", + "LEARNING_RATE= 5e-5\n", + "WARMUP_PROPORTION= 0.1\n", + "\n", + "# data configurations\n", + "TEXT_COL = \"text\"\n", + "LABEL_COL = \"label\"\n", + "\n", + "CACHE_DIR = \"./temp\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load Data\n", + "The XNLI dataset comes in two zip files: \n", + "* XNLI-1.0.zip: dev and test datasets in 15 languages. The original English data was translated into other languages by human translators. \n", + "* XNLI-MT-1.0.zip: training dataset in 15 languages. This dataset is machine translations of the [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) dataset. It also contains English translations of the dev and test datasets, but not used in this notebook. \n", + "\n", + "The `load_pandas_df` function downloads and extracts the zip files if they don't already exist in `local_cache_path` and returns the data subset specified by `file_split` and `language`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "train_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"train\", language=\"zh\")\n", + "dev_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"dev\", language=\"zh\")\n", + "test_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"test\", language=\"zh\")\n", + "\n", + "train_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"train\", language=\"hi\")\n", + "dev_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"dev\", language=\"hi\")\n", + "test_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"test\", language=\"hi\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chinese training dataset size: 392702\n", + "Chinese dev dataset size: 2490\n", + "Chinese test dataset size: 5010\n", + "\n", + "Hindi training dataset size: 392702\n", + "Hindi dev dataset size: 2490\n", + "Hindi test dataset size: 5010\n", + "\n", + " text label\n", + "0 (从 概念 上 看 , 奶油 收入 有 两 个 基本 方面 产品 和 地理 ., 产品 和 ... neutral\n", + "1 (你 知道 在 这个 季节 , 我 猜 在 你 的 水平 你 把 他们 丢到 下 一个 水平... entailment\n", + "2 (我们 的 一个 号码 会 非常 详细 地 执行 你 的 指示, 我 团队 的 一个 成员 ... entailment\n", + "3 (你 怎么 知道 的 ? 所有 这些 都 是 他们 的 信息 ., 这些 信息 属于 他们 .) entailment\n", + "4 (是 啊 , 我 告诉 你 , 如果 你 去 买 一些 网球鞋 , 我 可以 看到 为什么 ... neutral\n", + " text label\n", + "0 (Conceptually क ् रीम एंजलिस में दो मूल आयाम ह... neutral\n", + "1 (आप मौसम के दौरान जानते हैं और मैं अपने स ् तर... entailment\n", + "2 (हमारे एक नंबर में से एक आपके निर ् देशों को म... entailment\n", + "3 (आप कैसे जानते हैं ? ये सब उनकी जानकारी फिर से... entailment\n", + "4 (हाँ मैं आपको बताता हूँ कि अगर आप उन टेनिस जूत... neutral\n" + ] + } + ], + "source": [ + "print(\"Chinese training dataset size: {}\".format(train_df_chinese.shape[0]))\n", + "print(\"Chinese dev dataset size: {}\".format(dev_df_chinese.shape[0]))\n", + "print(\"Chinese test dataset size: {}\".format(test_df_chinese.shape[0]))\n", + "print()\n", + "print(\"Hindi training dataset size: {}\".format(train_df_hindi.shape[0]))\n", + "print(\"Hindi dev dataset size: {}\".format(dev_df_hindi.shape[0]))\n", + "print(\"Hindi test dataset size: {}\".format(test_df_hindi.shape[0]))\n", + "print()\n", + "print(train_df_chinese.head())\n", + "print(train_df_hindi.head())" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "train_data_used_count = round(TRAIN_DATA_USED_PERCENT * train_df_chinese.shape[0])\n", + "train_df_chinese = train_df_chinese.loc[:train_data_used_count]\n", + "train_df_hindi = train_df_hindi.loc[:train_data_used_count]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Language Inference on Chinese\n", + "For Chinese dataset, we use the `bert-base-chinese` model which was pretrained on Chinese dataset only. The `bert-base-multilingual-cased` model can also be used on Chinese, but the accuracy is 3% lower." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tokenize and Preprocess\n", + "Before training, we tokenize the sentence texts and convert them to lists of tokens. The following steps instantiate a BERT tokenizer given the language, and tokenize the text of the training and testing sets." 
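After instantiating the tokenizer in the next cell, it can be helpful to inspect what it returns for a single premise/hypothesis pair before running it over the full dataset. The sketch below uses a made-up pair of Chinese sentences; the exact word pieces depend on the `bert-base-chinese` vocabulary.

```python
# Hypothetical example: tokenize one (premise, hypothesis) pair. For paired
# inputs, each element of the result is itself a pair of token lists.
sample_pair = [("他 在 弹 吉他 。", "他 在 演奏 乐器 。")]
sample_tokens = tokenizer_chinese.tokenize(sample_pair)
print(sample_tokens[0][0])  # tokens of the premise
print(sample_tokens[0][1])  # tokens of the hypothesis
```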
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 392702/392702 [02:26<00:00, 2682.67it/s]\n", + "100%|██████████| 5010/5010 [00:01<00:00, 3122.04it/s]\n" + ] + } + ], + "source": [ + "tokenizer_chinese = Tokenizer(LANGUAGE_CHINESE, to_lower=TO_LOWER, cache_dir=CACHE_DIR)\n", + "\n", + "train_tokens_chinese = tokenizer_chinese.tokenize(train_df_chinese[TEXT_COL])\n", + "test_tokens_chinese= tokenizer_chinese.tokenize(test_df_chinese[TEXT_COL])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition, we perform the following preprocessing steps in the cell below:\n", + "\n", + "* Convert the tokens into token indices corresponding to the BERT tokenizer's vocabulary\n", + "* Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", + "* Pad or truncate the token lists to the specified max length\n", + "* Return mask lists that indicate paddings' positions\n", + "* Return token type id lists that indicate which sentence the tokens belong to\n", + "\n", + "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "train_token_ids_chinese, train_input_mask_chinese, train_token_type_ids_chinese = \\\n", + " tokenizer_chinese.preprocess_classification_tokens(train_tokens_chinese, max_len=MAX_SEQ_LENGTH)\n", + "test_token_ids_chinese, test_input_mask_chinese, test_token_type_ids_chinese = \\\n", + " tokenizer_chinese.preprocess_classification_tokens(test_tokens_chinese, max_len=MAX_SEQ_LENGTH)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "label_encoder_chinese = LabelEncoder()\n", + "train_labels_chinese = label_encoder_chinese.fit_transform(train_df_chinese[LABEL_COL])\n", + "num_labels_chinese = len(np.unique(train_labels_chinese))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "classifier_chinese = BERTSequenceClassifier(language=LANGUAGE_CHINESE,\n", + " num_labels=num_labels_chinese,\n", + " cache_dir=CACHE_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Train Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch:1/2; batch:1->1228/12271; loss:1.194384\n", + "epoch:1/2; batch:1229->2456/12271; loss:0.863067\n", + "epoch:1/2; batch:2457->3684/12271; loss:0.781256\n", + "epoch:1/2; batch:3685->4912/12271; loss:1.067413\n", + "epoch:1/2; batch:4913->6140/12271; loss:0.599279\n", + "epoch:1/2; batch:6141->7368/12271; loss:0.471488\n", + "epoch:1/2; batch:7369->8596/12271; loss:0.572327\n", + "epoch:1/2; batch:8597->9824/12271; loss:0.689093\n", + "epoch:1/2; batch:9825->11052/12271; loss:0.651702\n", + "epoch:1/2; batch:11053->12271/12271; loss:0.431085\n", + "epoch:2/2; batch:1->1228/12271; loss:0.255859\n", + "epoch:2/2; batch:1229->2456/12271; loss:0.434052\n", + "epoch:2/2; batch:2457->3684/12271; loss:0.433569\n", + "epoch:2/2; batch:3685->4912/12271; loss:0.405915\n", + "epoch:2/2; batch:4913->6140/12271; 
loss:0.636128\n", + "epoch:2/2; batch:6141->7368/12271; loss:0.416685\n", + "epoch:2/2; batch:7369->8596/12271; loss:0.265789\n", + "epoch:2/2; batch:8597->9824/12271; loss:0.328964\n", + "epoch:2/2; batch:9825->11052/12271; loss:0.436310\n", + "epoch:2/2; batch:11053->12271/12271; loss:0.374193\n", + "Training time : 8.050 hrs\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " classifier_chinese.fit(token_ids=train_token_ids_chinese,\n", + " input_mask=train_input_mask_chinese,\n", + " token_type_ids=train_token_type_ids_chinese,\n", + " labels=train_labels_chinese,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)\n", + "print(\"Training time : {:.3f} hrs\".format(t.interval / 3600))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Predict on Test Data" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "5024it [00:54, 101.88it/s] " + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prediction time : 0.015 hrs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " predictions_chinese = classifier_chinese.predict(token_ids=test_token_ids_chinese,\n", + " input_mask=test_input_mask_chinese,\n", + " token_type_ids=test_token_type_ids_chinese,\n", + " batch_size=BATCH_SIZE)\n", + "print(\"Prediction time : {:.3f} hrs\".format(t.interval / 3600))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Evaluate" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " precision recall f1-score support\n", + "\n", + "contradiction 0.81 0.84 0.82 1670\n", + " entailment 0.84 0.68 0.76 1670\n", + " neutral 0.70 0.80 0.74 1670\n", + "\n", + " accuracy 0.77 5010\n", + " macro avg 0.78 0.77 0.77 5010\n", + " weighted avg 0.78 0.77 0.77 5010\n", + "\n" + ] + } + ], + "source": [ + "predictions_chinese = label_encoder_chinese.inverse_transform(predictions_chinese)\n", + "print(classification_report(test_df_chinese[LABEL_COL], predictions_chinese))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Language Inference on Hindi\n", + "For Hindi and all other languages except Chinese, we use the `bert-base-multilingual-cased` model. 
\n", + "The preprocesing, model training, and prediction steps are the same as on Chinese data, except for the underlying tokenizer and BERT model used" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tokenize and Preprocess" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 392702/392702 [03:48<00:00, 1719.84it/s]\n", + "100%|██████████| 5010/5010 [00:02<00:00, 1916.46it/s]\n" + ] + } + ], + "source": [ + "tokenizer_multi = Tokenizer(LANGUAGE_MULTI, cache_dir=CACHE_DIR)\n", + "\n", + "train_tokens_hindi = tokenizer_multi.tokenize(train_df_hindi[TEXT_COL])\n", + "test_tokens_hindi= tokenizer_multi.tokenize(test_df_hindi[TEXT_COL])\n", + "\n", + "train_token_ids_hindi, train_input_mask_hindi, train_token_type_ids_hindi = \\\n", + " tokenizer_multi.preprocess_classification_tokens(train_tokens_hindi, max_len=MAX_SEQ_LENGTH)\n", + "test_token_ids_hindi, test_input_mask_hindi, test_token_type_ids_hindi = \\\n", + " tokenizer_multi.preprocess_classification_tokens(test_tokens_hindi, max_len=MAX_SEQ_LENGTH)\n", + "\n", + "label_encoder_hindi = LabelEncoder()\n", + "train_labels_hindi = label_encoder_hindi.fit_transform(train_df_hindi[LABEL_COL])\n", + "num_labels_hindi = len(np.unique(train_labels_hindi))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create and Train Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch:1/2; batch:1->1228/12271; loss:1.091754\n", + "epoch:1/2; batch:1229->2456/12271; loss:0.992931\n", + "epoch:1/2; batch:2457->3684/12271; loss:1.045146\n", + "epoch:1/2; batch:3685->4912/12271; loss:0.799912\n", + "epoch:1/2; batch:4913->6140/12271; loss:0.815425\n", + "epoch:1/2; batch:6141->7368/12271; loss:0.564856\n", + "epoch:1/2; batch:7369->8596/12271; loss:0.726981\n", + "epoch:1/2; batch:8597->9824/12271; loss:0.764087\n", + "epoch:1/2; batch:9825->11052/12271; loss:0.964115\n", + "epoch:1/2; batch:11053->12271/12271; loss:0.502252\n", + "epoch:2/2; batch:1->1228/12271; loss:0.601600\n", + "epoch:2/2; batch:1229->2456/12271; loss:0.695099\n", + "epoch:2/2; batch:2457->3684/12271; loss:0.419610\n", + "epoch:2/2; batch:3685->4912/12271; loss:0.603106\n", + "epoch:2/2; batch:4913->6140/12271; loss:0.705180\n", + "epoch:2/2; batch:6141->7368/12271; loss:0.493404\n", + "epoch:2/2; batch:7369->8596/12271; loss:0.864921\n", + "epoch:2/2; batch:8597->9824/12271; loss:0.518601\n", + "epoch:2/2; batch:9825->11052/12271; loss:0.395920\n", + "epoch:2/2; batch:11053->12271/12271; loss:0.685858\n", + "Training time : 9.520 hrs\n" + ] + } + ], + "source": [ + "classifier_multi = BERTSequenceClassifier(language=LANGUAGE_MULTI,\n", + " num_labels=num_labels_hindi,\n", + " cache_dir=CACHE_DIR)\n", + "with Timer() as t:\n", + " classifier_multi.fit(token_ids=train_token_ids_hindi,\n", + " input_mask=train_input_mask_hindi,\n", + " token_type_ids=train_token_type_ids_hindi,\n", + " labels=train_labels_hindi,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)\n", + "print(\"Training time : {:.3f} hrs\".format(t.interval / 3600))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Predict and Evaluate" + ] + }, + { + "cell_type": "code", + 
"execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "5024it [01:02, 87.10it/s] " + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prediction time : 0.017 hrs\n", + " precision recall f1-score support\n", + "\n", + "contradiction 0.69 0.72 0.70 1670\n", + " entailment 0.74 0.51 0.60 1670\n", + " neutral 0.58 0.74 0.65 1670\n", + "\n", + " accuracy 0.65 5010\n", + " macro avg 0.67 0.65 0.65 5010\n", + " weighted avg 0.67 0.65 0.65 5010\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " predictions_hindi = classifier_multi.predict(token_ids=test_token_ids_hindi,\n", + " input_mask=test_input_mask_hindi,\n", + " token_type_ids=test_token_type_ids_hindi,\n", + " batch_size=BATCH_SIZE)\n", + "print(\"Prediction time : {:.3f} hrs\".format(t.interval / 3600))\n", + "predictions_hindi= label_encoder_hindi.inverse_transform(predictions_hindi)\n", + "print(classification_report(test_df_hindi[LABEL_COL], predictions_hindi))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pytorch", + "language": "python", + "name": "pytorch" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/scenarios/sentence_similarity/README.md b/scenarios/sentence_similarity/README.md index f22c24d23..9fa0b6805 100644 --- a/scenarios/sentence_similarity/README.md +++ b/scenarios/sentence_similarity/README.md @@ -1,6 +1,24 @@ +# Sentence Similarity -## What is sentence similarity? +This folder contains examples and best practices, written in Jupyter notebooks, for building sentence similarity models. The scores can be used in a wide variety of applications, such as search/retrieval, nearest-neighbor or kernel-based classification methods, recommendation, and ranking tasks. -Sentence similarity or semantic textual similarity is to determine how similar two pieces of texts are and a measure of the degree to which two pieces of text express the same meaning. This can take the form of assigning a score from 1 to 5. Related tasks are parahrase or duplicate identification. Sentence similarity is normally calculated by the following two steps: 1. obtaining the embeddings of the sentences, 2. taking the cosine similarity between them as shown in the following figure: +## What is sentence similarity -![Sentence Similarity](https://nlpbp.blob.core.windows.net/images/example-similarity.png)**Sentence Similarity ([Source](https://tfhub.dev/google/universal-sentence-encoder/1))** \ No newline at end of file +Sentence similarity or semantic textual similarity is to determine how similar two pieces of texts are and a measure of the degree to which two pieces of text express the same meaning. This can take the form of assigning a score from 1 to 5. Related tasks are paraphrase or duplicate identification. The common methods used for text similarity range from simple word-vector dot products to pairwise classification, and more recently, Siamese recurrent/convolutional neural networks with triplet loss functions. + +Sentence similarity is normally calculated by the following two steps: + +1. obtaining the embeddings of the sentences + +2. 
taking the cosine similarity between them as shown in the following figure([Source](https://tfhub.dev/google/universal-sentence-encoder/1)): + ![Sentence Similarity](https://nlpbp.blob.core.windows.net/images/example-similarity.png) + +## Summary + +The following summarizes each notebook for Sentence Similarity. Each notebook provides more details and guiding in principles on building state of the art models. + +|Notebook|Runs Local|Description| +|---|---|---| +|[Creating a Baseline model](baseline_deep_dive.ipynb)| Yes| A baseline model is a basic solution that serves as a point of reference for comparing other models to. The baseline model's performance gives us an indication of how much better our models can perform relative to a naive approach.| +|Senteval |[local](senteval_local.ipynb), [AzureML](senteval_azureml.ipynb)|SentEval is a widely used benchmarking tool for evaluating general-purpose sentence embeddings. Running SentEval locally is easy, but not necessarily efficient depending on the model specs. We provide an example on how to do this efficiently in Azure Machine Learning Service. | +|[GenSen on AzureML](gensen_aml_deep_dive.ipynb_)| No | This notebook serves as an introduction to an end-to-end NLP solution for sentence similarity building one of the State of the Art models, GenSen, on the AzureML platform. We show the advantages of AzureML when training large NLP models with GPU. diff --git a/scenarios/text_classification/README.md b/scenarios/text_classification/README.md index e69de29bb..5a8e46488 100644 --- a/scenarios/text_classification/README.md +++ b/scenarios/text_classification/README.md @@ -0,0 +1,3 @@ +# Text Classification + +Text classification is a supervised learning method of learning and predicting the category or the class of a document given its text content. The state-of-the-art methods are based on neural networks of different architectures as well as pretrained language models or word embeddings. Text classification is a core task in natural language Processing and has numerous applications such as sentiment analysis, document indexing in digital libraries, hate speech detection, and general-purpose categorization in medical, academic, legal, and many other domains. 
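The two-step recipe described in the sentence similarity README above (embed the sentences, then compare the embeddings with cosine similarity) comes down to a few lines of NumPy. The sketch below uses random vectors as stand-ins for real sentence embeddings produced by an encoder.

```python
import numpy as np

# Stand-ins for sentence embeddings from an encoder (e.g. 768-dimensional).
rng = np.random.RandomState(0)
embedding1 = rng.rand(768)
embedding2 = rng.rand(768)

cosine_similarity = np.dot(embedding1, embedding2) / (
    np.linalg.norm(embedding1) * np.linalg.norm(embedding2)
)
print(cosine_similarity)
```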
diff --git a/scenarios/text_classification/tc_dac_bert_ar.ipynb b/scenarios/text_classification/tc_dac_bert_ar.ipynb index f7a56b655..3c77a31e1 100644 --- a/scenarios/text_classification/tc_dac_bert_ar.ipynb +++ b/scenarios/text_classification/tc_dac_bert_ar.ipynb @@ -340,6 +340,7 @@ "- Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", "- Pad or truncate the token lists to the specified max length\n", "- Return mask lists that indicate paddings' positions\n", + "- Return token type id lists that indicate which sentence the tokens belong to (not needed for one-sequence classification)\n", "\n", "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" ] @@ -350,10 +351,10 @@ "metadata": {}, "outputs": [], "source": [ - "tokens_train, mask_train = tokenizer.preprocess_classification_tokens(\n", + "tokens_train, mask_train, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_train, MAX_LEN\n", ")\n", - "tokens_test, mask_test = tokenizer.preprocess_classification_tokens(\n", + "tokens_test, mask_test, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_test, MAX_LEN\n", ")" ] @@ -511,7 +512,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.6.5" } }, "nbformat": 4, diff --git a/scenarios/text_classification/tc_mnli_bert.ipynb b/scenarios/text_classification/tc_mnli_bert.ipynb index c7c2b0344..acc15bf23 100644 --- a/scenarios/text_classification/tc_mnli_bert.ipynb +++ b/scenarios/text_classification/tc_mnli_bert.ipynb @@ -275,6 +275,7 @@ "- Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", "- Pad or truncate the token lists to the specified max length\n", "- Return mask lists that indicate paddings' positions\n", + "- Return token type id lists that indicate which sentence the tokens belong to (not needed for one-sequence classification)\n", "\n", "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" ] @@ -285,10 +286,10 @@ "metadata": {}, "outputs": [], "source": [ - "tokens_train, mask_train = tokenizer.preprocess_classification_tokens(\n", + "tokens_train, mask_train, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_train, MAX_LEN\n", ")\n", - "tokens_test, mask_test = tokenizer.preprocess_classification_tokens(\n", + "tokens_test, mask_test, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_test, MAX_LEN\n", ")" ] @@ -446,7 +447,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.6.5" } }, "nbformat": 4, diff --git a/utils_nlp/dataset/preprocess.py b/utils_nlp/dataset/preprocess.py index 03f0e9062..2e51821f5 100644 --- a/utils_nlp/dataset/preprocess.py +++ b/utils_nlp/dataset/preprocess.py @@ -22,7 +22,8 @@ def to_lowercase_all(df): def to_lowercase(df, column_names=[]): """ - This function transforms strings of the column names in the dataframe passed to lowercase + This function transforms strings of the column names in the dataframe + passed to lowercase Args: df (pd.DataFrame): Raw dataframe with some text columns. 
@@ -46,18 +47,18 @@ def to_spacy_tokens( token_cols=["sentence1_tokens", "sentence2_tokens"], ): """ - This function tokenizes the sentence pairs using spaCy, defaulting to the - spaCy en_core_web_sm model - - Args: - df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. - sentence_cols (list, optional): Column names of the raw sentence pairs. - token_cols (list, optional): Column names for the tokenized sentences. - - Returns: - pd.DataFrame: Dataframe with new columns token_cols, each containing - a list of tokens for their respective sentences. - """ + This function tokenizes the sentence pairs using spaCy, defaulting to the + spaCy en_core_web_sm model + + Args: + df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. + sentence_cols (list, optional): Column names of the raw sentence pairs. + token_cols (list, optional): Column names for the tokenized sentences. + + Returns: + pd.DataFrame: Dataframe with new columns token_cols, each containing + a list of tokens for their respective sentences. + """ nlp = spacy.load("en_core_web_sm") text_df = df[sentence_cols] nlp_df = text_df.applymap(lambda x: nlp(x)) @@ -77,21 +78,22 @@ def rm_spacy_stopwords( custom_stopwords=[], ): """ - This function tokenizes the sentence pairs using spaCy and remove stopwords, - defaulting to the spaCy en_core_web_sm model - - Args: - df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. - sentence_cols (list, optional): Column names for the raw sentence pairs. - stop_cols (list, optional): Column names for the tokenized sentences - without stop words. - custom_stopwords (list of str, optional): List of custom stopwords to - register with the spaCy model. - - Returns: - pd.DataFrame: Dataframe with new columns stop_cols, each containing a - list of tokens for their respective sentences. - """ + This function tokenizes the sentence pairs using spaCy and removes + stopwords, defaulting to the spaCy en_core_web_sm model + + Args: + df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. + sentence_cols (list, optional): Column names for the raw sentence + pairs. + stop_cols (list, optional): Column names for the tokenized sentences + without stop words. + custom_stopwords (list of str, optional): List of custom stopwords to + register with the spaCy model. + + Returns: + pd.DataFrame: Dataframe with new columns stop_cols, each containing a + list of tokens for their respective sentences. + """ nlp = spacy.load("en_core_web_sm") if len(custom_stopwords) > 0: for csw in custom_stopwords: @@ -160,3 +162,13 @@ def rm_nltk_stopwords( stop_df.columns = stop_cols return pd.concat([df, stop_df], axis=1) + + +def convert_to_unicode(input_text): + """Converts input_text to Unicode.
Input must be utf-8.""" + if isinstance(input_text, str): + return input_text + elif isinstance(input_text, bytes): + return input_text.decode("utf-8", "ignore") + else: + raise TypeError("Unsupported string type: %s" % (type(input_text))) diff --git a/utils_nlp/dataset/stsbenchmark.py b/utils_nlp/dataset/stsbenchmark.py index ed919ed57..31e05e637 100644 --- a/utils_nlp/dataset/stsbenchmark.py +++ b/utils_nlp/dataset/stsbenchmark.py @@ -4,7 +4,6 @@ import os import tarfile import pandas as pd -import azureml.dataprep as dp from utils_nlp.dataset.url_utils import maybe_download @@ -14,38 +13,33 @@ def load_pandas_df(data_path, file_split=DEFAULT_FILE_SPLIT): """Load the STS Benchmark dataset as a pd.DataFrame - + Args: data_path (str): Path to data directory - file_split (str, optional): File split to load. One of (train, dev, test). Defaults to train. - + file_split (str, optional): File split to load. + One of (train, dev, test). + Defaults to train. + Returns: pd.DataFrame: STS Benchmark dataset """ - clean_file_path = os.path.join( - data_path, "clean/stsbenchmark", "sts-{}.csv".format(file_split) - ) - dflow = _maybe_download_and_extract(data_path, clean_file_path) - return dflow.to_pandas_dataframe() - - -def _maybe_download_and_extract(base_data_path, clean_file_path): - if not os.path.exists(clean_file_path): - raw_data_path = os.path.join(base_data_path, "raw") - if not os.path.exists(raw_data_path): - os.makedirs(raw_data_path) - sts_path = _download_sts(raw_data_path) - sts_files = [f for f in os.listdir(sts_path) if f.endswith(".csv")] - _clean_sts( - sts_files, - sts_path, - os.path.join(base_data_path, "clean", "stsbenchmark"), - ) - return dp.auto_read_file(clean_file_path).drop_columns("Column1") + file_name = "sts-{}.csv".format(file_split) + df = _maybe_download_and_extract(file_name, data_path) + return df + + +def _maybe_download_and_extract(sts_file, base_data_path): + raw_data_path = os.path.join(base_data_path, "raw") + if not os.path.exists(raw_data_path): + os.makedirs(raw_data_path) + sts_path = _download_sts(raw_data_path) + df = _load_sts(os.path.join(sts_path, sts_file)) + return df def _download_sts(dirpath): - """Download and extract data from http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz + """Download and extract data from + http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz Args: dirpath (str): Path to data directory. @@ -66,8 +60,11 @@ def _extract_sts(tarpath, target_dirpath=".", tmode="r"): Args: tarpath (str): Path to tarfile, to be deleted after extraction. - target_dirpath (str, optional): Directory in which to save the extracted files. - tmode (str, optional): The mode for reading, of the form "filemode[:compression]". Defaults to "r". + target_dirpath (str, optional): Directory in which to save + the extracted files. + tmode (str, optional): The mode for reading, + of the form "filemode[:compression]". + Defaults to "r". Returns: str: Path to extracted STS Benchmark data. @@ -79,31 +76,59 @@ def _extract_sts(tarpath, target_dirpath=".", tmode="r"): return os.path.join(target_dirpath, extracted) -def _clean_sts(filenames, src_dir, target_dir): - """Drop columns containing irrelevant metadata and save as new csv files in the target_dir +def _load_sts(src_file_path): + """Load datafile as dataframe Args: - filenames (list of str): List of filenames for the train/dev/test csv files. - src_dir (str): Directory for the raw csv files. - target_dir (str): Directory for the clean csv files to be written to. 
+ src_file_path (str): filepath to train/dev/test csv files. """ - if not os.path.exists(target_dir): - os.makedirs(target_dir) - filepaths = [os.path.join(src_dir, f) for f in filenames] - for i, fp in enumerate(filepaths): - dat = dp.auto_read_file(path=fp) - s = dat.keep_columns(["Column5", "Column6", "Column7"]).rename_columns( - { - "Column5": "score", - "Column6": "sentence1", - "Column7": "sentence2", - } - ) - print( - "Writing clean dataframe to {}".format( - os.path.join(target_dir, filenames[i]) + with open(src_file_path, "r", encoding="utf-8") as f: + sent_pairs = [] + for line in f: + line = line.strip().split("\t") + sent_pairs.append( + [ + line[0].strip(), + line[1].strip(), + line[2].strip(), + line[3].strip(), + float(line[4]), + line[5].strip(), + line[6].strip(), + ] ) + + sdf = pd.DataFrame( + sent_pairs, + columns=[ + "column_0", + "column_1", + "column_2", + "column_3", + "column_4", + "column_5", + "column_6", + ], ) - sdf = s.to_pandas_dataframe().to_csv( - os.path.join(target_dir, filenames[i]), sep="\t" - ) + return sdf + + +def clean_sts(df): + """Drop columns containing irrelevant metadata and + save as new csv files in the target_dir. + + Args: + df (pandas.Dataframe): drop columns from train/test/dev files. + """ + clean_df = df.drop( + ["column_0", "column_1", "column_2", "column_3"], axis=1 + ) + clean_df = clean_df.rename( + index=str, + columns={ + "column_4": "score", + "column_5": "sentence1", + "column_6": "sentence2", + }, + ) + return clean_df diff --git a/utils_nlp/dataset/xnli.py b/utils_nlp/dataset/xnli.py index e7bbcf4cb..a233c9e7b 100644 --- a/utils_nlp/dataset/xnli.py +++ b/utils_nlp/dataset/xnli.py @@ -10,37 +10,86 @@ import pandas as pd from utils_nlp.dataset.url_utils import extract_zip, maybe_download +from utils_nlp.dataset.preprocess import convert_to_unicode -URL = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip" +URL_XNLI = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip" +URL_XNLI_MT = "https://www.nyu.edu/projects/bowman/xnli/XNLI-MT-1.0.zip" -DATA_FILES = { - "dev": "XNLI-1.0/xnli.dev.jsonl", - "test": "XNLI-1.0/xnli.test.jsonl", -} +def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): + """Downloads and extracts the dataset files. -def load_pandas_df(local_cache_path=None, file_split="dev"): - """Downloads and extracts the dataset files Args: - local_cache_path ([type], optional): [description]. - Defaults to None. + local_cache_path (str, optional): Path to store the data. + Defaults to "./". file_split (str, optional): The subset to load. - One of: {"dev", "test"} - Defaults to "train". + One of: {"train", "dev", "test"} + Defaults to "dev". + language (str, optional): language subset to read. + One of: {"en", "fr", "es", "de", "el", "bg", "ru", + "tr", "ar", "vi", "th", "zh", "hi", "sw", "ur"} + Defaults to "zh" (Chinese). Returns: pd.DataFrame: pandas DataFrame containing the specified XNLI subset. 
""" - file_name = URL.split("/")[-1] - maybe_download(URL, file_name, local_cache_path) + if file_split in ("dev", "test"): + url = URL_XNLI + sentence_1_index = 6 + sentence_2_index = 7 + label_index = 1 - if not os.path.exists( - os.path.join(local_cache_path, DATA_FILES[file_split]) - ): + zip_file_name = url.split("/")[-1] + folder_name = ".".join(zip_file_name.split(".")[:-1]) + file_name = folder_name + "/" + ".".join(["xnli", file_split, "tsv"]) + elif file_split == "train": + url = URL_XNLI_MT + sentence_1_index = 0 + sentence_2_index = 1 + label_index = 2 + + zip_file_name = url.split("/")[-1] + folder_name = ".".join(zip_file_name.split(".")[:-1]) + file_name = ( + folder_name + + "/multinli/" + + ".".join(["multinli", file_split, language, "tsv"]) + ) + + maybe_download(url, zip_file_name, local_cache_path) + + if not os.path.exists(os.path.join(local_cache_path, folder_name)): extract_zip( - os.path.join(local_cache_path, file_name), local_cache_path + os.path.join(local_cache_path, zip_file_name), local_cache_path + ) + + with open( + os.path.join(local_cache_path, file_name), "r", encoding="utf-8" + ) as f: + lines = f.read().splitlines() + + line_list = [line.split("\t") for line in lines] + # Remove the column name row + line_list.pop(0) + if file_split != "train": + line_list = [line for line in line_list if line[0] == language] + + label_list = [convert_to_unicode(line[label_index]) for line in line_list] + old_contradict_label = convert_to_unicode("contradictory") + new_contradict_label = convert_to_unicode("contradiction") + label_list = [ + new_contradict_label if label == old_contradict_label else label + for label in label_list + ] + text_list = [ + ( + convert_to_unicode(line[sentence_1_index]), + convert_to_unicode(line[sentence_2_index]), ) - return pd.read_json( - os.path.join(local_cache_path, DATA_FILES[file_split]), lines=True - ) + for line in line_list + ] + + df = pd.DataFrame({"text": text_list, "label": label_list}) + + return df From 6c90ceae633f559b73cdb856c481867fbce2ad35 Mon Sep 17 00:00:00 2001 From: Hong Lu Date: Mon, 24 Jun 2019 12:11:18 -0400 Subject: [PATCH 051/108] Updated readme of NER scenario. --- scenarios/named_entity_recognition/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scenarios/named_entity_recognition/README.md b/scenarios/named_entity_recognition/README.md index e69de29bb..cbcc3c07e 100644 --- a/scenarios/named_entity_recognition/README.md +++ b/scenarios/named_entity_recognition/README.md @@ -0,0 +1,8 @@ +# Named Entity Recognition (NER) +Named Entity Recognition (NER) is the task of detecting and classifying +real-world objects mentioned in text. Common named entities include person +names, locations, organizations, etc. The state-of-the art NER methods include +combining Long Short-Term Memory neural network with Conditional Random Field +(LSTM-CRF) and pretrained language models like BERT. NER can be used for +information extraction and filtering. It also plays an important role in other +NLP tasks like question answering and texts summarization. 
From e5d149257ef39dfcb4f38ca630208d5745fa0310 Mon Sep 17 00:00:00 2001 From: Liqun Shao Date: Mon, 24 Jun 2019 12:58:39 -0400 Subject: [PATCH 052/108] fix broken link for gensen aml notebook in readme --- scenarios/sentence_similarity/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scenarios/sentence_similarity/README.md b/scenarios/sentence_similarity/README.md index 9fa0b6805..0bfe52821 100644 --- a/scenarios/sentence_similarity/README.md +++ b/scenarios/sentence_similarity/README.md @@ -21,4 +21,4 @@ The following summarizes each notebook for Sentence Similarity. Each notebook pr |---|---|---| |[Creating a Baseline model](baseline_deep_dive.ipynb)| Yes| A baseline model is a basic solution that serves as a point of reference for comparing other models to. The baseline model's performance gives us an indication of how much better our models can perform relative to a naive approach.| |Senteval |[local](senteval_local.ipynb), [AzureML](senteval_azureml.ipynb)|SentEval is a widely used benchmarking tool for evaluating general-purpose sentence embeddings. Running SentEval locally is easy, but not necessarily efficient depending on the model specs. We provide an example on how to do this efficiently in Azure Machine Learning Service. | -|[GenSen on AzureML](gensen_aml_deep_dive.ipynb_)| No | This notebook serves as an introduction to an end-to-end NLP solution for sentence similarity building one of the State of the Art models, GenSen, on the AzureML platform. We show the advantages of AzureML when training large NLP models with GPU. +|[GenSen on AzureML](gensen_aml_deep_dive.ipynb)| No | This notebook serves as an introduction to an end-to-end NLP solution for sentence similarity building one of the State of the Art models, GenSen, on the AzureML platform. We show the advantages of AzureML when training large NLP models with GPU. From 48ce3bf9896a979769c7e3ab704256a1aa6b509b Mon Sep 17 00:00:00 2001 From: hlums Date: Mon, 24 Jun 2019 19:21:16 +0000 Subject: [PATCH 053/108] Changed tokenizer_preprocess_ner_text to tokenize_ner. --- .../ner_wikigold_bert.ipynb | 92 +++++++++---------- tests/unit/test_bert_common.py | 4 +- utils_nlp/bert/common.py | 10 +- utils_nlp/bert/token_classification.py | 7 +- 4 files changed, 56 insertions(+), 57 deletions(-) diff --git a/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb b/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb index c637a33f2..565151a7b 100644 --- a/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb +++ b/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb @@ -8,7 +8,7 @@ "*Licensed under the MIT License.*\n", "# Named Entity Recognition Using BERT\n", "## Summary\n", - "This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task. Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, and model evaluation. \n", + "This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task. Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, model scoring, and model evaluation. 
\n", "\n", "[BERT (Bidirectional Transformers for Language Understanding)](https://arxiv.org/pdf/1810.04805.pdf) is a powerful pre-trained lanaguage model that can be used for multiple NLP tasks, including text classification, question answering, named entity recognition, etc. It's able to achieve state of the art performance with only a few epochs of fine tuning on task specific datasets. \n", "The figure below illustrates how BERT can be fine tuned for NER tasks. The input data is a list of tokens representing a sentence. In the training data, each token has an entity label. After fine tuning, the model predicts an entity label for each token in a given testing sentence. \n", @@ -38,10 +38,8 @@ "import sys\n", "import os\n", "import random\n", - "from seqeval.metrics import f1_score, classification_report\n", - "\n", + "from seqeval.metrics import classification_report\n", "import torch\n", - "from pytorch_pretrained_bert.tokenization import BertTokenizer\n", "\n", "nlp_path = os.path.abspath('../../')\n", "if nlp_path not in sys.path:\n", @@ -67,7 +65,7 @@ }, "outputs": [], "source": [ - "# path configurations\n", + "# path configuration\n", "CACHE_DIR=\"./temp\"\n", "\n", "# set random seeds\n", @@ -105,7 +103,7 @@ "### Get training and testing data\n", "The dataset used in this notebook is the [wikigold dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). \n", "\n", - "The helper function `load_train_test_dfs` downloads the data file if it doesn't exist in local_cache_path. It splits the dataset into training and testing sets according to `test_percentage`. Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", + "The helper function `load_train_test_dfs` downloads the data file if it doesn't exist in `local_cache_path`. It splits the dataset into training and testing sets according to `test_percentage`. Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", "\n", "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). \n", "\n", @@ -158,7 +156,7 @@ "metadata": {}, "source": [ "**Create a dictionary that maps labels to numerical values** \n", - "Note there is an argument called trailing_piece_tag. BERT uses a WordPiece tokenizer which breaks down some words into multiple tokens, e.g. \"criticize\" is tokenized into \"critic\" and \"##ize\". Since the input data only come with one token label for \"criticize\", within Tokenizer.prerocess_ner_tokens, the original token label is assigned to the first token \"critic\" and the second token \"##ize\" is labeled as \"X\". By default, trailing_piece_tag is set to \"X\". If \"X\" already exists in your data, you can set trailing_piece_tag to another value that doesn't exist in your data." + "Note there is an argument called `trailing_piece_tag`. 
BERT uses a WordPiece tokenizer which breaks down some words into multiple tokens, e.g. \"criticize\" is tokenized into \"critic\" and \"##ize\". Since the input data only come with one token label for \"criticize\", within Tokenizer.prerocess_ner_tokens, the original token label is assigned to the first token \"critic\" and the second token \"##ize\" is labeled as \"X\". By default, `trailing_piece_tag` is set to \"X\". If \"X\" already exists in your data, you can set `trailing_piece_tag` to another value that doesn't exist in your data." ] }, { @@ -197,7 +195,7 @@ "metadata": {}, "source": [ "**Tokenize and preprocess text** \n", - "The `tokenize_preprocess_ner_text` method of the `Tokenizer` class converts text and labels in strings to numerical features, involving the following steps:\n", + "The `tokenize_ner` method of the `Tokenizer` class converts text and labels in strings to numerical features, involving the following steps:\n", "1. WordPiece tokenization.\n", "2. Convert tokens and labels to numerical values, i.e. token ids and label ids.\n", "3. Sequence padding or truncation according to the `max_seq_length` configuration." @@ -212,24 +210,24 @@ "outputs": [], "source": [ "train_token_ids, train_input_mask, train_trailing_token_mask, train_label_ids = \\\n", - " tokenizer.tokenize_preprocess_ner_text(text=train_df[TEXT_COL],\n", - " label_map=label_map,\n", - " max_len=MAX_SEQ_LENGTH,\n", - " labels=train_df[LABELS_COL],\n", - " trailing_piece_tag=\"X\")\n", + " tokenizer.tokenize_ner(text=train_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=train_df[LABELS_COL],\n", + " trailing_piece_tag=\"X\")\n", "test_token_ids, test_input_mask, test_trailing_token_mask, test_label_ids = \\\n", - " tokenizer.tokenize_preprocess_ner_text(text=test_df[TEXT_COL],\n", - " label_map=label_map,\n", - " max_len=MAX_SEQ_LENGTH,\n", - " labels=test_df[LABELS_COL],\n", - " trailing_piece_tag=\"X\")" + " tokenizer.tokenize_ner(text=test_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=test_df[LABELS_COL],\n", + " trailing_piece_tag=\"X\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "`Tokenizer.tokenize_preprocess_ner_text` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", + "`Tokenizer.tokenize_ner` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", "1. token ids: list of numerical values each corresponds to a token.\n", "2. attention mask: list of 1s and 0s, 1 for input tokens and 0 for padded tokens, so that padded tokens are not attended to. \n", "3. trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ize. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", @@ -282,7 +280,7 @@ "* Language.CHINESE: \"bert-base-chinese\"\n", "* Language.MULTILINGUAL: \"bert-base-multilingual-cased\"\n", "\n", - "Here we use the base, uncased pretrained model." + "Here we use the base, cased pretrained model." 
] }, { @@ -294,7 +292,7 @@ "outputs": [], "source": [ "token_classifier = BERTTokenClassifier(language=LANGUAGE,\n", - " num_labels=len(label_list),\n", + " num_labels=len(label_map),\n", " cache_dir=CACHE_DIR)" ] }, @@ -333,10 +331,10 @@ "output_type": "stream", "text": [ "\n", - "Iteration: 43%|████▎ | 25/58 [00:30<00:40, 1.22s/it]\u001b[A\n", - "Iteration: 43%|████▎ | 25/58 [00:49<00:40, 1.22s/it]\u001b[A\n", - "Iteration: 86%|████████▌ | 50/58 [01:00<00:09, 1.22s/it]\u001b[A\n", - "Epoch: 20%|██ | 1/5 [01:10<04:42, 70.55s/it]1s/it]\u001b[A\n", + "Iteration: 40%|███▉ | 23/58 [00:30<00:45, 1.31s/it]\u001b[A\n", + "Iteration: 40%|███▉ | 23/58 [00:49<00:45, 1.31s/it]\u001b[A\n", + "Iteration: 81%|████████ | 47/58 [01:00<00:14, 1.30s/it]\u001b[A\n", + "Epoch: 20%|██ | 1/5 [01:14<04:58, 74.52s/it]8s/it]\u001b[A\n", "Iteration: 0%| | 0/58 [00:00 BERT_MAX_LEN: warnings.warn( "setting max_len to max allowed tokens: {}".format( diff --git a/utils_nlp/bert/token_classification.py b/utils_nlp/bert/token_classification.py index ecce743ae..f4c0f50bf 100644 --- a/utils_nlp/bert/token_classification.py +++ b/utils_nlp/bert/token_classification.py @@ -271,7 +271,7 @@ def predict( logits = logits.detach().cpu() if step == 0: - logits_all = logits + logits_all = logits.numpy() else: logits_all = np.append(logits_all, logits, axis=0) @@ -293,10 +293,11 @@ def predict( def create_label_map(label_list, trailing_piece_tag="X"): - if trailing_piece_tag not in label_list: - label_list.append(trailing_piece_tag) label_map = {label: i for i, label in enumerate(label_list)} + if trailing_piece_tag not in label_list: + label_map[trailing_piece_tag] = len(label_list) + return label_map From 5d86d03d1046f1ce0a54f2dc032bace7366e84f5 Mon Sep 17 00:00:00 2001 From: Abhiram E Date: Mon, 24 Jun 2019 19:00:48 -0400 Subject: [PATCH 054/108] Updated pip version of AzureML Mlflow used in the Pytorch estimator --- scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb index 7e40b298d..4148272e4 100644 --- a/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb +++ b/scenarios/sentence_similarity/gensen_aml_deep_dive.ipynb @@ -946,7 +946,7 @@ " distributed_training=MpiConfiguration(),\n", " use_gpu=True,\n", " conda_packages=['scikit-learn=0.20.3', 'h5py', 'nltk'],\n", - " pip_packages=['azureml-mlflow>=1.0.41','numpy>=1.16.0']\n", + " pip_packages=['azureml-mlflow>=1.0.43.1','numpy>=1.16.0']\n", " )" ] }, From d3c417ff661fdc11a387ecc1f9104f51b100d0b8 Mon Sep 17 00:00:00 2001 From: hlums Date: Tue, 25 Jun 2019 20:02:40 +0000 Subject: [PATCH 055/108] Update notebook text. --- .../ner_msra_bert_chinese.ipynb | 26221 +++++++++++++++- 1 file changed, 25841 insertions(+), 380 deletions(-) diff --git a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb index c11b54a6e..136b2f6db 100644 --- a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb +++ b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb @@ -8,12 +8,14 @@ "*Licensed under the MIT License.*\n", "# Named Entity Recognition Using BERT on Chinese\n", "## Summary\n", - "This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task on Chinese text. 
Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, and model evaluation. \n", + "This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task on Chinese text. Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, model scoring and model evaluation.\n", "\n", "[BERT (Bidirectional Transformers for Language Understanding)](https://arxiv.org/pdf/1810.04805.pdf) is a powerful pre-trained lanaguage model that can be used for multiple NLP tasks, including text classification, question answering, named entity recognition, etc. It's able to achieve state of the art performance with only a few epochs of fine tuning on task specific datasets. \n", "The figure below illustrates how BERT can be fine tuned for NER tasks. The input data is a list of tokens representing a sentence. In the training data, each token has an entity label. After fine tuning, the model predicts an entity label for each token in a given testing sentence. \n", "\n", - "" + "\n", + "\n", + "Named Entity Recognition on non-English text is not very differnt from that on English text. The only difference is the model used, which is configured by the `LANGUAGE` variable below. For non-English languages including Chinese, the *bert-base-multilingual-cased* model can be used by setting `LANGUAGE = Language.MULTILINGUAL`. For Chinese, the *bert-base-chinese* model can also be used by setting `LANGUAGE = Language.CHINESE`. On Chinese text, the performance of *bert-base-chinese* is usually better than *bert-base-multilingual-cased* because the *bert-base-chinese* model is pretrained on Chinese data only. " ] }, { @@ -38,10 +40,9 @@ "import sys\n", "import os\n", "import random\n", - "from seqeval.metrics import f1_score, classification_report\n", + "from seqeval.metrics import classification_report\n", "\n", "import torch\n", - "from pytorch_pretrained_bert.tokenization import BertTokenizer\n", "\n", "nlp_path = os.path.abspath('../../')\n", "if nlp_path not in sys.path:\n", @@ -68,10 +69,7 @@ "outputs": [], "source": [ "# path configurations\n", - "DATA_DIR = \"./data\"\n", - "TRAIN_DATA_FILE = \"../MSRA/msra-bakeoff3-training-utf8.2col\"\n", - "Test_DATA_FILE = \"../MSRA/bakeoff3_goldstandard.txt\"\n", - "CACHE_DIR=\"./temp\"\n", + "CACHE_DIR = \"./temp\"\n", "\n", "# set random seeds\n", "RANDOM_SEED = 100\n", @@ -88,12 +86,9 @@ "\n", "# optimizer configuration\n", "LEARNING_RATE = 3e-5\n", - "WARMUP_PROPORTION = 0.1\n", "\n", "TEXT_COL = \"sentence\"\n", - "LABEL_COL = \"labels\"\n", - "\n", - "CACHE_DIR = \"../../../\"" + "LABEL_COL = \"labels\"" ] }, { @@ -108,13 +103,20 @@ "metadata": {}, "source": [ "### Get training and testing data\n", - "The dataset used in this notebook is the [wikigold dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). The `download` function downloads the data file to a user-specified directory. \n", + "The dataset used in this notebook is the MSRA NER dataset. The dataset consists of 45000 training sentences and 3940 testing sentences. 
\n", "\n", - "The helper function `get_train_test_data` splits the dataset into training and testing sets according to `test_percentage`. Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", + "The helper function `load_pandas_df` downloads the data files if they don't exist in `local_cache_path`. It returns the training or testing data frame based on `file_split`\n", "\n", - "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). An 'X' label is added for the trailing word pieces generated by BERT, because BERT uses WordPiece tokenizer. \n", + "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 7 unique labels in the dataset: \n", + "* 'O': non-entity \n", + "* 'B-LOC': beginning of location entity\n", + "* 'I-LOC': within location entity\n", + "* 'B-PER': beginning of person entity\n", + "* 'I-PER': within person entity\n", + "* 'B-ORG': beginning of organization entity\n", + "* 'I-ORG': within organization entity\n", "\n", - "The maximum number of words in a sentence is 144, so we set MAX_SEQ_LENGTH to 200 above, because the number of tokens will grow after WordPiece tokenization." + "The maximum number of words in a sentence is 756. We set MAX_SEQ_LENGTH to 200 above to reduce the GPU memory needed to run this notebook. Less than 1% of testing data are longer than 200, so this should have negligible impact on the model performance evaluation." ] }, { @@ -127,9 +129,9 @@ "output_type": "stream", "text": [ "Maximum sequence length in train data is: 746\n", - "Maximum sequence length in test data is: 439\n", + "Maximum sequence length in test data is: 2427\n", "Number of sentences in training data: 45000\n", - "Number of sentences in testing data: 3940\n", + "Number of sentences in testing data: 3442\n", "Unique labels: ['O', 'B-LOC', 'B-ORG', 'B-PER', 'I-LOC', 'I-ORG', 'I-PER']\n" ] } @@ -176,27 +178,27 @@ " \n", " \n", " 0\n", - " 当 希 望 工 程 救 助 的 百 万 儿 童 成 长 起 来 , 科 教 兴 国 蔚 然 ...\n", + " 当希望工程救助的百万儿童成长起来,科教兴国蔚然成风时,今天有收藏价值的书你没买,明日就叫你悔...\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", " 1\n", - " 藏 书 本 来 就 是 所 有 传 统 收 藏 门 类 中 的 第 一 大 户 , 只 是 ...\n", + " 藏书本来就是所有传统收藏门类中的第一大户,只是我们结束温饱的时间太短而已。\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", " 2\n", - " 因 有 关 日 寇 在 京 掠 夺 文 物 详 情 , 藏 界 较 为 重 视 , 也 是 ...\n", + " 因有关日寇在京掠夺文物详情,藏界较为重视,也是我们收藏北京史料中的要件之一。\n", " [O, O, O, B-LOC, O, O, B-LOC, O, O, O, O, O, O...\n", " \n", " \n", " 3\n", - " 我 们 藏 有 一 册 1 9 4 5 年 6 月 油 印 的 《 北 京 文 物 保 存 ...\n", + " 我们藏有一册1945年6月油印的《北京文物保存保管状态之调查报告》,调查范围涉及故宫、历博、...\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", " 4\n", - " 以 家 乡 的 历 史 文 献 、 特 定 历 史 时 期 书 刊 、 某 一 名 家 或 ...\n", + " 以家乡的历史文献、特定历史时期书刊、某一名家或名著的多种出版物为专题,注意精品、非卖品、纪念...\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", @@ -205,11 +207,11 @@ ], "text/plain": [ " sentence \\\n", - "0 当 希 望 工 程 救 助 的 百 万 儿 童 成 长 起 来 , 科 教 兴 国 蔚 然 ... \n", - "1 藏 书 本 来 就 是 所 有 传 统 收 藏 门 类 中 的 第 一 大 户 , 只 是 ... \n", - "2 因 有 关 日 寇 在 京 掠 夺 文 物 详 情 , 藏 界 较 为 重 视 , 也 是 ... \n", - "3 我 们 藏 有 一 册 1 9 4 5 年 6 月 油 印 的 《 北 京 文 物 保 存 ... 
\n", - "4 以 家 乡 的 历 史 文 献 、 特 定 历 史 时 期 书 刊 、 某 一 名 家 或 ... \n", + "0 当希望工程救助的百万儿童成长起来,科教兴国蔚然成风时,今天有收藏价值的书你没买,明日就叫你悔... \n", + "1 藏书本来就是所有传统收藏门类中的第一大户,只是我们结束温饱的时间太短而已。 \n", + "2 因有关日寇在京掠夺文物详情,藏界较为重视,也是我们收藏北京史料中的要件之一。 \n", + "3 我们藏有一册1945年6月油印的《北京文物保存保管状态之调查报告》,调查范围涉及故宫、历博、... \n", + "4 以家乡的历史文献、特定历史时期书刊、某一名家或名著的多种出版物为专题,注意精品、非卖品、纪念... \n", "\n", " labels \n", "0 [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ... \n", @@ -233,7 +235,7 @@ "metadata": {}, "source": [ "### Tokenization and Preprocessing\n", - "The `preprocess_ner_tokens` method of the `Tokenizer` class converts raw string data to numerical features, involving the following steps:\n", + "The `tokenize_ner` method of the `Tokenizer` class converts raw string data to numerical features, involving the following steps:\n", "1. WordPiece tokenization.\n", "2. Convert tokens and labels to numerical values, i.e. token ids and label ids.\n", "3. Sequence padding or truncation according to the `max_seq_length` configuration." @@ -248,7 +250,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": { "scrolled": false }, @@ -266,7 +268,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "metadata": { "scrolled": false }, @@ -290,34 +292,25764 @@ "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1945\n", + "》,\n", + "”。\n", + "1997\n", + "(1937\n", + "—1945\n", + "”。\n", + "》(1919\n", + ")、\n", + "》(1923\n", + ")、\n", + "(1927\n", + ")、\n", + "》(1926\n", + ")、\n", + "》(1930\n", + "1908\n", + ")。\n", + "”,\n", + "”。\n", + "1974\n", + "”,\n", + "”。\n", + "”,“\n", + "”,“\n", + "”,“\n", + "……\n", + "”,“\n", + "”,\n", + "”,\n", + ",“\n", + "”。\n", + "”;\n", + "”。\n", + "1967\n", + "”、“\n", + ":“\n", + "”,\n", + "”,\n", + "”。\n", + ":“\n", + ":“\n", + "……”\n", + "……\n", + "”,\n", + "……\n", + "———\n", + ",“\n", + ":“\n", + ",“\n", + ",“\n", + ":“\n", + ":“\n", + ":“\n", + ":“\n", + "……\n", + ":“\n", + "”。\n", + "”。\n", + "、“\n", + ",“\n", + "”!\n", + ":“\n", + "”。\n", + ":“\n", + "———《\n", + "》,\n", + "……\n", + "”。\n", + "》,\n", + "》,\n", + "》,\n", + "……\n", + "”。\n", + "……\n", + "”。\n", + ",“\n", + "”。\n", + "———\n", + "》(\n", + "———《\n", + "———\n", + "》(\n", + "”,\n", + ":“\n", + "”,\n", + "”,\n", + "”;\n", + "”,\n", + "”。\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "———\n", + ",“\n", + "”,“\n", + "”,\n", + "”,\n", + ",《\n", + "———\n", + "……\n", + "1938\n", + "———\n", + "———“\n", + "”,\n", + "……\n", + "》,\n", + ":“\n", + ":“\n", + "1998\n", + "”。\n", + ",1983\n", + "1977\n", + ",1979\n", + ":(010)64014411\n", + "2908\n", + "”、“\n", + "”,\n", + "10%—20%\n", + "”(\n", + "82)\n", + "”,\n", + "”,\n", + ":“\n", + "1997\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + ":“\n", + ",“\n", + "”。\n", + ",“\n", + "”,\n", + "),\n", + ",“\n", + ",“\n", + "———\n", + "”。\n", + "200\n", + "1997\n", + "1400\n", + "1995\n", + "”。\n", + "1993\n", + ",1978\n", + "1985\n", + "1988\n", + ")(\n", + "』,\n", + "》。\n", + "……\n", + ":“\n", + "1991\n", + "”,\n", + ":“\n", + ":“\n", + ":“\n", + "……\n", + "……”\n", + ":“\n", + ":“\n", + "1940\n", + "———\n", + "———“\n", + "”,\n", + "”(\n", + ")。\n", + "”。\n", + "”,\n", + "”,\n", + "”。\n", + ",“\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "———\n", + "———\n", + ":“\n", + ":“\n", + "……\n", + "……\n", + "1996\n", + "……\n", + "———\n", + "26\n", + "”,\n", + ":“\n", + ";“\n", + "”。\n", + ",“16”\n", + "27,\n", + "17\n", + "10%\n", + ",90%\n", + "1997\n", + "1.33\n", 
+ "2000\n", + "0.07\n", + "1997\n", + "303.9%。\n", + "100\n", + "”。\n", + "1997\n", + "4000\n", + "2000—3000\n", + "200\n", + "0.75\n", + "4.2\n", + "0.75\n", + "12.7\n", + "200\n", + "90\n", + "1997\n", + "———\n", + "———\n", + ";———\n", + ";———\n", + "20%—30%;———\n", + "40%;———\n", + "1.5\n", + ",1991\n", + "37\n", + ";———\n", + "300\n", + "500\n", + "———\n", + "1959\n", + "4000\n", + "200\n", + "200\n", + ")、\n", + ")、\n", + "1978\n", + ":“\n", + "300\n", + "160—200\n", + "350\n", + "33%。\n", + "1300\n", + "26\n", + "500\n", + "1463\n", + "5.2\n", + "3.6\n", + "1/4,\n", + "1/3\n", + "1998\n", + "2000\n", + "”。\n", + "1997\n", + "》。\n", + "1994\n", + "1992\n", + "1991\n", + "1990\n", + "147\n", + "160\n", + "”。\n", + "80\n", + "28\n", + "2000\n", + "200\n", + "5.\n", + "4.\n", + "3.\n", + ",“\n", + "”(DETBLAPAKHUS),\n", + "1/4\n", + "CASH\n", + "1/4\n", + "1/4\n", + "3.\n", + "2.\n", + "1.\n", + "4000\n", + "1991\n", + "7200\n", + "CASH\n", + "1995\n", + "60\n", + "CASH\n", + "400\n", + "”,\n", + "……\n", + ":“\n", + "”。\n", + "”。\n", + "”,\n", + ":“\n", + "”。\n", + ",“\n", + ":“\n", + "”。\n", + "”,\n", + ":“\n", + ",‘\n", + "’。\n", + "”。\n", + ":“\n", + "”。\n", + "1994\n", + "”。\n", + "……\n", + ",“\n", + "OK,\n", + "1993\n", + "……\n", + "———\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + ":“\n", + "1977\n", + ":“\n", + ":“\n", + "”,\n", + ":“\n", + "”、“\n", + "”。\n", + "”,\n", + "22\n", + "”,\n", + "---\n", + "』、『\n", + "』、『\n", + "』,\n", + ",『\n", + "》,\n", + ":“\n", + ",1949\n", + "1065”\n", + ":1949\n", + "1986\n", + "1942\n", + ",1943\n", + "1959\n", + ",“\n", + "……\n", + "1941\n", + "25\n", + ";“\n", + "……\n", + "———\n", + "———《\n", + "》。\n", + "1997\n", + "———\n", + "》。\n", + "”、“\n", + "1996\n", + ",1997\n", + "1997\n", + "1996\n", + "1997\n", + ",《\n", + "》、《\n", + "》、《\n", + ",《\n", + ";《\n", + "》、《\n", + ",《\n", + ";《\n", + ",《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1997\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + ":《\n", + "》、《\n", + "》、《\n", + "》;\n", + ":《\n", + "》、《\n", + "———\n", + "》、《\n", + "》、《\n", + "》。\n", + ":《\n", + "》、《\n", + "———\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》。\n", + "1997\n", + "1997\n", + "1997\n", + "1996\n", + "1997\n", + "1997\n", + "1996\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1996\n", + ":“\n", + "”———\n", + "”,\n", + "”,\n", + "1000\n", + "2600\n", + "120\n", + "”。\n", + "”。\n", + "52\n", + "185\n", + "100\n", + "”2\n", + "”、“\n", + "800\n", + "”。\n", + "9800\n", + "2.13\n", + "300\n", + "100\n", + "150\n", + "20000\n", + "300\n", + "”,\n", + "400\n", + "200\n", + "250\n", + "500\n", + "19\n", + "175\n", + "100\n", + "300\n", + "1600\n", + "66\n", + "90\n", + "1979\n", + "5.7\n", + "、2\n", + ":“\n", + "5000\n", + "1.5\n", + "60\n", + ":“\n", + "’”。\n", + "806\n", + "1996\n", + "”,\n", + "3.4\n", + "45\n", + "225\n", + "250\n", + "”。\n", + ",“\n", + "”(\n", + "400\n", + "2000\n", + "5.95\n", + "5.95\n", + "”。\n", + "5000\n", + "980\n", + "1000\n", + "196\n", + ":100733。\n", + ":“\n", + "”。\n", + "”。\n", + ":“\n", + "”,\n", + ":“\n", + ":“\n", + "1965\n", + "3000\n", + "”、“\n", + "”,\n", + "),\n", + "),\n", + ")。\n", + "1993\n", + "25\n", + "1989\n", + "』,\n", + "21\n", + "”,\n", + ",“\n", + "35\n", + "1986\n", + "”,\n", + "”。\n", + "21\n", + ":“\n", + "”20\n", + "8000\n", + "36\n", + "110\n", + ":“\n", + ":“\n", + "”60\n", + "1954\n", + "1942\n", + ",1943\n", + ",1922\n", + "100\n", + "38\n", + "300\n", + "1997\n", + "60\n", + 
"1996\n", + "2000\n", + "……\n", + "1994\n", + "”,\n", + "”,\n", + "1993\n", + "”,\n", + ",5\n", + "33\n", + "1995\n", + "1994\n", + "17\n", + "200\n", + "1994\n", + "1992\n", + ",10\n", + ",“\n", + "2800\n", + "4000\n", + "3000\n", + "1.4\n", + "40\n", + "300\n", + "110\n", + "1996\n", + "”。\n", + "26\n", + ")(\n", + "”,\n", + "19\n", + "1500\n", + "———\n", + "”、“\n", + "”,\n", + ":“\n", + "1995\n", + "1968\n", + ",1995\n", + ",1950\n", + ")(\n", + "1996\n", + "400\n", + ":“\n", + "”。\n", + "1928\n", + "”(\n", + "”)。\n", + "1993\n", + "70\n", + "198\n", + "》(\n", + "1998\n", + ":“\n", + ",70\n", + "198\n", + "2.6\n", + "”,\n", + ":“\n", + ",690\n", + "》,\n", + "180\n", + "》、《\n", + "600—800\n", + "”、“\n", + "1280\n", + "400\n", + "780\n", + "》,230\n", + "660\n", + "180\n", + "”、“\n", + "”,\n", + ",“\n", + "59\n", + ",4\n", + "70\n", + ",10\n", + "80\n", + ",“\n", + "”,\n", + "25\n", + "3000\n", + "”。\n", + "200\n", + "218\n", + "40\n", + "47\n", + "2、\n", + "1、\n", + "———\n", + "4500\n", + "10000\n", + "4500\n", + "200\n", + "17\n", + "1994\n", + ",50\n", + "90\n", + "15%—20%\n", + "1050\n", + "———\n", + "150\n", + "3500\n", + "1997\n", + "2000\n", + "4000\n", + "90\n", + "4000\n", + "2000\n", + "---(\n", + "”,\n", + "”。\n", + "”,\n", + ",“\n", + "20%,\n", + "20%,\n", + "50%,\n", + "”、“\n", + "1990\n", + "1600\n", + "1/3)。\n", + ",200\n", + "1%\n", + "3‰;\n", + "———\n", + "”,\n", + "”,\n", + "”。\n", + "”,“\n", + "”,\n", + "———\n", + "”(\n", + "17\n", + "2000\n", + "300\n", + "8000\n", + "3799\n", + "82285\n", + "9200\n", + "1997\n", + "26.8%,\n", + "70%\n", + "40%\n", + "32.5%,\n", + "50%—60%\n", + "35%\n", + ",1990\n", + "1997\n", + "9.4%,\n", + "15.7%\n", + "11.2%\n", + "6800\n", + "5700\n", + "90\n", + "1990\n", + "61.1%,\n", + "1997\n", + "49.3%,\n", + "10.8\n", + "4100\n", + "21.4%\n", + "23.9%,\n", + "2.5\n", + "18.5%\n", + "26.8%,\n", + "8.3\n", + "8%\n", + "700\n", + "125\n", + "1991\n", + "1998\n", + "11.2%,\n", + "1400\n", + ",7\n", + "9805\n", + ":『\n", + ":『\n", + "SPEOS\n", + "———\n", + "》,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "19\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1983\n", + ",《\n", + ",1988\n", + "》,\n", + ",6\n", + ",6\n", + "31\n", + "28\n", + ",5\n", + "、6\n", + "》,\n", + "……\n", + "》,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1983\n", + "———\n", + "1985\n", + ",《\n", + "”,\n", + "……\n", + ",14\n", + "———\n", + "1960\n", + "”。\n", + "2、\n", + "1、\n", + "———\n", + "———\n", + "”,\n", + "”,\n", + "———\n", + "”,\n", + "”。\n", + "———\n", + "”。\n", + "———\n", + "1994\n", + "1994\n", + "1985\n", + "1981\n", + ",“\n", + "---\n", + "---\n", + "”,\n", + "”。\n", + "1997\n", + "6000\n", + "1996\n", + "3000\n", + "1000\n", + "57\n", + "”,\n", + ",2000\n", + "1500\n", + "”。\n", + ",“\n", + "”,\n", + "600\n", + "50%\n", + "1996\n", + "37%,\n", + "10%。\n", + "1995\n", + "5000\n", + "”,\n", + "2.45\n", + "3000\n", + "1996\n", + "1657.2\n", + "),\n", + "50%,\n", + "48\n", + "540\n", + "”(\n", + "17\n", + "150\n", + "400\n", + "1000\n", + "350\n", + "70\n", + "”,\n", + ",“\n", + ",“\n", + "……\n", + ",“\n", + "……\n", + ",“\n", + "”,\n", + "200\n", + "……\n", + "1.2\n", + ";“\n", + "……\n", + "500\n", + ",4\n", + "200\n", + "”。\n", + ",4\n", + "……\n", + "38\n", + "1200\n", + "4.96\n", + "”(\n", + "66\n", + ",58\n", + "”,\n", + "”。\n", + "73\n", + "25\n", + "1000\n", + "”,\n", + "40\n", + "200\n", + ",70\n", + "2000\n", + "”。\n", + "”。\n", + "……\n", + ",『\n", + "』,\n", + "85\n", + "”,\n", + "……\n", + "1994\n", + "2000\n", + 
"1989\n", + ":“\n", + "85\n", + "———\n", + "1992\n", + "1996\n", + "……\n", + "1995\n", + "8000\n", + "7000\n", + "5000\n", + "1995\n", + "……\n", + "200\n", + "4000\n", + "60\n", + "1995\n", + "3·1\n", + ":“\n", + "”1996\n", + ":“\n", + "”1996\n", + "1995\n", + "2000\n", + "1995\n", + "1996\n", + "18.72\n", + "21.40\n", + "114.3%。\n", + "1990\n", + "1997\n", + "23\n", + "1997\n", + "8.9\n", + "1.6\n", + ":“\n", + "150\n", + "1995\n", + "129\n", + "”!\n", + "1996\n", + ":“\n", + "1∶5。\n", + "40\n", + "”,\n", + "1995\n", + "”,\n", + ":“\n", + "1997\n", + "1992\n", + "300\n", + "”1991\n", + "1983\n", + "1979\n", + ",20\n", + "”。\n", + "”。\n", + ":“\n", + "60\n", + "40%,\n", + "3∶1\n", + "6000\n", + "100\n", + ",300\n", + ",800\n", + "”,\n", + "”。\n", + "2.1\n", + "”。\n", + "100\n", + "1.2\n", + "3800\n", + "267\n", + "1000\n", + "、10\n", + "1·2\n", + "、40\n", + ")(\n", + "140\n", + "1997\n", + "2700\n", + "1996\n", + "1997\n", + "1998\n", + "1995\n", + "23\n", + ")(\n", + "21\n", + "0·6\n", + ",1995\n", + "5000\n", + ",1996\n", + "3000\n", + ",1997\n", + "60%\n", + "40%\n", + "20%。\n", + "2000\n", + "1/3\n", + "1/2\n", + "700\n", + "1.6\n", + "、50%—60%\n", + "27\n", + "164\n", + "14.7\n", + "63\n", + "81\n", + ":“\n", + "……\n", + "47\n", + "”,\n", + "1993\n", + ":“\n", + "54\n", + ",1992\n", + "40\n", + "”,\n", + "1991\n", + "47\n", + "1945\n", + "53\n", + "300\n", + "”。\n", + "---\n", + "90\n", + "—40\n", + "”,\n", + "”,\n", + "”。\n", + ")。\n", + "1/3\n", + "1/5。\n", + "21\n", + "1205\n", + "29\n", + "4274\n", + "1996\n", + ":5\n", + "60%\n", + ":1997\n", + "34.4%,\n", + "1993\n", + "61.8%,\n", + "62.1%,\n", + "50%\n", + "60%,\n", + "50%。\n", + "1994\n", + "……\n", + "”,\n", + "500\n", + "100—200\n", + ");\n", + "7000\n", + "》。\n", + ",BP\n", + "》。\n", + "》(\n", + "》,\n", + "》。\n", + "》。\n", + ":“\n", + "……”\n", + "1993\n", + "”;\n", + "》,\n", + "》,\n", + "”。\n", + "1966\n", + "1978\n", + "1942\n", + "》。\n", + "”,\n", + "》。\n", + "》、《\n", + "》,\n", + "》(\n", + "),\n", + "1940\n", + "1937\n", + "》,\n", + "》。\n", + "1936\n", + "》,\n", + "》,\n", + "》,\n", + ",《\n", + ":“\n", + "”1956\n", + ",‘\n", + "”;\n", + ",“\n", + ":“\n", + ",“\n", + "———\n", + "……\n", + ",“\n", + "”,\n", + "……\n", + "”。\n", + "》、《\n", + "》、《\n", + "》,\n", + "》、《\n", + "》、《\n", + "”。\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "”、“\n", + "”,\n", + "”。\n", + "1996\n", + ",“\n", + "》,\n", + "”,\n", + "”。\n", + "———\n", + "1997\n", + "》,\n", + "”《\n", + "》。\n", + "”,\n", + "”。\n", + "365\n", + "0.61\n", + "1996\n", + ";6\n", + "87\n", + "1996\n", + "25\n", + "28\n", + "”,\n", + "』,\n", + "600\n", + "”,\n", + "”,\n", + "35%\n", + "150\n", + "110\n", + "60\n", + "38\n", + "87\n", + "700\n", + "800\n", + ",20\n", + "100\n", + "”,“\n", + "”,\n", + "34\n", + "3.6\n", + "28\n", + "、40\n", + "25\n", + "1.5\n", + "1.2\n", + "150\n", + "1986\n", + "1500\n", + "1997\n", + "2000\n", + "2000\n", + "7000\n", + "500\n", + "1987\n", + "1269\n", + "80\n", + "1996\n", + "5000\n", + "”,\n", + "350\n", + "”,\n", + "———\n", + "6.14\n", + "13.7%,\n", + "2800\n", + "8.7%。\n", + ":“\n", + ":“\n", + "”,\n", + ",150\n", + "43\n", + "150\n", + "”。\n", + "1100\n", + "400\n", + ":“\n", + "”,\n", + "”,\n", + "”。\n", + "1997\n", + ";“\n", + "500\n", + "1/8。\n", + "1996\n", + "300\n", + "3290\n", + "90\n", + "1978\n", + "———\n", + "———\n", + "———(\n", + ":“\n", + "0.2\n", + "1998\n", + "27\n", + "1994\n", + "1990\n", + "1987\n", + "1.5\n", + "23\n", + "26·7\n", + "……\n", + ",5\n", + ",10\n", + "100\n", + ",1995\n", + "150\n", + ",1994\n", + ",“\n", + 
"”、“\n", + "1993\n", + "———\n", + "”,\n", + "60%\n", + "200\n", + "1996\n", + "34\n", + ":“\n", + "80\n", + "”。\n", + "1000\n", + "10%。\n", + "52.8\n", + "1998\n", + "—2000\n", + ",“\n", + "1700\n", + "1400\n", + "300\n", + "17\n", + "1700\n", + "1/3,\n", + ":“\n", + "1986\n", + ",1997\n", + "1994\n", + "46\n", + "40\n", + ":“\n", + "———\n", + "”。\n", + "”,\n", + ":“\n", + ",78\n", + "”,\n", + "———\n", + "1200\n", + "2·8\n", + "6900\n", + ":“\n", + "……\n", + ":“\n", + "……\n", + "”,\n", + "———\n", + "”;\n", + "……\n", + ",《\n", + "”(1997\n", + "),\n", + "”。\n", + "”(1996\n", + "),\n", + "”。\n", + "”(1995\n", + "),\n", + "”,\n", + "”(1994\n", + "),\n", + "”。\n", + "”(1993\n", + "),\n", + "”,\n", + "”(1992\n", + "),\n", + "”。\n", + "”。\n", + "1991\n", + "”,\n", + "”,\n", + "”。\n", + "23790\n", + "27\n", + "6000\n", + "1997\n", + "1996\n", + "66\n", + "51\n", + "1989\n", + "1.8\n", + "600\n", + "17\n", + "1997\n", + "28\n", + "”,\n", + "600\n", + "70\n", + "2339\n", + ",24\n", + "27\n", + "123\n", + "1993\n", + ",1995\n", + "1000\n", + "500\n", + "———\n", + "1995\n", + "1997\n", + "”,\n", + "”。\n", + "600\n", + "3000\n", + "”,\n", + "”。\n", + "———\n", + ",1971\n", + "1995\n", + ",1957\n", + "”,\n", + "”。\n", + "100\n", + "200\n", + "4000\n", + "400\n", + "100\n", + "26\n", + "21\n", + ",1994\n", + ":“\n", + ",1995\n", + "”。\n", + "”。\n", + "”,\n", + "200\n", + "》,\n", + "100\n", + "1.5\n", + "1994\n", + "2280\n", + ":“\n", + "1994\n", + "1994\n", + "———\n", + "3500\n", + ",“\n", + "”。\n", + "———\n", + "———1997\n", + "”;\n", + ":“\n", + "---(\n", + ")(\n", + ")2:\n", + "1:\n", + "13.26\n", + "1992\n", + "”、“\n", + "”。\n", + "70%\n", + "”。\n", + ",4\n", + "27\n", + "”,\n", + "60\n", + "”。\n", + "500\n", + "22\n", + "2600\n", + "”,\n", + ",1997\n", + "2:\n", + "8000\n", + "1:\n", + "1985\n", + "—30\n", + ")4\n", + "28\n", + "124\n", + "8000\n", + "226\n", + "2000\n", + "”,\n", + "1200\n", + "”。\n", + "1996\n", + "26\n", + "”,38\n", + "1996\n", + "27\n", + "121\n", + "”。\n", + ")(\n", + "”,\n", + "”。\n", + "”。\n", + "”,\n", + "”、“\n", + ",“\n", + ",“\n", + "6000\n", + "TICO\n", + "……\n", + "500\n", + "”。\n", + "1996\n", + "48\n", + "1997\n", + "36\n", + "80%\n", + "60%\n", + "”,\n", + "1100\n", + "400\n", + "8000—10000\n", + "5000—7000\n", + "3000\n", + "90\n", + "……(\n", + "》,\n", + "1991\n", + "》,\n", + "》、\n", + "》、\n", + "》、《\n", + "》、\n", + "》、\n", + "》、\n", + "》、\n", + "》,\n", + "1998\n", + "31\n", + ",“\n", + "”。\n", + "……”\n", + "109\n", + "”,\n", + "”,\n", + "《B\n", + "”,\n", + ":“\n", + "”,\n", + "1996\n", + "2000\n", + "”,\n", + "”,\n", + "1996\n", + "……\n", + "《B\n", + "》,\n", + "1·29\n", + "1996\n", + "90\n", + ",1997\n", + "1.5\n", + "200\n", + "4000\n", + ",80%\n", + "850\n", + "2000\n", + "、10\n", + "、6\n", + "55\n", + "5000\n", + "260\n", + "”,\n", + "1996\n", + "2500\n", + ",1997\n", + "70\n", + "100\n", + "4500\n", + "”。\n", + "1.5\n", + "80\n", + "8000\n", + "———\n", + "2000\n", + "……\n", + "2500\n", + ",1994\n", + "8000\n", + "1.2\n", + "90\n", + "100\n", + "、100\n", + "、1000\n", + "”,\n", + "8%。\n", + "”。\n", + "———\n", + "5—10\n", + ",“\n", + "4500\n", + "3%,\n", + "480\n", + "200\n", + "2000\n", + "———\n", + "2071\n", + "1200\n", + "100\n", + "125\n", + "”。\n", + ")“\n", + "60\n", + "300\n", + "140\n", + "160\n", + ",28\n", + "7·5\n", + "600\n", + "40\n", + "6.4\n", + "5.8\n", + ",1997\n", + "2.4\n", + "”,\n", + "”:\n", + "1994\n", + "1000\n", + "1997\n", + "8100\n", + "”,\n", + "2000\n", + "135\n", + "500\n", + "1995\n", + "370\n", + ",1996\n", + "2050\n", + ",1994\n", + 
"317\n", + "3500\n", + "1000\n", + "1200\n", + "———\n", + "600\n", + "1000\n", + "400\n", + "17\n", + "”。\n", + "--\n", + "628\n", + ",15\n", + "17\n", + "”,\n", + "1996\n", + ",1997\n", + "》,\n", + "279.7\n", + "”,\n", + ":“\n", + ":“\n", + ",1000\n", + "1912\n", + "”。\n", + ")2:\n", + "70\n", + "1:\n", + "———\n", + "70\n", + "23\n", + "100%,\n", + "80%\n", + "1997\n", + ":“\n", + "”。\n", + "1.5\n", + "”。\n", + "”、\n", + "27300\n", + "95%,\n", + "47%,\n", + "43%。\n", + ",1992\n", + "90\n", + "1954\n", + "37\n", + "---\n", + "17\n", + "、18\n", + "60\n", + "×210(\n", + "430\n", + "60\n", + "19\n", + "”,\n", + "……\n", + "1994\n", + ":“\n", + ",“\n", + "”。\n", + ",1\n", + "2000\n", + "70\n", + "”。\n", + "1996\n", + ",73\n", + ":“\n", + "340\n", + "”,\n", + "1996\n", + "、1997\n", + "400\n", + "100\n", + ":“\n", + "”30\n", + "17\n", + "1996\n", + "39\n", + "”,\n", + "”,\n", + "”,\n", + "5%。\n", + "1500\n", + "80\n", + "1984\n", + "85%\n", + "1500\n", + "40\n", + "600\n", + "1954\n", + "1952\n", + "73\n", + ",1946\n", + "———\n", + "———\n", + "300\n", + "300\n", + "”“\n", + "1938\n", + ",10\n", + "40\n", + "1993\n", + ")(\n", + "40\n", + ",“\n", + ":“\n", + ":“\n", + ":“\n", + ":“\n", + "1997\n", + "“10·9”\n", + "———\n", + "2340\n", + "),\n", + "190\n", + "1991\n", + "11600\n", + "),\n", + "1984\n", + ",1991\n", + ",1951\n", + ",1970\n", + "、67\n", + "58\n", + "”,\n", + "40\n", + ",95%\n", + ":“\n", + "500\n", + "619\n", + "3200\n", + "86\n", + "40\n", + "3000\n", + "140\n", + "74\n", + "346\n", + "6400\n", + "80%。\n", + "48\n", + "”———\n", + "———20\n", + "———20\n", + "》,\n", + "19\n", + "1968\n", + "19\n", + "———\n", + "1994\n", + "200\n", + "”。\n", + ",“\n", + ":“\n", + "”,\n", + "1985\n", + "”———\n", + "Email\n", + "1992\n", + "”。\n", + "19\n", + "36\n", + "500\n", + "”,\n", + "29\n", + "”31\n", + "---(\n", + ")(\n", + "),\n", + ",“\n", + "”。\n", + "———\n", + "”(\n", + "15·8%,\n", + "18·1%。\n", + "233\n", + "17%,\n", + "1997\n", + "20·4%。\n", + "149\n", + "19·8%,\n", + "210\n", + "359\n", + "19·2%,\n", + "5·5%。\n", + "9·3%、10·4%、13·7%。\n", + "88·6\n", + "6·4%。\n", + "328\n", + "1·3%。\n", + "”,\n", + "58·45%,\n", + "45·51%,\n", + "41·89%,\n", + "31·8%,\n", + "26·02%,\n", + "23·46%,\n", + "16·19%,\n", + "15·47%。\n", + "50·94%,\n", + "48·14%,\n", + "32·31%,\n", + "30·07%,\n", + "22·75%,\n", + "18·32%,\n", + "15·6%,\n", + "4·97%。\n", + "21\n", + "”。\n", + "80\n", + "90\n", + "(PIP)、\n", + "(POP)、\n", + "(DOULESCAN)、\n", + "(NICAM)\n", + "”,\n", + "1998\n", + "2006\n", + "(HDTV),\n", + "1000\n", + "”。\n", + "1996\n", + "(FCC)\n", + "80\n", + "90\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "、“\n", + "”。\n", + ":“\n", + ",1997\n", + "88\n", + "1996\n", + "28\n", + ":“\n", + "60\n", + "1.6\n", + ",1996\n", + ",“\n", + "”。\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "1、\n", + "”。\n", + "1996\n", + "1996\n", + ",《\n", + "》、《\n", + "》、《\n", + "51069\n", + "”,\n", + ":“\n", + ":“\n", + ",4\n", + "32\n", + "6000\n", + "5、\n", + "3、\n", + "1、\n", + "”、“\n", + "”。\n", + ",“\n", + "———\n", + "▲『\n", + "1860\n", + "8848\n", + "27\n", + "28\n", + "19\n", + "28\n", + ",“\n", + "”。\n", + "1989\n", + "6000\n", + "7000\n", + "……\n", + "21\n", + "8021\n", + "8000\n", + "———\n", + "1997\n", + "”DS97\n", + ":“\n", + ":“\n", + "1988\n", + ",“\n", + "”。\n", + ",95%\n", + "5%。\n", + "”———\n", + "”,\n", + "”。\n", + "36\n", + "336\n", + "”。\n", + ",4\n", + "、6\n", + ",71∶65;\n", + ",66∶60。\n", + ",88∶69;\n", + ",96∶62。\n", + ",73∶66;\n", + ",87∶61。\n", + "107∶32\n", + "30%\n", + "31∶41\n", + "21\n", + 
"96∶59\n", + "28\n", + "55∶72\n", + "27\n", + ":26\n", + "70\n", + "25\n", + ",3\n", + "600\n", + "》。\n", + "》。\n", + "---『\n", + ",“\n", + "1991\n", + "104∶99\n", + ",“\n", + "1987\n", + "1986\n", + ",34\n", + "70\n", + "80\n", + "85\n", + ",3\n", + ":“\n", + "1992\n", + "76∶66\n", + "2000\n", + ";2010\n", + "2000\n", + ",2010\n", + "1997\n", + "1100\n", + "100\n", + "1995\n", + "300\n", + "1000\n", + ",80\n", + "300\n", + "OK,\n", + ":“\n", + "”,\n", + "---\n", + "0∶6\n", + "0∶0\n", + "2∶0\n", + "3∶2\n", + "2∶2\n", + "4∶1\n", + "5∶0\n", + "2∶3\n", + "5∶3\n", + "1∶0\n", + "2∶0\n", + "2∶1\n", + "2∶3\n", + "27\n", + "65∶75\n", + "95∶89\n", + "46\n", + "19\n", + "90\n", + "24∶36\n", + "12∶8\n", + "27\n", + "52∶70\n", + "25\n", + "2∶0\n", + "25\n", + ":“\n", + "……”\n", + "”,\n", + "2∶1\n", + "2∶1\n", + "4∶1\n", + "”。\n", + "”。\n", + "---\n", + "”,\n", + "90\n", + "WNBA\n", + "”,\n", + "NBA\n", + ",B\n", + ";C\n", + ";D\n", + ",A\n", + "02\n", + "19\n", + "1994\n", + "1992\n", + "750\n", + "0∶2\n", + ",23\n", + ",14\n", + "26\n", + "3∶1\n", + "、5\n", + "、6\n", + "、7\n", + "、8\n", + "、9\n", + "、10\n", + "、11\n", + "、12\n", + "、13\n", + "、14\n", + "、15\n", + ",B\n", + ",C\n", + "89∶37\n", + "64∶45\n", + "26\n", + "27\n", + ":“\n", + "2∶0\n", + ",“\n", + "’,\n", + "”。\n", + ",36\n", + "1996\n", + "26\n", + "260.06\n", + "2000\n", + "33\n", + "、42\n", + "28\n", + "60\n", + "80\n", + "17\n", + "23\n", + "29\n", + "2200\n", + "25\n", + "23\n", + "103\n", + "33\n", + "6∶4、4∶6\n", + "6∶2\n", + "6∶1\n", + "6∶4\n", + "1989\n", + "1994\n", + "25\n", + "3∶0。\n", + "684.0\n", + "3×20\n", + "2000\n", + "23\n", + ",5\n", + "118.5\n", + "75\n", + "580\n", + "556\n", + "431\n", + "585\n", + "416\n", + "376\n", + "、8\n", + "’。\n", + ",“\n", + "”,\n", + "”,\n", + "”。\n", + "04\n", + "……\n", + "———\n", + "26\n", + "25\n", + "2∶0\n", + "0∶2\n", + "23\n", + "26\n", + "、28\n", + "11∶4\n", + "……\n", + "……\n", + "2∶1\n", + "11∶4\n", + "12∶10\n", + "0∶2\n", + "9∶11\n", + "7∶10\n", + "15∶7、15∶11\n", + ",“\n", + "”。\n", + "11∶9、10∶12、11∶5\n", + "”。\n", + "1992\n", + "1997\n", + ":“\n", + "……”\n", + ":“\n", + "……”\n", + "1994\n", + ";1998\n", + "1994—1998\n", + "23\n", + "……\n", + "---\n", + "15∶10、11∶15、15∶2\n", + "2∶3。\n", + "18∶14、15∶7\n", + "3∶2\n", + "8700\n", + "8300\n", + "8300\n", + "———\n", + "1954\n", + "65\n", + "57\n", + "”。\n", + "”。\n", + "2∶1\n", + "0∶0。\n", + "2∶1\n", + "1∶0\n", + "3∶2\n", + "32\n", + "0∶2\n", + "2∶3\n", + "6∶0\n", + "、“\n", + "17\n", + "4∶1\n", + ":“\n", + "”;\n", + ":“\n", + "”,\n", + ":“\n", + "”。\n", + "”。\n", + "”。\n", + ":“\n", + "”。\n", + ":“\n", + ":“\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + ":“\n", + "”:\n", + "1996\n", + ":“\n", + ":“\n", + "”。\n", + ")(\n", + "),\n", + "1958\n", + "1954\n", + "》(\n", + "》)\n", + "“54.7.2,\n", + "1992\n", + "3∶1\n", + "0∶2\n", + "50%\n", + "”。\n", + "2∶3\n", + "4000\n", + "63\n", + "6200\n", + "21\n", + "0∶1\n", + "25\n", + "5∶0\n", + "29\n", + "19\n", + "1∶0\n", + "1966\n", + "32\n", + "21\n", + "1∶0\n", + "……\n", + "1000\n", + "25\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1995\n", + "……\n", + "4∶0\n", + "2∶3\n", + "2∶2\n", + "1∶2\n", + "---\n", + "195\n", + "345\n", + "(150\n", + "),\n", + "7.5\n", + "77\n", + "58\n", + "87.5\n", + "122.5\n", + "210\n", + "23\n", + "15∶1、15∶8\n", + "11∶8、11∶8\n", + "11∶4、11∶0\n", + "15∶10、15∶8\n", + "11∶5、11∶7\n", + "21\n", + "5∶0\n", + "4∶1\n", + "1860\n", + "5∶4\n", + ":1998\n", + ",“\n", + "”。\n", + ",26\n", + ",22\n", + "3∶2\n", + "2∶3\n", + "、11\n", + "21\n", + 
"90\n", + ")21\n", + "、22\n", + "19\n", + "5∶2\n", + ";20\n", + "4∶3\n", + ",5\n", + "19\n", + "7∶15、13∶15\n", + "15∶11、15∶6\n", + "4∶1\n", + "”。\n", + "———\n", + "21\n", + "1∶0\n", + ",4\n", + "17\n", + "19\n", + "1∶1\n", + "3∶2\n", + "4∶1\n", + "22\n", + ":A\n", + ";A\n", + "2∶3\n", + "4∶1\n", + "3∶2\n", + "1∶2\n", + "1∶2\n", + "2∶1\n", + "”。\n", + "2∶0\n", + "1∶1\n", + "16∶17\n", + "0∶2\n", + "4∶8\n", + "14∶14。\n", + "15∶3\n", + ",A\n", + "---\n", + "69\n", + "145\n", + "315\n", + "90.5\n", + "92.5\n", + "112.5\n", + "205\n", + "53\n", + "1986\n", + "21\n", + "25\n", + "17\n", + "”。\n", + "81\n", + "》,5\n", + "”———\n", + "2∶3\n", + "11∶6、11∶3\n", + "2∶3。\n", + "”。\n", + "2∶3\n", + "2∶2\n", + "2∶0\n", + "3∶2\n", + "21\n", + "5∶0\n", + "5∶0\n", + "17\n", + "1∶2\n", + "———\n", + "19\n", + "4∶1\n", + "5∶0\n", + "21\n", + ":A\n", + ";B\n", + "2∶0\n", + "”。\n", + "5∶0\n", + "3∶2\n", + "19\n", + ":21\n", + "12∶35\n", + "7∶00\n", + "11∶42\n", + "21\n", + "1∶0\n", + "AC\n", + "19\n", + "8300\n", + "19\n", + "(180\n", + "140\n", + "175\n", + "315\n", + "62\n", + "56\n", + "120\n", + "150\n", + "270\n", + "95\n", + "2.5\n", + "48\n", + ",16\n", + "70\n", + "2.5\n", + "”,\n", + "19\n", + "0∶1\n", + "1∶0\n", + "1∶1\n", + "17\n", + "1∶0\n", + "7790\n", + "8300\n", + ")、\n", + "),\n", + "8300\n", + "1∶3\n", + "4∶1、8∶0、2∶1\n", + "0∶0,\n", + "BTV\n", + "5∶4\n", + "26\n", + "31\n", + "63∶69。\n", + "87∶114\n", + "……(\n", + "6∶15\n", + "15∶9、18∶15\n", + "0∶3\n", + "1∶4\n", + "2∶0\n", + "0∶3\n", + "4∶1\n", + "5∶0\n", + "4∶1\n", + "15∶5、16∶17、8∶15\n", + "2∶0\n", + "5∶0\n", + "5∶0\n", + "2∶3\n", + "”,\n", + "”。\n", + "……\n", + "11∶0、11∶1\n", + "———\n", + "200\n", + "1996\n", + "23\n", + "—1997\n", + "29\n", + "2000\n", + "1∶11\n", + "11∶4\n", + "1∶11、3∶11\n", + "———\n", + "5∶0\n", + "3∶2\n", + "4∶1\n", + "4∶1\n", + "2∶0\n", + "2∶0\n", + "11∶4\n", + "1∶11、3∶11\n", + "1∶2\n", + "11∶5、11∶7\n", + "4∶1\n", + "17\n", + ",62\n", + "2002\n", + "———\n", + "31\n", + "2∶2\n", + "2∶1\n", + "0∶0\n", + "0∶0\n", + "0∶2\n", + "2∶1\n", + "0∶1\n", + "17\n", + "1∶0\n", + "23\n", + "21∶13、21∶11\n", + "12∶8,\n", + "21∶13\n", + "3∶2、2∶0\n", + "1994\n", + "AC\n", + ",16\n", + "26\n", + "6、“\n", + "”———\n", + "5、\n", + "”———\n", + "4、“\n", + "”———\n", + "3、“\n", + "———\n", + "2、“\n", + "”———\n", + "1、“\n", + "”———\n", + ":“\n", + "……\n", + "”,\n", + "”———\n", + "4、\n", + "3、\n", + "2、\n", + "1、\n", + "2、\n", + "1、\n", + "1493\n", + "4480\n", + "62\n", + "586\n", + "180\n", + "64\n", + "100\n", + "2∶0\n", + "2∶4\n", + ":“\n", + "26\n", + "”,\n", + "“15\n", + "、20\n", + "9.8\n", + "1997\n", + "”,\n", + "1000\n", + "4618\n", + "3132\n", + "1990\n", + "1997\n", + "120\n", + "60\n", + "400\n", + "1996\n", + "80\n", + "260\n", + "”。\n", + ",“\n", + "”。\n", + "65\n", + "1995\n", + "15%\n", + "———\n", + ")(\n", + ":“\n", + ":“\n", + "”5\n", + ":“\n", + ":“\n", + "……”\n", + ";5\n", + "、12\n", + ";5\n", + "———\n", + "……\n", + "22\n", + "103\n", + "1∶0\n", + "25\n", + "1∶0\n", + "21\n", + "23\n", + ",8\n", + "28\n", + ",9\n", + "150\n", + "21\n", + "19\n", + ",23\n", + "57\n", + "1090\n", + "32\n", + "”。\n", + "80\n", + "1100\n", + ",1997\n", + ",“\n", + "”。\n", + "”(\n", + "1996\n", + "2000\n", + "80\n", + "”。\n", + "4300\n", + "2/3\n", + ",1994\n", + "106\n", + "110\n", + "———\n", + "、3\n", + "、1\n", + "2.6\n", + ":“\n", + ":4\n", + "”。\n", + "”。\n", + ",15\n", + ",18\n", + ",19\n", + ":5\n", + ";9\n", + ";11\n", + "23\n", + "1998\n", + "22\n", + "17\n", + "3000\n", + "6000\n", + "500\n", + "、1000\n", + "3000\n", + "166\n", + "5、6\n", + 
"”,\n", + ",“\n", + "79,\n", + ",“\n", + "100\n", + "200\n", + "100\n", + "200\n", + ",12\n", + "37、11\n", + "49\n", + "51\n", + "06、11\n", + "09\n", + "40\n", + "55\n", + "21\n", + "71\n", + ":“\n", + "22\n", + ":“\n", + "100\n", + "、200\n", + "、400\n", + ":“\n", + "79\n", + "0.08\n", + "71,\n", + "”,\n", + "2000\n", + "”,\n", + "”。\n", + "1998\n", + "70%;\n", + "27\n", + "33\n", + "968\n", + ")。\n", + "45\n", + "ATP\n", + "260\n", + "1.5\n", + "36\n", + "52\n", + "、56\n", + "、60\n", + "、65\n", + "、70\n", + "、75\n", + "、80\n", + "、85\n", + "85\n", + "673.98\n", + "480.09\n", + "517.26\n", + "23.31\n", + "76\n", + "79\n", + "94\n", + "03\n", + "25\n", + ":“\n", + "49\n", + "05\n", + "1997\n", + "1997\n", + "76\n", + "100\n", + "100\n", + "1997\n", + "1996\n", + "22.8\n", + "78\n", + "4×100\n", + "1992\n", + "100\n", + "1989\n", + ",15\n", + "100\n", + "、200\n", + "17、22\n", + "26,\n", + "”。\n", + "80\n", + "60。\n", + "100\n", + "79\n", + "300\n", + "———\n", + "》(\n", + "》)。\n", + "———\n", + "2500\n", + "1998\n", + "NEC\n", + "NEC\n", + "8000\n", + "8586\n", + "———\n", + "100\n", + ")。\n", + "100\n", + "79\n", + "“Hello”\n", + "CA1407\n", + "2∶1\n", + "0∶0,\n", + "90\n", + "0∶0\n", + "2∶2\n", + "3∶1\n", + "1∶2\n", + "3∶0\n", + "75\n", + "295\n", + "130\n", + "165.5\n", + "295\n", + "165.5\n", + "56\n", + "165\n", + "487\n", + "894\n", + "7×5.35\n", + "42\n", + "43\n", + "345\n", + "21∶8\n", + "21∶12。\n", + ",20\n", + "21∶18\n", + "3∶1\n", + "1996\n", + "22\n", + "667.59\n", + "654.54\n", + "692.58\n", + "509.64\n", + "487.14\n", + "513.63\n", + "252.99\n", + "262.38\n", + "3∶0\n", + "380\n", + "315\n", + "408\n", + "378\n", + "),\n", + "》、\n", + "……\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + ":“\n", + "”。\n", + ",“\n", + "”,\n", + "”、“\n", + "”、“\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + ",“\n", + "”。\n", + ":“\n", + "》。\n", + "……\n", + "1995\n", + "1986\n", + "1991\n", + ")。\n", + "1988\n", + "1986\n", + "),\n", + "》,\n", + "1951\n", + ",《\n", + "》,\n", + ":“\n", + ":“\n", + "”1995\n", + ",《\n", + "5000\n", + ",100\n", + "1992\n", + "1994\n", + "1999\n", + "2000\n", + "3∶0\n", + "3∶0\n", + "17\n", + "23\n", + "5067\n", + "1998\n", + "”,\n", + "3∶1\n", + "3∶6\n", + "0∶6\n", + "”,\n", + "40\n", + ",10\n", + "90\n", + "200\n", + "、400\n", + "22\n", + "01;\n", + "60\n", + "05\n", + "01\n", + "———\n", + "2001\n", + "1/10,\n", + "1/5,\n", + "211\n", + "———\n", + "2000\n", + ",2002\n", + "2002\n", + "”。\n", + "5∶0\n", + "2∶2\n", + "……\n", + "1∶3\n", + "5∶0\n", + "2∶3\n", + "25\n", + "47\n", + "1975\n", + "1985\n", + "1993\n", + "”。\n", + "8300\n", + "8700\n", + "40\n", + "113\n", + "61\n", + "22\n", + ",12\n", + ",6\n", + "19\n", + "2∶0\n", + "5000\n", + ",4\n", + "8000\n", + "5000\n", + "”,\n", + "N2\n", + "3000\n", + "2000\n", + "19\n", + "400\n", + "5000\n", + ",“\n", + "”,\n", + "200\n", + "”。\n", + "40\n", + "”,\n", + "500\n", + "26\n", + "28\n", + "37\n", + "1995\n", + "1987\n", + "、“\n", + "80\n", + "’,\n", + "”。\n", + "---\n", + "A,\n", + "———\n", + ":“\n", + "398\n", + "400\n", + "、8\n", + "1∶2\n", + ":1996\n", + "1986\n", + "103\n", + "21\n", + "2∶0\n", + "2∶0\n", + "3∶1\n", + "3∶0\n", + "40\n", + ":“\n", + "、2\n", + "4—6\n", + "100\n", + "、8\n", + "750\n", + "1500\n", + "240\n", + "1998\n", + "”,\n", + "240\n", + "6000\n", + "—21\n", + "”,\n", + "19\n", + "25\n", + "71∶70\n", + "14∶5\n", + "71∶70\n", + "1/4\n", + "1/4\n", + "3∶1\n", + "2∶0\n", + ",5\n", + "”,\n", + "64\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "1995\n", + "1200\n", + "0∶0\n", + "2∶1\n", + "1∶1\n", + "0∶0\n", 
+ "2∶0\n", + "1∶3\n", + "0∶5\n", + "5∶1\n", + "71122\n", + "53118\n", + "44116\n", + "44016\n", + "34213\n", + "41413\n", + "32411\n", + "31410\n", + "24310\n", + "3069\n", + "2349\n", + "2349\n", + "1356\n", + "1356\n", + "4∶14\n", + "99∶52\n", + "92∶77\n", + ":“\n", + "17\n", + "8201\n", + "———\n", + "66\n", + "88,\n", + "35\n", + "37\n", + "2∶0\n", + "1992\n", + "400\n", + "A1\n", + "』,\n", + "3、\n", + "2、\n", + "1、\n", + "1998\n", + "2000\n", + "———“\n", + ":“\n", + "1954\n", + ",1979\n", + ",1996\n", + "”。\n", + ")、\n", + "2000\n", + "140\n", + "2000\n", + ",“\n", + "———\n", + "---\n", + "……\n", + "60\n", + "1988\n", + "”,\n", + "”。\n", + "”。\n", + "25\n", + "”。\n", + "”。\n", + "”(\n", + "”,\n", + "”:\n", + "100%。\n", + "32\n", + "400\n", + "———\n", + "116\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + ",5\n", + "1996\n", + "32\n", + ",1995\n", + "———\n", + "180\n", + "……\n", + "”,\n", + "”,\n", + "”、\n", + "33\n", + "65\n", + "———\n", + ",“\n", + "”,\n", + "”。\n", + ":“\n", + "”、\n", + "”、\n", + "”、\n", + "100\n", + "”(4\n", + "80\n", + "1.1\n", + "3000\n", + "”。\n", + ",“\n", + "”。\n", + "1995\n", + "1990\n", + "1986\n", + "60\n", + "”。\n", + "……\n", + "300\n", + ":“\n", + "1990\n", + "28\n", + "46\n", + ":“\n", + "1993\n", + "2000\n", + "———\n", + "1992\n", + "”。\n", + "3400\n", + "1991\n", + ",40\n", + "300\n", + "4×350MW\n", + "》、《\n", + "》、《\n", + ",1996\n", + "44%\n", + "75%\n", + "13%\n", + "50%。\n", + "25\n", + "1000\n", + "GSM\n", + "S80\n", + "”,1996\n", + "28\n", + "28\n", + "”。\n", + ":“\n", + "8000\n", + "27\n", + "140\n", + "6.7\n", + "28\n", + "140\n", + "27\n", + "150%。\n", + ",29\n", + "28\n", + "27\n", + "60%\n", + "80%,\n", + "1∶6.2。\n", + "28\n", + "、30\n", + "、45\n", + "”,\n", + "”,\n", + "1992\n", + "”。\n", + "”,\n", + "”,\n", + "———\n", + "10%\n", + "12%。\n", + ":1996\n", + "19%,1997\n", + "22%,\n", + "318\n", + "1997\n", + "29%,\n", + "800\n", + "1997\n", + "2000\n", + "1997\n", + "66%,\n", + "5.29\n", + "1994\n", + "1997\n", + "1987\n", + "1994\n", + "1993\n", + "———\n", + "27\n", + "1996\n", + "3240\n", + "3150\n", + ",1997\n", + "3·7%。\n", + "27\n", + "27\n", + "1/4\n", + "17\n", + "1990\n", + "1300\n", + "4380\n", + "40%\n", + "21%\n", + "3060\n", + ",20\n", + "40\n", + "16.4%。\n", + "27095\n", + "19\n", + "9000\n", + "5820\n", + "27\n", + "———\n", + "123\n", + "———\n", + "8000\n", + ",1986\n", + "950\n", + "350\n", + "1986\n", + ",“\n", + "”,\n", + "1994\n", + "750\n", + "2000\n", + "55\n", + "500\n", + "———\n", + "25\n", + "8%\n", + "1998\n", + "26\n", + "25\n", + "———\n", + "70\n", + ")(\n", + "1999\n", + "250\n", + "6·5\n", + ")。\n", + "26\n", + "8250\n", + "217\n", + "1999\n", + "(1998\n", + "1999\n", + "1998\n", + "3%—3.5%\n", + "4%—4.5%;\n", + "6.2%\n", + "1.4%(\n", + ")。\n", + "1998\n", + "26\n", + "1997\n", + "1997\n", + "8·65\n", + "”。\n", + "26\n", + "17·09\n", + "4·89\n", + ",5\n", + "25\n", + "4000\n", + "1.5\n", + "750\n", + ")。\n", + "200\n", + "1.76\n", + "———\n", + "26\n", + "21\n", + "40\n", + "500\n", + "25\n", + "200\n", + "250\n", + "40%\n", + ",35%\n", + "26\n", + "20%\n", + "261\n", + "250\n", + "25\n", + "1989\n", + "12478\n", + "250\n", + "”。\n", + "1400\n", + "2610\n", + "25\n", + "”)\n", + "40\n", + "4%,\n", + "85%\n", + "1948\n", + "16.5\n", + "44\n", + "60%。\n", + "300\n", + "4200\n", + "”、\n", + "580\n", + "”,\n", + "7%。\n", + "25\n", + "24.15\n", + "350\n", + "331.90\n", + "1987\n", + ",5\n", + "22\n", + "40.67\n", + "41.15\n", + "40.94\n", + "25\n", + "1991\n", + "137·73\n", + "1.210%,\n", + "26\n", + "137.67\n", + 
"0·54\n", + "2001\n", + "1300\n", + "1945\n", + "1943\n", + "1941\n", + "58\n", + "120\n", + "2.57\n", + "7.4\n", + "87%,\n", + "17\n", + "79%。\n", + "200\n", + "”。\n", + "26\n", + "1996\n", + "100\n", + ",20\n", + "880\n", + "80%\n", + "2/3\n", + "100\n", + "1/3\n", + "1996\n", + "1996\n", + "100\n", + ",1997\n", + "、5\n", + "、2\n", + "1990\n", + "1996\n", + "1997\n", + "13620\n", + "800\n", + "75%\n", + "3600\n", + "4500\n", + "1500\n", + "3000\n", + "23\n", + ",1998\n", + "84\n", + "4.5\n", + "6.8\n", + "23\n", + "),\n", + "600\n", + "5000\n", + "》65×81\n", + "1906\n", + ",50×60\n", + "1890\n", + ",61×50\n", + "1889\n", + "……\n", + "“112”,\n", + "23\n", + "150\n", + "100\n", + ":“\n", + "》、\n", + "》。\n", + ",1500\n", + "19\n", + "100\n", + "22%,\n", + "19\n", + ",“\n", + "”。\n", + "70\n", + "1994\n", + "”:\n", + ":“\n", + "”,\n", + "21\n", + ",1996\n", + "21\n", + "22\n", + "”。\n", + "IBM\n", + "1969\n", + "1982\n", + "ATT\n", + "1974\n", + "1982\n", + "ATT\n", + "”。\n", + "Win98,\n", + "Win98;\n", + "”。\n", + "1990\n", + "”。\n", + "office\n", + "NT\n", + "Java\n", + "Win98。\n", + "PC\n", + "PC\n", + "Windows\n", + ",Win98\n", + "PC\n", + "25\n", + ":“\n", + "”。\n", + "21\n", + ",“\n", + "23\n", + "115·6\n", + "1193\n", + "80%\n", + "152\n", + "67\n", + "555\n", + "681\n", + "3%,\n", + "22\n", + ",1997\n", + "1455\n", + "40%\n", + "50%\n", + "4%\n", + "5%\n", + "1997\n", + ",1997\n", + "3·7%,\n", + "———\n", + "1998\n", + "25\n", + "98”\n", + "1972\n", + ",1982\n", + "1969\n", + "IBM\n", + "1982\n", + "1937\n", + "1890\n", + ":1906\n", + ",“\n", + "98’,\n", + "”。\n", + ":“\n", + "”,“\n", + "IBM、\n", + "28\n", + "98”\n", + "98”\n", + ",“\n", + "98”\n", + "95”,\n", + "98”\n", + "98”\n", + "———\n", + "95”\n", + "1995\n", + "100\n", + "95”\n", + "JAVA\n", + "1974\n", + "85%\n", + "98”\n", + "25\n", + "———\n", + "”、“\n", + "”、“\n", + "”、“\n", + "21\n", + "21\n", + "MBA(\n", + ",MBA\n", + "”。\n", + "21\n", + "1000\n", + "1997\n", + "700\n", + "46.5%。\n", + "100\n", + "200\n", + "100\n", + "2000\n", + "100\n", + "200\n", + "1994\n", + "0.02%,\n", + "0.06\n", + "80%\n", + "90%\n", + "28\n", + "……\n", + "110\n", + "50%\n", + "30%。\n", + ")(\n", + ")(\n", + "80%。\n", + "29\n", + "43.3%。\n", + "》(\n", + "》1998\n", + "、“\n", + "”,\n", + "、《\n", + "……\n", + "……\n", + "』,\n", + "》、《\n", + "———\n", + "———\n", + ",“\n", + "”,\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "……\n", + "———\n", + "……“\n", + "———\n", + "……“\n", + "———\n", + "……\n", + "》,\n", + "……\n", + ":“\n", + "’,‘\n", + "……”\n", + "———\n", + "———《\n", + "1980\n", + ",《\n", + "1995\n", + "1995\n", + "—1996\n", + "———\n", + "》(\n", + "》、\n", + "》、\n", + "●《\n", + "”,\n", + "”、“\n", + "”、“\n", + "”,\n", + "”。\n", + ")、\n", + "”,\n", + "”,\n", + "……\n", + "”。\n", + ":《\n", + "1998\n", + ",“\n", + ",“\n", + ":《\n", + "1998\n", + ":《\n", + "1998\n", + "……(\n", + "……\n", + ":《\n", + "1998\n", + "———\n", + "……\n", + "1995\n", + "———\n", + "……\n", + "……\n", + "》、《\n", + "》、《\n", + "》,\n", + "》、\n", + "》,\n", + "》、《\n", + "》、《\n", + "”。\n", + "”,\n", + ",1949\n", + ",1976\n", + "、“\n", + "66\n", + "”,\n", + ",1997\n", + "”。\n", + ",3\n", + "”,\n", + "1990\n", + "300\n", + "100%。\n", + "”。\n", + "”,\n", + "”。\n", + "50%,\n", + "4500\n", + "3000\n", + "1987\n", + "”、\n", + "———\n", + ",1997\n", + "”,\n", + "”,\n", + "150\n", + "1.5\n", + ",1995\n", + "300\n", + "70\n", + "300\n", + "———“\n", + "90\n", + "……\n", + "1995\n", + ":“\n", + "……\n", + ":“\n", + ":“\n", + "1995\n", + "”,\n", + "———\n", + "1000\n", + "100\n", + "”,\n", + 
"”,\n", + "1938\n", + "1943\n", + "23\n", + "9500\n", + "2.1\n", + "2000\n", + ":1995\n", + "……\n", + ",“\n", + "”“\n", + "1995\n", + "1982\n", + "”,23\n", + "33\n", + "33\n", + "……\n", + "……\n", + "100\n", + "”。\n", + "”。\n", + "”,\n", + "1997\n", + "54\n", + "23\n", + ",85.1%\n", + "35\n", + "82%\n", + "……\n", + ":“‘\n", + "”3\n", + ",26\n", + "MTU\n", + "”。\n", + "”,\n", + "25\n", + "300\n", + "475\n", + "23\n", + "60\n", + "”,\n", + ":“\n", + "13939\n", + ":“\n", + "127\n", + ",27\n", + "296\n", + "15%,\n", + "200\n", + "92\n", + "1.44\n", + "45\n", + "2.3\n", + "1200\n", + "6.6\n", + "55\n", + ":(010)68180800\n", + "”,\n", + ":“\n", + "86)\n", + "1984\n", + "73\n", + "43\n", + "1994\n", + "200\n", + "25\n", + ",4000\n", + ",《\n", + "119\n", + "1998\n", + "35.58\n", + "25\n", + "”,\n", + "”,\n", + "”,\n", + "”、“\n", + "”、“\n", + "”。\n", + ":“\n", + "1997\n", + "1997\n", + "100\n", + "1989\n", + "1996\n", + "1996\n", + "”,\n", + "ID\n", + "25\n", + "1.3\n", + "……\n", + ",1996\n", + "ISO9002\n", + "),\n", + "1992\n", + "1990\n", + "80\n", + "———\n", + "2、\n", + "Y222/221\n", + "),\n", + "151\n", + "1、\n", + "———\n", + "1900\n", + "1996\n", + "1.7\n", + "9400\n", + "6200\n", + ",3\n", + ",5\n", + "1996\n", + "”———\n", + "800\n", + "7000\n", + ",1/3\n", + ",60\n", + "1000\n", + "160\n", + "160\n", + "1995\n", + "1988\n", + "80\n", + ",1997\n", + "1.8\n", + "1700\n", + "1987\n", + "1200\n", + "120\n", + "1987\n", + "85%\n", + "120\n", + "”———\n", + "2000\n", + "1500\n", + "41.9\n", + "2000\n", + "”,\n", + "”。\n", + "1994\n", + "1996\n", + "15%\n", + "34\n", + ",16\n", + "19\n", + "34\n", + "”,\n", + "2000\n", + "》,\n", + "”:\n", + "80%\n", + "”“\n", + "”“\n", + "62\n", + "》,\n", + "》,\n", + "”,\n", + ",“\n", + ",“\n", + "”,\n", + ",“\n", + "”,\n", + "”(\n", + "”,\n", + "1996\n", + "60\n", + "1996\n", + ":“\n", + "———\n", + "》,\n", + "———\n", + ":“\n", + "100\n", + ":“\n", + ",1992\n", + ",1996\n", + "1995\n", + "149\n", + "1990\n", + ",“\n", + "”。\n", + "3390\n", + "600\n", + "150\n", + "109\n", + "1990\n", + "———\n", + "A;\n", + "D;\n", + "E;\n", + "B1\n", + "A;\n", + "D;\n", + "RELAX(\n", + "RELAX(\n", + "1998\n", + "1200\n", + "1500\n", + "1994\n", + "———\n", + ")。\n", + ")。\n", + "Slot2\n", + "RISC\n", + "Intel\n", + "———NetCenter740/745\n", + ":“\n", + "80\n", + ",“\n", + "”,\n", + ":“\n", + "××—××\n", + "72\n", + "72\n", + "———\n", + "2000\n", + "1996\n", + "1997\n", + "70%。\n", + "”。\n", + "80%。\n", + "”,\n", + "”,\n", + "”,\n", + "1000\n", + "100\n", + "40\n", + "25\n", + "30—40\n", + "),\n", + "”。\n", + "”、“××\n", + "”、“××\n", + ",“\n", + ":(\n", + "(010—64217984),\n", + "50%\n", + "———\n", + "—4\n", + "1/10,\n", + "65%。\n", + "1/3。\n", + ",7\n", + ",4\n", + "95%\n", + "90%\n", + ",60\n", + "”,\n", + "---\n", + "》、《\n", + "》、《\n", + "———\n", + "1991\n", + ";1993\n", + "”;1995\n", + "”。\n", + "”、\n", + "”、“\n", + "”。\n", + "“1988\n", + "1977\n", + ",1957\n", + ")(\n", + "1956\n", + "”,\n", + "》、《\n", + "』,\n", + "TCL\n", + "》。\n", + "”,\n", + "》、《\n", + "》“\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "2、\n", + "1、\n", + "》、《\n", + "』,\n", + ",『\n", + "』,\n", + ",『\n", + "』,\n", + "”,\n", + "———\n", + ",10\n", + "———“\n", + "”。\n", + ":“\n", + ":“\n", + "70%\n", + "……“\n", + ":“\n", + ",36\n", + "1965\n", + "———\n", + "———\n", + "———\n", + ",1991\n", + "……\n", + "”,\n", + "60\n", + "1962\n", + ",“\n", + "108\n", + "1962\n", + ",108\n", + "———\n", + "2000\n", + "”,“\n", + "”,“\n", + "”,\n", + "2.1\n", + "1.5\n", + "”、“\n", + "”。\n", + "56\n", + ",56\n", + ",56\n", + "56\n", + 
"”、“\n", + "”,\n", + "———\n", + "1300\n", + "45\n", + "1324\n", + ":“\n", + "1983\n", + "1995\n", + "19\n", + ",21\n", + "21\n", + "21\n", + "”。\n", + "1/3\n", + "19\n", + "“007”\n", + "1997\n", + "“007”\n", + "1962\n", + "“007”\n", + "“007”\n", + "25\n", + "“007”\n", + "19\n", + "———\n", + ",3\n", + "19\n", + "》,\n", + "1906\n", + "》,\n", + "1889\n", + "19\n", + "172\n", + "”。\n", + ",7\n", + "40.99\n", + "40.95\n", + "50.05\n", + "50.03\n", + ",6\n", + "14.07\n", + "12.96\n", + "9%。\n", + "19\n", + "1988\n", + "19\n", + "31\n", + "21\n", + ",10\n", + "580\n", + "8%。\n", + "”、“\n", + "95%,\n", + "4%,\n", + "20%\n", + "1997\n", + "1997—2002\n", + "1996\n", + "80\n", + "1996\n", + "600\n", + "1800\n", + "9000\n", + "2010\n", + "1.6\n", + "8000\n", + "2010\n", + "70\n", + "8000\n", + "300\n", + "200\n", + ",21\n", + "50%\n", + "10%,\n", + "40%\n", + "80%\n", + "100\n", + "390\n", + "28\n", + "1997\n", + "1984\n", + "1997\n", + "60\n", + ",5\n", + "100\n", + "1997\n", + "78\n", + "1996\n", + "1996\n", + "1997\n", + "205\n", + "300\n", + "),\n", + "25%,\n", + "7.8%,\n", + "13.4%\n", + "23.5%。\n", + "21\n", + "3.2%。\n", + "7%\n", + "25\n", + "10%\n", + "8%。\n", + "13.8%,\n", + "31196\n", + ",4\n", + "1.7%,\n", + "43516\n", + "33\n", + ",4\n", + "12320\n", + "52.6%,\n", + "1998\n", + "21\n", + ",1998\n", + ",1998\n", + "1997\n", + "7.7%,\n", + "3.8%。\n", + "19\n", + "12·2\n", + "57.8%。\n", + "16192\n", + "9023\n", + "1811\n", + "1741\n", + "1000\n", + "30.1%\n", + "21%。\n", + "19\n", + "1997\n", + "1991\n", + "136.65\n", + "2.93%。\n", + "100\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "3500\n", + "112\n", + "22\n", + "”。\n", + "1995\n", + "44\n", + ",45\n", + "48\n", + "25\n", + "60\n", + "49\n", + "44\n", + "34\n", + "17\n", + "21\n", + "24%。\n", + "19\n", + "753\n", + "80\n", + "1996\n", + "1948\n", + "、130\n", + "19\n", + "21\n", + "1000\n", + "1997\n", + "”,\n", + "1000\n", + "IBM\n", + "RS/6000\n", + "1000\n", + "DNA\n", + "DNA\n", + "10〈17\n", + "DNA\n", + "Adleman\n", + "Adleman\n", + "1994\n", + "Science\n", + "DNA\n", + "DNA\n", + "DNA\n", + "0,\n", + "(qubits)\n", + "2(L\n", + "2(L\n", + "Shor\n", + "1000\n", + "AT&T\n", + "PeterShor\n", + ",1994\n", + "1986\n", + "1985\n", + "Likharev\n", + "(RSFQ)\n", + "RSFQ\n", + "0.3\n", + "60\n", + ",IBM\n", + ",80\n", + "0,\n", + "2010\n", + "1000\n", + "DNA\n", + "10—20\n", + "1994\n", + ",1995\n", + ",1998\n", + "60\n", + "1000\n", + "1990\n", + "1987\n", + "1985—1986\n", + "CSL\n", + "1943\n", + "19\n", + "1975\n", + "110\n", + "63\n", + "1859\n", + "139\n", + "1998\n", + "1998\n", + "100\n", + "”。\n", + "1·22\n", + ",200\n", + "112\n", + "3500\n", + "、2\n", + "1995\n", + "”。\n", + "”。\n", + "85%\n", + "20%\n", + "30%\n", + "1941\n", + "2000—2400\n", + "75\n", + "90\n", + "8∶2\n", + "”。\n", + "85%。\n", + "17%。\n", + "82.1\n", + "98”,\n", + "95”\n", + "98”\n", + "60\n", + "850\n", + "17\n", + "”,\n", + "1957\n", + "”。\n", + "1886\n", + "”?\n", + "17\n", + "80\n", + "17\n", + "23·86\n", + "200\n", + "900\n", + "),\n", + "88\n", + "72.54\n", + "1966\n", + "1960\n", + "1915\n", + "400\n", + "8500\n", + "4800\n", + "1476\n", + "1961\n", + "、1966\n", + "15.2\n", + "171、52\n", + "32\n", + "134.5\n", + "368.4\n", + "675\n", + "100\n", + "———\n", + "———\n", + "98”\n", + "95”\n", + "98”\n", + "98”\n", + "17\n", + "98”\n", + "5·5%\n", + "7·2%,\n", + "17\n", + "200\n", + "450\n", + "2、\n", + "1、“\n", + "”16\n", + "56\n", + "230\n", + "250\n", + "295\n", + "132\n", + "31\n", + "19\n", + "10%,\n", + "60\n", + "200\n", + "40\n", + "164.5\n", + "124.5\n", 
+ "4.5%。\n", + "1.02\n", + "2.5%。\n", + "38\n", + "1.2\n", + "22\n", + ")。\n", + "1998—1999\n", + "66\n", + "),\n", + "”。\n", + "375\n", + "240\n", + "22\n", + ",4\n", + "12.20\n", + "0.76\n", + "12.09\n", + "3.39\n", + "2001\n", + "2002\n", + "—M”\n", + "—2”\n", + "85\n", + ",2003\n", + "690\n", + "274\n", + "1/10\n", + "1%。\n", + "50PPM(1PPM\n", + "1/10。\n", + "1997\n", + "37\n", + "300\n", + "(1\n", + "0·16\n", + "1996\n", + "1995\n", + "”,\n", + "33%\n", + "19%,\n", + "”。\n", + "70%\n", + "”。\n", + "500\n", + "229\n", + "115\n", + "50·2%;\n", + "104\n", + "45·4%。\n", + "80\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "”。\n", + "44\n", + "1996\n", + "1996\n", + "1984\n", + "80\n", + "620\n", + "370\n", + ",5\n", + "”,\n", + "39·50\n", + "40·55\n", + "1700·03\n", + "71·29\n", + "172·40\n", + "———\n", + "162·37\n", + "1997\n", + "75\n", + "3%。\n", + "450\n", + "600\n", + "56\n", + "250\n", + "80\n", + "DEC\n", + "●DEC\n", + "DIGITALPC\n", + "P6300\n", + "GB13000/GBK\n", + "1995\n", + "———\n", + ":“\n", + "800\n", + "……\n", + "……\n", + "40\n", + "2BQ—140\n", + "7CB—2\n", + "1996\n", + "○○○\n", + "”。\n", + ":“\n", + "X—33\n", + "X—33\n", + "1666\n", + "80\n", + "1992\n", + "X—33\n", + "1998\n", + "X—33\n", + "X—33\n", + ":“\n", + "X—33\n", + "X—33\n", + "7500\n", + "”。\n", + "1975\n", + "1965\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + "———\n", + "500\n", + "900\n", + "”,\n", + "———\n", + ",“\n", + "”。\n", + "700\n", + "86\n", + "100\n", + "90\n", + "———\n", + ":“\n", + "80\n", + "……\n", + ":“\n", + "1000\n", + "”,\n", + "”、“\n", + "”,\n", + "》。\n", + "———\n", + "1986\n", + "1997\n", + "90\n", + "”,\n", + "1/5。\n", + "80\n", + "132\n", + "200\n", + "40\n", + "1997\n", + "700\n", + "90%\n", + "31%,\n", + "50—60\n", + "120\n", + "、10\n", + "、39\n", + "2002\n", + "”。\n", + "90\n", + "BBL\n", + ",5\n", + ",“\n", + "420\n", + "(1\n", + "1/3\n", + "3000\n", + "800\n", + "2264\n", + "1996\n", + "150\n", + "2%,\n", + "40\n", + "20%\n", + "10%,\n", + "1.5%。\n", + "3.75%\n", + "3%—3.25%。\n", + "300\n", + "3.5\n", + "1200\n", + "2200\n", + "5.25\n", + "34\n", + "1300\n", + ";2400\n", + "29\n", + "1.75\n", + "2.8\n", + "22\n", + "103\n", + "27\n", + "21\n", + ")2、\n", + "40\n", + "70\n", + "1500\n", + "21\n", + "1、5\n", + ",1996\n", + "2·96%。\n", + "《1997\n", + "1998\n", + "”。\n", + "1998\n", + ",1998\n", + "”。\n", + "100\n", + "348.61\n", + "351.86\n", + "1989\n", + "400\n", + ",4\n", + "350\n", + "1989\n", + "1·7766\n", + "1·7762。\n", + "132·75\n", + "134·07。\n", + "38\n", + "420\n", + "———\n", + "400\n", + "2/3\n", + "1999\n", + "2·8\n", + "“1998\n", + "”,\n", + "”,\n", + "38\n", + "40\n", + ",1999\n", + "2000\n", + ",15\n", + "23\n", + "46\n", + "40%\n", + "500\n", + "1.8\n", + "(6\n", + "(20\n", + ",29\n", + ",1998\n", + ",*\n", + "82500\n", + "4.5\n", + "4400\n", + "6%—25%,\n", + "80\n", + "100\n", + "3.03%\n", + "736\n", + "804\n", + "853\n", + "1997\n", + "48\n", + "3AW\n", + "———“\n", + "”。\n", + "2、\n", + "1、\n", + "7·5\n", + "”。\n", + "1986\n", + "26\n", + "370\n", + "165\n", + "5700\n", + "1/3,\n", + "408·5\n", + "1996\n", + "———\n", + "(SBC)11\n", + "620\n", + "(Ameritech),\n", + "”。\n", + "21\n", + "40%\n", + "2002\n", + ",“\n", + "”,“\n", + "”。\n", + "2·9\n", + "1999—2002\n", + "90\n", + "70\n", + ",1997\n", + "21%,\n", + "18%,\n", + "1997\n", + "59%\n", + "83%\n", + "50%\n", + "1973\n", + ",1944\n", + "40%。\n", + "10%\n", + "20%\n", + "4000\n", + "65%\n", + "68%。\n", + "95%\n", + "85%\n", + "500\n", + "42\n", + "57\n", + "5%。\n", + "200\n", + "3000\n", 
+ "200\n", + "1/3,\n", + "23%,\n", + "8%。\n", + "1380\n", + ",65%\n", + "6%。\n", + "1·199\n", + "1·149\n", + "8·96\n", + "9·493\n", + "8920\n", + "4%,\n", + "4790\n", + "10%,\n", + "4130\n", + "4010\n", + "2·65\n", + "1%;\n", + "6·31\n", + "1997—1998\n", + "2000\n", + "”,\n", + "128\n", + "4、5\n", + "9、10\n", + "1300\n", + "19%,\n", + "1997\n", + "1993\n", + "1998\n", + "191\n", + "1989\n", + "”,\n", + ",15\n", + ",“\n", + ":1\n", + "、2\n", + ";10\n", + "、20\n", + ";1\n", + "1200\n", + "2002\n", + "76\n", + "、2\n", + "、5\n", + "、10\n", + "、20\n", + "、50\n", + "、1\n", + "60\n", + "747\n", + "767\n", + "737—100\n", + "200\n", + "737—300、400\n", + "500\n", + "118\n", + "282\n", + "737—100\n", + "200\n", + "152\n", + "196\n", + "26\n", + "737—100\n", + "200\n", + "25\n", + "737\n", + "737—300、400、500\n", + "8%,\n", + "6%\n", + "3%\n", + "”。\n", + "26\n", + ",35%\n", + "33\n", + "23\n", + "100\n", + "150\n", + "4600\n", + "7.46\n", + "2287\n", + "1060\n", + "17\n", + "70%,\n", + "1980\n", + "1992\n", + "28%\n", + "3%,\n", + "”。\n", + ":IMF\n", + "96\n", + "),\n", + "22%。\n", + "530\n", + "3000\n", + ")。\n", + "119\n", + "400\n", + ",1000\n", + ")。\n", + ":“\n", + "”。\n", + "”。\n", + "”。\n", + "70%\n", + "”。\n", + "2.7%\n", + "2.9%;\n", + "1.5%;\n", + "1%,\n", + "2003\n", + "107%。\n", + "17\n", + "1999\n", + "2001\n", + "》,\n", + "”,\n", + "”。\n", + "”,\n", + "”,“\n", + "60%\n", + "”,\n", + ",“\n", + "”。\n", + "1997\n", + "4.7%,\n", + "4.2%\n", + "1%。\n", + ":“\n", + "”。\n", + "1994\n", + "1999\n", + "333·8\n", + "1997\n", + "1.5%,\n", + "121.6%,\n", + ":“\n", + "14·7\n", + "1.3%。\n", + "”,\n", + "1998\n", + "3%\n", + "10%\n", + "1/4。\n", + "80\n", + "10%\n", + "11%,1994\n", + "124.9%。\n", + "70\n", + "”,\n", + "”。\n", + "41\n", + "”。\n", + ";“\n", + ";“\n", + "”,\n", + "1929\n", + "”,\n", + "”。\n", + "”。\n", + "、20\n", + "”。\n", + "506%\n", + "199%。\n", + "197%。\n", + "2002\n", + "85\n", + "1998\n", + "”,\n", + ":“\n", + ":“\n", + "”“\n", + "”“\n", + ":“\n", + "1967\n", + "……\n", + ",“\n", + ":“\n", + "”,\n", + "1000\n", + "7000\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "”。\n", + ":“\n", + ":“\n", + ":“\n", + "”,\n", + ":“\n", + "1.5\n", + "49\n", + "7000\n", + "”10\n", + ":“\n", + ":“\n", + "1996\n", + "1995\n", + "1994\n", + "400\n", + "1993\n", + ",200\n", + "200\n", + "17\n", + "———“\n", + "”!\n", + "———\n", + "1985\n", + "36\n", + "229\n", + "104\n", + ",1997\n", + "1900\n", + "450\n", + "3300\n", + "1993\n", + "0.4\n", + "4000\n", + "200\n", + "、40\n", + "”。\n", + "1991\n", + "8.37\n", + "”,1993\n", + "5.73\n", + "……\n", + "40\n", + "1/3\n", + ")、\n", + "”,56\n", + "56\n", + "56\n", + "56\n", + "56\n", + "150\n", + "、40\n", + "31\n", + "”,\n", + "—26\n", + "”,\n", + "7.4\n", + "0.547\n", + "43%\n", + "57%。\n", + ":“\n", + "1660\n", + "920\n", + "),\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + ",“\n", + "”。\n", + "”,“\n", + "”。\n", + ",“\n", + "”,“\n", + "”,\n", + "1989\n", + "”,\n", + ",1989\n", + "1979\n", + "80\n", + "1970\n", + "》、《\n", + "”,\n", + "”。\n", + "”。\n", + "”。\n", + ",1996\n", + "》,\n", + "”。\n", + "1959\n", + "》,\n", + "1995\n", + "82\n", + "1946\n", + "60\n", + "1941\n", + "1944\n", + "”。\n", + "110\n", + "47\n", + "》,\n", + "47\n", + "1.5\n", + "”,\n", + "1983\n", + ":“\n", + ";“\n", + "”(FACTS),\n", + "21\n", + "”,\n", + "(IGCC)、\n", + "(PFBC)\n", + "80%\n", + "5%—10%\n", + "37%\n", + "58%。\n", + "5000—6000\n", + "1KW(\n", + ")。\n", + "21\n", + "1965\n", + "1995\n", + "35\n", + "1995\n", + "25%\n", + "2020\n", + "24%。\n", + "1996\n", + "39%\n", + 
"2020\n", + "37%。\n", + "1995\n", + "20%,\n", + "2020\n", + "30%\n", + "1998\n", + "2020\n", + "63900\n", + "1970\n", + "575\n", + "21\n", + "”。\n", + ":“\n", + "100\n", + "1100\n", + "8000\n", + ":1998—1999》\n", + "———\n", + "”,\n", + ",“\n", + "”。\n", + "2000—2001\n", + "14%。\n", + ",“\n", + "—3”\n", + "”。\n", + "25\n", + "—3”\n", + "1989\n", + "”。\n", + "1979\n", + "1950\n", + "90\n", + "700\n", + "”,\n", + "5%\n", + "45\n", + "22\n", + "”。\n", + ")。\n", + "60\n", + "”,\n", + "”,\n", + "”,\n", + "1.2\n", + ":“\n", + ",“\n", + "2002\n", + "40%—50%\n", + ",30%—40%\n", + "1971\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + "27\n", + "23\n", + "),\n", + ",“\n", + "”,\n", + "1993\n", + "90\n", + "80\n", + "70\n", + "1979\n", + "2.25%\n", + "3.5\n", + "2.9\n", + "1951\n", + "1958\n", + "8%。\n", + "10%\n", + "9%\n", + "25%,\n", + ",1997\n", + "500\n", + "1991\n", + "300%,\n", + "21\n", + "1858\n", + "1859\n", + "400\n", + "1902\n", + "19\n", + "650\n", + "800\n", + "”,\n", + "》“\n", + "”。\n", + "34\n", + "49\n", + ",3\n", + "———\n", + "28\n", + "60\n", + ",20\n", + "38\n", + ")。\n", + "”。\n", + "S—\n", + ",“\n", + "A。\n", + "2、\n", + "1、\n", + "5300\n", + "3.9%,\n", + "200\n", + "750\n", + ",800\n", + "”。\n", + "1997\n", + "340\n", + "1995\n", + "5%\n", + "”。\n", + "”,\n", + ",“\n", + "”。\n", + "”。\n", + "9000\n", + "1949\n", + ",1\n", + ",100\n", + "72%,\n", + "5%\n", + "80\n", + "”,\n", + "0—2%。\n", + "1.5%,\n", + "”。\n", + "1997\n", + "1.7%\n", + "1998\n", + "0.2%。\n", + ",2\n", + "0·1%。\n", + "1610\n", + "LEAR—35A\n", + "89\n", + "2·4\n", + "),\n", + "2·4%。\n", + "1992\n", + "1994\n", + "72\n", + "54%\n", + "94\n", + "“9405”,\n", + "125\n", + "28\n", + "109\n", + "4·4%———\n", + "123\n", + "123\n", + ":123\n", + "”。\n", + "”。\n", + ",“\n", + "”。\n", + "”。\n", + "1982\n", + "38\n", + "70\n", + "62\n", + "”。\n", + "60\n", + "1000\n", + "60\n", + "26%,\n", + "30%。\n", + "7%—8%;\n", + "29\n", + "28\n", + "70\n", + "40\n", + "2002\n", + ",“\n", + "”。\n", + "”。\n", + ",“\n", + ",15\n", + ",3\n", + ",5\n", + "》。\n", + "1989\n", + "500\n", + "28\n", + "、7500\n", + "40\n", + ",“\n", + "”。\n", + "———\n", + "26\n", + "2.7\n", + "、19.6%、16.6%。\n", + "2.9\n", + "19.4%,\n", + "18.6%。\n", + "1999\n", + ",2002\n", + ",2002\n", + "62\n", + "2002\n", + "1996\n", + "25\n", + "31\n", + "”,\n", + "86\n", + "1996\n", + "150\n", + "1994\n", + "”,\n", + "3000\n", + "68\n", + "110\n", + "160\n", + "80\n", + "46\n", + "26\n", + "1.8\n", + ",1997\n", + "2.4\n", + "1800\n", + "31\n", + "1/4,\n", + "1/3,\n", + "2/3。\n", + "1995\n", + ",1993\n", + "200\n", + "1992\n", + "40\n", + "1988\n", + "150\n", + "1.4\n", + "150\n", + ";1997\n", + "1978\n", + "40\n", + "40\n", + "1978\n", + "”,\n", + "ISO9002\n", + "———\n", + ",“\n", + ",“\n", + ",“\n", + ",“\n", + ",“\n", + "”,\n", + "”。\n", + "42\n", + "300%\n", + "”。\n", + ")2、\n", + "———\n", + "100\n", + "1、\n", + "、18\n", + "”,\n", + "2000\n", + "1993\n", + "8.8\n", + "300\n", + "80\n", + "6.9\n", + "160\n", + "300\n", + "”。\n", + ",600\n", + ",9\n", + "2000\n", + "”,\n", + "100\n", + "1992\n", + "6.6\n", + "260\n", + ",1993\n", + "2.6\n", + ",1986\n", + "1985\n", + ",1980\n", + "1978\n", + "1993\n", + "1.2\n", + "37.6\n", + "”,\n", + "”。\n", + "90\n", + "110\n", + "……\n", + "1988\n", + "1986\n", + "19\n", + "400\n", + "300\n", + "19\n", + "5000\n", + "),\n", + "25\n", + "100\n", + "19\n", + "7000\n", + ";1997\n", + "90%\n", + "300\n", + "1997\n", + "3500\n", + "19\n", + "85\n", + "40\n", + "”,\n", + "40%\n", + "13%,\n", + "18%;13%\n", + "9%。\n", + "8%\n", + "11%,\n", + 
"28\n", + "28\n", + "28\n", + "28\n", + ",“\n", + "”。\n", + "28\n", + "26\n", + "”。\n", + "27\n", + "4、\n", + "130\n", + "3、\n", + "60\n", + ",100\n", + "2、\n", + "1、\n", + "170\n", + "27\n", + ",《\n", + "28\n", + "》。\n", + "”,\n", + "1%—2%\n", + "”:“\n", + "”。\n", + "20%\n", + "10%\n", + "600—700\n", + "(1\n", + "),\n", + ",“\n", + ")、\n", + "40\n", + "300\n", + "5000\n", + "”,\n", + "489\n", + ",“\n", + "”。\n", + "———“\n", + "”。\n", + "27\n", + "200\n", + "、2\n", + "3000\n", + ",3\n", + "2000\n", + "28\n", + "30%\n", + "94·4%\n", + "22\n", + "26\n", + "26\n", + "28\n", + "27\n", + "26\n", + "2·5\n", + "”。\n", + "27\n", + "26\n", + "29\n", + ":“\n", + ",29\n", + ",“\n", + "”。\n", + "82\n", + "1/50。\n", + "26\n", + "———\n", + "、1959\n", + "26\n", + "1994\n", + "25\n", + "23\n", + "26\n", + "1996\n", + "25\n", + "26\n", + "120\n", + "26\n", + "26\n", + "26\n", + ":“\n", + "27\n", + ",“\n", + "”。\n", + "26\n", + ",“\n", + "”。\n", + "”。\n", + "”。\n", + "27\n", + "1974\n", + "1998\n", + "27\n", + "26\n", + ",“\n", + "”。\n", + "26\n", + "”。\n", + "26\n", + "26\n", + "———\n", + "83\n", + ":“\n", + "1000\n", + ",26\n", + "、24\n", + "26\n", + "25\n", + "25\n", + "”。\n", + ",“\n", + "”,\n", + ":“\n", + "126\n", + "’。\n", + "”,\n", + "”,\n", + "126\n", + ":“\n", + "25\n", + "40\n", + "25\n", + "25\n", + "25\n", + "800\n", + "440\n", + "22\n", + "25\n", + "25\n", + ",“\n", + "”。\n", + "75\n", + ",“\n", + "”,“\n", + "50%\n", + "”。\n", + "”,\n", + "26\n", + "26\n", + "”。\n", + "”。\n", + "”,\n", + "》,\n", + "》。\n", + "26\n", + "25\n", + "2010\n", + ",20\n", + "25\n", + "25\n", + "2:\n", + "1:\n", + "25\n", + "93\n", + "800\n", + "26\n", + "26\n", + "”,\n", + "”,\n", + "》。\n", + "”,\n", + ",“\n", + "”,“\n", + "”。\n", + ",“\n", + ",“\n", + "1951\n", + "”。\n", + ",“\n", + ")”,\n", + "26\n", + "21\n", + "26\n", + "1997\n", + "”。\n", + "”。\n", + ",“\n", + "1997\n", + "3000\n", + "(5000\n", + "60%\n", + ",1997\n", + "700\n", + "280\n", + "”。\n", + ",2000\n", + "8000\n", + "6000\n", + "1997\n", + ",1996\n", + "6800\n", + "5000\n", + "73%\n", + "1996\n", + "(EIU)\n", + ",1997\n", + "90\n", + "80\n", + "70\n", + "”?\n", + ",20\n", + "”。\n", + ";5\n", + "1000\n", + "49\n", + "1.2\n", + "),\n", + "”。\n", + "……\n", + "”,\n", + "”。\n", + "”,\n", + "1971\n", + "25\n", + "19\n", + "22\n", + "23\n", + "22\n", + "”。\n", + ",24\n", + "45.6\n", + "49\n", + "48\n", + "25\n", + "25\n", + "111\n", + "100\n", + ",3\n", + "29\n", + "23\n", + "386\n", + "148\n", + "134\n", + "48\n", + "17\n", + "———\n", + "1997\n", + "”,\n", + "23\n", + "21\n", + "23\n", + "21\n", + "425\n", + "”,\n", + "1996\n", + "”,\n", + "”。\n", + ":“\n", + "1989\n", + "1995\n", + "255\n", + "1700\n", + "281\n", + "”。\n", + "1.8\n", + ",1.4\n", + "300\n", + "”——\n", + "150\n", + "、“\n", + "”“\n", + "”,\n", + "100\n", + "19\n", + ";19\n", + ";20\n", + ";20\n", + "1935\n", + "1936\n", + ",“\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + ":(1)\n", + ":(1)\n", + "70\n", + "”,\n", + ":(1)\n", + ":(1)\n", + ":(1)\n", + ";(2)\n", + ";(3)\n", + ":(1)\n", + "”,“\n", + ":18\n", + ";19\n", + "(15\n", + ");\n", + "(18\n", + "60\n", + "19\n", + "60\n", + ");\n", + "(19\n", + ");\n", + ":(1)\n", + "”,\n", + "21\n", + "”,\n", + "”、“\n", + "”、“\n", + "”。\n", + "200\n", + "———\n", + "———\n", + ",“\n", + "———\n", + "21\n", + "---\n", + "”,\n", + "”,\n", + ",“\n", + "”,\n", + "———\n", + "”,\n", + "———\n", + "1962\n", + "———\n", + "———\n", + "21\n", + "21\n", + "---\n", + ",“\n", + ":“\n", + "---\n", + ",20\n", + "』。\n", + "』、『\n", + "300\n", + "2002\n", + "7—8\n", + 
"230\n", + "180\n", + "6—10\n", + "70\n", + "198\n", + "———\n", + "40\n", + ":1998\n", + "1998\n", + "1241\n", + "1003\n", + "700\n", + "》;\n", + "25\n", + "1%。\n", + "1997\n", + "300\n", + "1995\n", + ":“\n", + "”。\n", + ",10\n", + "128\n", + "7500\n", + "21\n", + "1993\n", + "410\n", + "———\n", + "1996\n", + ",“\n", + "”。\n", + "1992\n", + "242\n", + "78\n", + "1992\n", + "1991\n", + "360\n", + "1991\n", + "》。\n", + "80\n", + "1000\n", + "1995\n", + "1988\n", + "1992\n", + "214.6\n", + "1988\n", + "5000\n", + ")。\n", + "69\n", + "69\n", + "1986\n", + "80\n", + "》(\n", + "1987\n", + "》,\n", + "》,\n", + "19\n", + "157\n", + "46\n", + ",28\n", + ",11\n", + "97\n", + "82\n", + "1994\n", + "382\n", + "510\n", + "1993\n", + "》,\n", + "19\n", + "1997\n", + "19\n", + "ISO14000\n", + "、“\n", + "”、“\n", + "”、“\n", + "《CAD\n", + "》。\n", + "CAD\n", + "200\n", + "CAD\n", + "95%。\n", + "CAD\n", + "CAD\n", + "CAD\n", + "17\n", + "(CAD)\n", + "CAD\n", + "21\n", + "21\n", + "、B\n", + "CT\n", + ",24\n", + "17\n", + "1997\n", + "3000\n", + "”,\n", + "1.8\n", + "”。\n", + "5000\n", + "”,\n", + "1997\n", + "8000\n", + "1991\n", + "1991\n", + "16.73‰\n", + "1997\n", + "8‰,\n", + "84%\n", + "95%。\n", + "”,\n", + "10%—20%\n", + "10.5\n", + "”,\n", + "90\n", + "”,\n", + "”。\n", + ",“\n", + "300\n", + "1996\n", + "、750\n", + "』。\n", + "』,\n", + "2、3、4\n", + "40\n", + "43.83%;\n", + "127.6\n", + "39.6%。\n", + "100\n", + "1/3\n", + "1.5%\n", + "1000\n", + ")、\n", + ")、\n", + ")、\n", + "(DVD\n", + ")、\n", + "1997\n", + "60\n", + "、4\n", + ",1995\n", + "、1997\n", + ":56\n", + "145\n", + "1030\n", + "300\n", + "800\n", + "0.44\n", + "1996\n", + "300—500\n", + "、12\n", + "1995\n", + "21\n", + "140.4\n", + "5.7\n", + "1992\n", + "6—12\n", + "58\n", + "13.5\n", + "88.3%;\n", + "18.5%,\n", + "21\n", + "……\n", + "---\n", + ",“\n", + "”。\n", + "96\n", + "80\n", + ":“\n", + ",1996\n", + "21\n", + "120\n", + ",3\n", + "4023\n", + "4.2\n", + ",“\n", + "”,\n", + "200\n", + "2300\n", + "》,\n", + "87\n", + "21\n", + "17\n", + "17\n", + "39\n", + "100\n", + "141\n", + "554\n", + "1.5\n", + "1100\n", + "17\n", + "”,\n", + "365\n", + "22\n", + "100\n", + "100\n", + "500\n", + "100\n", + "2、\n", + "1、5\n", + "17\n", + "91·57%\n", + "93·34%。\n", + "20%,6\n", + "180\n", + "19.13\n", + "50%\n", + "8%\n", + "40%\n", + ",60%\n", + "1%\n", + "1997\n", + "》。\n", + "40\n", + "17\n", + "99%。\n", + "70\n", + "3000\n", + "1997\n", + "ISO14000\n", + "”、“\n", + ":“\n", + "”1997\n", + "1994\n", + "1988\n", + "1994\n", + "240\n", + "17\n", + "8000\n", + "2600\n", + "36000\n", + ",0.5\n", + "1984\n", + "35800\n", + "”。\n", + "1986\n", + "』---\n", + "”,\n", + "1997\n", + "1995\n", + "24.9\n", + "23\n", + "1985\n", + "66.9\n", + "60%\n", + "1992\n", + "100\n", + "300\n", + "80\n", + "4400\n", + "90\n", + "300\n", + "1000\n", + "49.2%。\n", + "1960\n", + ",70\n", + "80\n", + "90\n", + "20%,\n", + "70\n", + "80\n", + "3.9%。\n", + "70\n", + "、80\n", + "90\n", + "70\n", + "”,\n", + "”,\n", + "”,\n", + "》,\n", + "1966\n", + "”,\n", + "”,\n", + "60\n", + "1958\n", + "”,\n", + "1956\n", + ",“\n", + "”。\n", + "1955\n", + "》(\n", + "25\n", + "100\n", + "》(\n", + "》(\n", + "》(\n", + "3·\n", + "1994\n", + "100\n", + "77\n", + "1994\n", + "2·\n", + ");\n", + "———\n", + ");\n", + ")、\n", + ":(1)\n", + ",“\n", + "”、\n", + ",“\n", + "”。\n", + "”,\n", + "”,“\n", + "80\n", + "1984\n", + "3000\n", + "80\n", + "40\n", + ",“\n", + "97.3%\n", + "99.1%。\n", + "”,\n", + "”1\n", + "———\n", + "31\n", + ")、\n", + "102\n", + "3835\n", + "1993\n", + "102\n", + "KILL\n", + "CA\n", 
+ "KILL\n", + "KILL\n", + "CA\n", + "KILL98\n", + "1995—1996“\n", + ",1997\n", + "70\n", + "———\n", + "39%,29%\n", + "42%。\n", + "4.91%\n", + "16.2%,\n", + "21.5%。\n", + ",4\n", + "80\n", + "21\n", + "80\n", + ",20\n", + "60\n", + ";“\n", + "1997\n", + "94\n", + "45\n", + "260\n", + "1994\n", + ",4\n", + "108\n", + "60\n", + "1997\n", + "36\n", + "132.15\n", + "2114\n", + "1991\n", + ")、\n", + "》、\n", + "100\n", + "”。\n", + "”,\n", + "》,\n", + "60\n", + ":《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + "”。\n", + ",《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ",《\n", + "》(\n", + ")、《\n", + "》(\n", + "》(\n", + ")、\n", + "》(\n", + "”,\n", + ",1958\n", + "42\n", + "“‘\n", + ",“\n", + ",“\n", + "60\n", + "》。\n", + "”)\n", + "250\n", + "”、“\n", + "”、“\n", + "E1\n", + "200\n", + "1993\n", + "90\n", + "”,\n", + "1996\n", + "”、“\n", + "”、“\n", + "36\n", + "10%—30%,\n", + "—50\n", + "),\n", + "40\n", + "140\n", + "40\n", + "210\n", + "”,\n", + "”,\n", + "3300\n", + "———\n", + "21\n", + "、“\n", + "”。\n", + "1990\n", + "2400\n", + "60\n", + "1998\n", + "1996\n", + ",1997\n", + "”。\n", + "』。\n", + "”;\n", + "80\n", + "80\n", + "21\n", + "21\n", + "1/5\n", + "1997\n", + "62.5%,\n", + "6686\n", + "973\n", + "647\n", + ",1990\n", + "70.5\n", + "6%,\n", + "920\n", + "600\n", + "2500\n", + "50%,\n", + "80%。\n", + "532\n", + "122\n", + "1988\n", + "100\n", + "1995\n", + "27\n", + "1987\n", + ",1991\n", + "》、《\n", + "》、《\n", + ":“\n", + "5·12\n", + ",24\n", + "292\n", + "),\n", + "29\n", + "191\n", + "12.36\n", + "21\n", + "1995\n", + "》、《\n", + "《60\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "40\n", + "、800\n", + "1957\n", + "1990\n", + "1987\n", + "”、“\n", + "1982\n", + "》,\n", + "1957\n", + "”,\n", + ":“\n", + "1954\n", + "1948\n", + "1947\n", + ":“\n", + "1946\n", + "》。\n", + "6741、6742\n", + "———\n", + "6000\n", + "、2000\n", + ";“\n", + "”。\n", + "”;\n", + "”,\n", + "2000\n", + "”、“\n", + "”、“\n", + ");\n", + ":《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1995\n", + ",1995\n", + "1963\n", + ",1987\n", + "1987\n", + "27\n", + "88\n", + "1000\n", + "》、《\n", + "3000\n", + "1996\n", + "60\n", + "”。\n", + "”,\n", + "”,\n", + "———\n", + "”、“\n", + "”、“\n", + "63”\n", + "33\n", + "———\n", + "———\n", + "60”\n", + "80\n", + "60%。\n", + "SPE\n", + "”,\n", + "8000\n", + "300\n", + "”,\n", + "———\n", + "40\n", + "280\n", + "85%\n", + "、X\n", + ",“\n", + "”。\n", + "2000\n", + "1997\n", + "1993\n", + "300\n", + ",“\n", + "1.2\n", + "”。\n", + ":“\n", + "”,“\n", + "”。\n", + "196\n", + "365\n", + "———\n", + "3.4\n", + "3.4\n", + "1.6\n", + "1995\n", + "1993、1994\n", + "8000\n", + ",4\n", + "23\n", + ",4\n", + "26\n", + ":4\n", + "26\n", + "”,\n", + "”,\n", + "”,\n", + "200\n", + ",40\n", + "2100\n", + "7782\n", + "7234\n", + "200\n", + ",75\n", + "———\n", + "———\n", + "313\n", + "26\n", + "1997\n", + "”、“\n", + "2000\n", + "20%。\n", + "100\n", + ")、\n", + "”。\n", + ",“\n", + "7000\n", + ",7\n", + "99.9%,\n", + "93.8\n", + "90%\n", + "1967\n", + "35.2‰,\n", + "1997\n", + "5.25‰。\n", + ":60\n", + "1∶5;70\n", + "1∶16;80\n", + "1∶45;90\n", + "1∶120。\n", + "1996\n", + "《1996\n", + "38\n", + "1994\n", + "98.5%,\n", + "99.5%,\n", + "99.9%,\n", + "98.7%,\n", + "31\n", + "121\n", + "110\n", + "4.22\n", + "1990\n", + "22.6%;\n", + "21.7\n", + "1990\n", + "30%;\n", + "675\n", + ",1992\n", + ",“\n", + "1986\n", + "1990\n", + ",1990\n", + "95\n", + "》,\n", + "》(\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + ")。\n", + "80\n", + "90\n", + ":(1)\n", + 
"”,\n", + "”。\n", + "”、\n", + "”、“\n", + ":(1)\n", + ",“\n", + ":(1)\n", + "”,\n", + ":(1)\n", + "1996\n", + "”。\n", + "21\n", + "70\n", + "”“\n", + "21\n", + "21\n", + "35\n", + "28\n", + ",300\n", + "484\n", + "』,\n", + "』。\n", + "》、《\n", + "1993\n", + "”。\n", + "》、《\n", + "44\n", + ",1987\n", + "300\n", + "100%,\n", + "600\n", + "1964\n", + "1947\n", + ",1988\n", + "1971\n", + "”,\n", + "1996\n", + "”,\n", + "400\n", + "21\n", + "160\n", + "300\n", + "29\n", + "10%,\n", + "80%\n", + ",95%\n", + "”。\n", + "”,\n", + "———\n", + "1995\n", + "———\n", + "3500\n", + "2000\n", + "”。\n", + "———\n", + "1991\n", + "”,\n", + "---\n", + ",“\n", + "”,\n", + ",“\n", + "”,\n", + "”。\n", + "”、“\n", + "”,\n", + ",1993\n", + "、“\n", + "———\n", + "500\n", + "CE—7479DBS\n", + "73\n", + "200\n", + "150\n", + "”,\n", + "30%\n", + "》,\n", + "9202\n", + "(9202\n", + "21\n", + ":“\n", + ":“\n", + "’,\n", + "96\n", + ":“\n", + ":“\n", + "40\n", + "、800\n", + "”、“\n", + "31457\n", + "1994\n", + "31\n", + "6741、6742\n", + "116\n", + "”、“\n", + "”、“\n", + "100\n", + "———\n", + "1998\n", + "10—24\n", + "10%\n", + "19\n", + "1998\n", + ",30\n", + "9970\n", + "290\n", + "173\n", + "1985\n", + "94\n", + "100\n", + "94\n", + "100\n", + "199452\n", + ",2000\n", + "7—14\n", + "1.7\n", + "500\n", + ",4\n", + "100\n", + "1997\n", + "”、“\n", + "”,\n", + "21\n", + "”。\n", + "2000\n", + "180\n", + "1997\n", + "40\n", + "1999\n", + "500\n", + "140\n", + "450\n", + "800\n", + "1995\n", + "1997\n", + "1995\n", + "95\n", + "34\n", + "DEC\n", + "2450\n", + "200\n", + "100%。\n", + "786\n", + "780\n", + "99.24%。\n", + "700\n", + "7000\n", + ":“\n", + "”,\n", + "120\n", + "”,\n", + ":“\n", + "”,\n", + "”;\n", + "”,\n", + "“300\n", + "”,\n", + "1980\n", + ";1985\n", + ",100\n", + "、200\n", + "、1000\n", + "6000\n", + "”。\n", + "”,\n", + "1994\n", + "55\n", + ":“\n", + "、“\n", + "---\n", + "100\n", + "1917—1927\n", + "130\n", + "(VeraSchwarcz)\n", + "(1917—1927)”\n", + "1996\n", + "7072\n", + "”。\n", + "49\n", + "19\n", + ":“\n", + "”“\n", + "》:\n", + "21\n", + "21\n", + "60\n", + ":“\n", + ",1998\n", + ",8000\n", + "1944\n", + "2.7\n", + "69\n", + "3000\n", + "》,\n", + "200\n", + ",52\n", + "41\n", + "79\n", + ",“\n", + ";79\n", + "》。\n", + "100\n", + ",“\n", + "———\n", + "》、\n", + "》、\n", + "100\n", + "100\n", + "10000\n", + "1998\n", + "1898\n", + "100\n", + "〉》。\n", + "〉》,\n", + "》。\n", + "》“\n", + "……\n", + "———“\n", + "———\n", + ",“\n", + "71\n", + "》,\n", + "》,\n", + "”,\n", + "252\n", + ":《\n", + "》,“\n", + "……”\n", + "100\n", + ",《\n", + "28\n", + "———\n", + "”。\n", + "600\n", + ",“\n", + "、“\n", + "100\n", + "26\n", + ",11\n", + "1.3\n", + "》。\n", + "”、“\n", + ":“1997\n", + "1996\n", + "1997\n", + "A。\n", + "———\n", + "1971\n", + ",3000\n", + "”。\n", + "17\n", + ",200\n", + "”。\n", + ":“\n", + "”9\n", + "150\n", + ",“\n", + "1997\n", + "---\n", + ",“\n", + "58\n", + ",86%\n", + "”;98%\n", + ":“\n", + ":“\n", + "3000\n", + "1000\n", + "300\n", + "2000\n", + "”、“\n", + "1996\n", + "1997\n", + "1995\n", + "1994\n", + "1996\n", + "90\n", + "1992\n", + "1993\n", + ":“\n", + ":“\n", + ":“\n", + ":5\n", + "27\n", + ":“\n", + ",80\n", + "28\n", + ",“\n", + "483\n", + "65\n", + "2000\n", + "———\n", + "1997\n", + "1—2\n", + ",4\n", + ",5\n", + ",15\n", + ",15\n", + "———\n", + "”。\n", + "9711\n", + "800\n", + "28\n", + "”。\n", + "1998\n", + "500\n", + "90\n", + "(INTERNET)\n", + "90\n", + ",“\n", + "”,\n", + "21\n", + "INTERNET)、\n", + "1996\n", + "1995\n", + "90\n", + "NII\n", + "1991\n", + ";1994\n", + "NII,\n", + "90\n", + "2020\n", + 
"2%,\n", + "2%\n", + "90%。\n", + "70\n", + "50%,\n", + "78%,\n", + "50%—75%,\n", + "35%—50%,\n", + "35%。\n", + "21\n", + ",60\n", + "70\n", + ",80\n", + ",90\n", + ",1991\n", + "238\n", + ",1997\n", + "3.42\n", + "”。\n", + "1992\n", + "”(\n", + ",“\n", + "”,\n", + "60%。\n", + "90\n", + "1962\n", + "》,\n", + "1957\n", + ",1909\n", + "1949\n", + "87.5%,\n", + "12.5%。\n", + "1912\n", + "50%\n", + "”《\n", + ":“\n", + "33.54%。\n", + "80\n", + "18000\n", + "28\n", + "25\n", + "51\n", + "6600\n", + "29\n", + "26\n", + "29\n", + "”,\n", + ",5\n", + "25\n", + "28\n", + "』,\n", + "』、『\n", + "』、『\n", + ",《\n", + "580\n", + "686.6\n", + "28\n", + "”———\n", + "、“\n", + "”———\n", + "、“\n", + "”———\n", + "、“\n", + "”———\n", + "”,\n", + "”。\n", + "———“\n", + "”,\n", + "”。\n", + ",12\n", + "66%,\n", + "96∶59\n", + ":“\n", + "42∶30,\n", + "21\n", + "8∶12,\n", + "13∶16、19∶20……\n", + "……\n", + ":“\n", + "),\n", + ":31\n", + ",31\n", + "22\n", + ",6\n", + "22\n", + "C、D\n", + ",A、B\n", + ",A\n", + ";B\n", + ";C\n", + ";D\n", + "103∶94\n", + "29\n", + ",12\n", + "13℃/29℃\n", + "16℃/28℃\n", + "17℃/30℃\n", + "10℃/28℃\n", + "8℃/22℃\n", + "10℃/24℃\n", + "14℃/18℃\n", + "10℃/22℃\n", + "8℃/21℃\n", + "18℃/29℃\n", + "21℃/30℃\n", + "19℃/27℃\n", + "18℃/29℃\n", + "21℃/29℃\n", + "21℃/29℃\n", + "19℃/29℃\n", + "15℃/19℃\n", + "14℃/29℃\n", + "22℃/32℃\n", + "20℃/25℃\n", + "25℃/31℃\n", + "23℃/30℃\n", + "26℃/34℃\n", + "20℃/26℃\n", + "20℃/28℃\n", + "17℃/26℃\n", + "13℃/24℃\n", + "21℃/27℃\n", + "15℃/20℃\n", + "12℃/27℃\n", + "10℃/20℃\n", + "8℃/26℃\n", + "16℃/24℃\n", + "23℃/30℃\n", + "24℃/29℃\n", + "24℃/29℃\n", + "15℃/22℃\n", + "28℃/34℃\n", + "9℃/20℃\n", + "27℃/36℃\n", + "17℃/30℃\n", + "10℃/18℃\n", + "9℃/17℃\n", + "8℃/14℃\n", + "8℃/15℃\n", + "15℃/24℃\n", + ":29\n", + "(5\n", + "29\n", + "—5\n", + ")『\n", + "』,\n", + "1980\n", + "29\n", + ",100\n", + "100\n", + "1935\n", + ",1956\n", + "28\n", + "28\n", + "28\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "300\n", + "28\n", + "27\n", + "28\n", + "80\n", + "1998\n", + "8%\n", + ",5\n", + "21\n", + "、22\n", + "8%。\n", + "28\n", + "28\n", + "1998\n", + "27\n", + "28\n", + "》,\n", + "1997\n", + "1993\n", + "”,1996\n", + "1997\n", + "1996\n", + "”。\n", + "”。\n", + "1993\n", + "1998\n", + "1991\n", + "”,\n", + "36\n", + "43\n", + "35\n", + "42\n", + "28\n", + "—4\n", + "4—6\n", + "28\n", + ":22\n", + "—24\n", + "7℃—10℃。\n", + "28\n", + ":『\n", + "4—5\n", + ":28\n", + "29\n", + "(5\n", + "28\n", + "—5\n", + "29\n", + "25\n", + "21\n", + "27\n", + ":『\n", + "33\n", + "BNO\n", + "150\n", + "27\n", + "1997\n", + "1997\n", + "272\n", + ",15\n", + ",32\n", + ",5\n", + "》,\n", + "1997\n", + "”(\n", + ")。\n", + "27\n", + "》1998\n", + "1997\n", + ",5\n", + "26\n", + "29\n", + "27\n", + "26\n", + "27\n", + "”,\n", + ")、\n", + "”,\n", + "”。\n", + "2、\n", + "1996\n", + "1、\n", + "“6·26”\n", + "27\n", + "“6·26”\n", + "14℃/27℃\n", + "18℃/26℃\n", + "18℃/26℃\n", + "9℃/29℃\n", + "9℃/22℃\n", + "12℃/21℃\n", + "16℃/20℃\n", + "8℃/22℃\n", + "8℃/18℃\n", + "19℃/29℃\n", + "20℃/30℃\n", + "18℃/30℃\n", + "17℃/25℃\n", + "22℃/30℃\n", + "18℃/28℃\n", + "15℃/27℃\n", + "13℃/22℃\n", + "16℃/29℃\n", + "20℃/26℃\n", + "20℃/30℃\n", + "23℃/31℃\n", + "22℃/33℃\n", + "26℃/33℃\n", + "20℃/27℃\n", + "22℃/30℃\n", + "15℃/22℃\n", + "16℃/25℃\n", + "6℃/26℃\n", + "14℃/26℃\n", + "12℃/28℃\n", + "9℃/25℃\n", + "13℃/27℃\n", + "17℃/23℃\n", + "22℃/30℃\n", + "22℃/30℃\n", + "22℃/30℃\n", + "14℃/25℃\n", + "26℃/34℃\n", + "11℃/20℃\n", + "27℃/35℃\n", + "19℃/33℃\n", + "9℃/20℃\n", + "10℃/18℃\n", + "11℃/17℃\n", + "8℃/15℃\n", + 
"13℃/24℃\n", + "27\n", + "28\n", + "5—6\n", + "(5\n", + "27\n", + "—5\n", + "28\n", + "”,\n", + "”,\n", + "26\n", + "”,\n", + "”、“\n", + "”、“\n", + "”,\n", + "1988\n", + "1992\n", + "7%,\n", + "26%。\n", + "957\n", + "》,\n", + "26\n", + "”、\n", + "26\n", + "”(\n", + "26\n", + "26\n", + "26\n", + "』。\n", + ":『\n", + "』。\n", + "……\n", + "』。\n", + ",『\n", + ",『\n", + "』,\n", + ",『\n", + "1996\n", + "40\n", + "15℃/27℃\n", + "16℃/26℃\n", + "18℃/27℃\n", + "12℃/29℃\n", + "7℃/19℃\n", + "15℃/25℃\n", + "14℃/19℃\n", + "8℃/20℃\n", + "10℃/25℃\n", + "20℃/30℃\n", + "17℃/27℃\n", + "17℃/29℃\n", + "18℃/29℃\n", + "21℃/25℃\n", + "20℃/29℃\n", + "18℃/27℃\n", + "15℃/21℃\n", + "17℃/28℃\n", + "20℃/30℃\n", + "20℃/30℃\n", + "24℃/31℃\n", + "23℃/33℃\n", + "24℃/30℃\n", + "20℃/26℃\n", + "20℃/31℃\n", + "18℃/27℃\n", + "16℃/26℃\n", + "9℃/25℃\n", + "14℃/25℃\n", + "13℃/29℃\n", + "8℃/20℃\n", + "9℃/22℃\n", + "15℃/25℃\n", + "20℃/23℃\n", + "24℃/30℃\n", + "24℃/30℃\n", + "17℃/27℃\n", + "27℃/34℃\n", + "11℃/21℃\n", + "27℃/36℃\n", + "18℃/30℃\n", + "8℃/15℃\n", + "9℃/14℃\n", + "11℃/16℃\n", + "9℃/14℃\n", + "15℃/22℃\n", + "26\n", + "27\n", + "(5\n", + "26\n", + "—5\n", + "27\n", + "49\n", + "200\n", + "”。\n", + ",《\n", + "25\n", + "》,\n", + ")、\n", + "25\n", + ",60\n", + "):\n", + "1997\n", + "1996\n", + "1/3,\n", + "28%。\n", + "200\n", + ",“\n", + ",“\n", + "21\n", + "25\n", + "———“\n", + "35\n", + "45\n", + "45\n", + "”,\n", + "……\n", + "』、『\n", + "』『\n", + "———\n", + ":“\n", + "1/3\n", + "———\n", + ":“\n", + "1000\n", + "》”,5\n", + "22\n", + ",1996\n", + "8.16\n", + "31\n", + "1997\n", + "1997\n", + "1997\n", + "28\n", + ",1996\n", + "1992\n", + ",1993\n", + ",1997\n", + "1056\n", + "3432\n", + "159\n", + "3300\n", + "6732\n", + ",63\n", + "293\n", + "198\n", + "784.52\n", + "1000\n", + "3300\n", + "4000\n", + "63\n", + "316\n", + "118\n", + "80\n", + "25\n", + "25\n", + "26\n", + "(5\n", + "25\n", + "—5\n", + "26\n", + "166\n", + "60\n", + "800\n", + "22\n", + "22\n", + "1489705\n", + "53.29%,\n", + "1995\n", + "57\n", + "17\n", + "77813\n", + "63.5%;\n", + "790\n", + "98.75%。\n", + "1568308\n", + "496\n", + "777—300\n", + "73.8\n", + "747—400\n", + "3.1\n", + "777—200\n", + "10.1\n", + "367\n", + "777—300\n", + "23\n", + "100\n", + "200\n", + "1000\n", + "100\n", + "1000\n", + "1000\n", + "700\n", + "2200\n", + "2200\n", + "22\n", + "》,\n", + "———\n", + ",“\n", + "85%\n", + "”。\n", + ":“\n", + "7000\n", + "3.5\n", + "1/2\n", + "1995\n", + "》,\n", + "23\n", + "500\n", + "”8000\n", + "3000\n", + "100\n", + "1000\n", + "300\n", + ":“\n", + "’,\n", + ":“《\n", + "”8\n", + ",《\n", + "……”\n", + "300\n", + "40\n", + "23\n", + "26%。\n", + "2370\n", + ",2\n", + "4000\n", + "1997\n", + "1996\n", + "”。\n", + "90%,\n", + "”。\n", + "』,\n", + "』,\n", + "』,\n", + "』,\n", + "』,\n", + "』。\n", + "200\n", + "865\n", + ",5\n", + "23\n", + "56\n", + "1.17\n", + "5333\n", + "507\n", + "40\n", + "23\n", + "40\n", + "2000\n", + "100\n", + ",5\n", + "2000\n", + ",C—1—83\n", + "C—1—83\n", + ",15\n", + "……\n", + "”“\n", + "”,\n", + "400\n", + "C—1—83\n", + "”。\n", + "90%\n", + "》。\n", + "22\n", + "23\n", + ",23\n", + "23\n", + "”,\n", + "23\n", + "23\n", + "”,\n", + ",“\n", + "”,\n", + "1998\n", + "2000\n", + "●“\n", + "22\n", + "34\n", + "1997\n", + "22\n", + "1997\n", + "19\n", + "1996\n", + "”。\n", + "1—2\n", + ")3、\n", + "2、\n", + "1、\n", + ":“\n", + "”;\n", + "2000\n", + "……\n", + "800\n", + "1200\n", + "1548\n", + "452\n", + "2000\n", + ":“\n", + ":“\n", + "”,\n", + "40%,\n", + "”。\n", + ":“\n", + "”,\n", + "3000\n", + "”,\n", + "55\n", + ",10\n", + 
"”。\n", + "19\n", + "”。\n", + "3000\n", + "———\n", + ",5\n", + "0∶2\n", + "21\n", + "2000\n", + "90\n", + "1996\n", + "),\n", + "2∶3\n", + "———\n", + "100%。\n", + "150\n", + "1998\n", + ",4\n", + "28\n", + "———\n", + ",5\n", + "2∶0\n", + "5∶0\n", + "1∶2\n", + ")。\n", + "……\n", + "):\n", + "):\n", + "5∶0\n", + "”。\n", + "23\n", + "2000\n", + "2∶3\n", + "2∶3\n", + "15∶10、17∶14\n", + "1∶3\n", + "17∶18、15∶4、15∶1\n", + "6∶15、2∶15\n", + "9∶15、4∶15\n", + "1∶2\n", + "3∶15、6∶15\n", + "3∶2\n", + "2∶3\n", + "22\n", + "13℃/27℃\n", + "17℃/25℃\n", + "17℃/28℃\n", + "10℃/26℃\n", + "6℃/20℃\n", + "18℃/24℃\n", + "10℃/18℃\n", + "12℃/19℃\n", + "11℃/24℃\n", + "20℃/28℃\n", + "19℃/26℃\n", + "19℃/24℃\n", + "20℃/25℃\n", + "22℃/29℃\n", + "18℃/25℃\n", + "10℃/18℃\n", + "13℃/19℃\n", + "15℃/25℃\n", + "24℃/30℃\n", + "21℃/30℃\n", + "24℃/29℃\n", + "23℃/30℃\n", + "26℃/32℃\n", + "20℃/28℃\n", + "21℃/31℃\n", + "17℃/27℃\n", + "17℃/26℃\n", + "12℃/29℃\n", + "15℃/26℃\n", + "12℃/27℃\n", + "7℃/23℃\n", + "8℃/24℃\n", + "7℃/18℃\n", + "24℃/29℃\n", + "26℃/29℃\n", + "25℃/29℃\n", + "19℃/30℃\n", + "28℃/33℃\n", + "11℃/22℃\n", + "29℃/36℃\n", + "13℃/25℃\n", + "11℃/18℃\n", + "13℃/23℃\n", + "12℃/23℃\n", + "12℃/25℃\n", + "12℃/24℃\n", + ",22\n", + "23\n", + "5—7\n", + "25\n", + "27\n", + "23\n", + "22\n", + "21\n", + "45\n", + "300\n", + "211\n", + "6000\n", + "21\n", + ",20\n", + ",20\n", + "”,\n", + "100\n", + "”。\n", + "”,\n", + "100\n", + "2、\n", + "1、\n", + "、35\n", + "1·8\n", + "1·26\n", + "328\n", + "6.24\n", + "4.98\n", + "79.7%。\n", + "21\n", + "500\n", + "10%—20%\n", + "19\n", + "、CT、MRI\n", + "20%\n", + "21\n", + "”,\n", + "』,\n", + "』,\n", + "』、『\n", + "』、『\n", + "』、『\n", + "』、『\n", + "』、『\n", + "』,\n", + "』、『\n", + "』、『\n", + "』,\n", + "』,\n", + "』,\n", + "21\n", + "”,\n", + ",5\n", + "21\n", + "21\n", + "25\n", + "28\n", + "……\n", + "1.9\n", + ",1800\n", + ":“\n", + ":“\n", + ":“\n", + ":“\n", + ",4\n", + "17\n", + "14℃/27℃\n", + "17℃/26℃\n", + "15℃/19℃\n", + "10℃/19℃\n", + "8℃/20℃\n", + "18℃/24℃\n", + "13℃/19℃\n", + "15℃/24℃\n", + "16℃/26℃\n", + "21℃/28℃\n", + "18℃/25℃\n", + "23℃/33℃\n", + "24℃/31℃\n", + "24℃/32℃\n", + "25℃/33℃\n", + "16℃/22℃\n", + "15℃/20℃\n", + "16℃/18℃\n", + "25℃/30℃\n", + "22℃/33℃\n", + "25℃/31℃\n", + "23℃/28℃\n", + "26℃/30℃\n", + "18℃/25℃\n", + "8℃/23℃\n", + "19℃/25℃\n", + "15℃/25℃\n", + "10℃/25℃\n", + "13℃/22℃\n", + "12℃/23℃\n", + "8℃/23℃\n", + "11℃/19℃\n", + "4℃/14℃\n", + "24℃/32℃\n", + "24℃/29℃\n", + "24℃/29℃\n", + "18℃/25℃\n", + "27℃/34℃\n", + "14℃/24℃\n", + "27℃/41℃\n", + "22℃/31℃\n", + "11℃/20℃\n", + "14℃/25℃\n", + "15℃/26℃\n", + "12℃/25℃\n", + "18℃/26℃\n", + ",21\n", + "22\n", + "(5\n", + "21\n", + "—5\n", + "22\n", + ")1986\n", + "———\n", + "900\n", + "127\n", + "”。\n", + "———\n", + "、2\n", + "3000\n", + "21.7\n", + "”,\n", + "”。\n", + "1954\n", + "1998\n", + "2002\n", + "WestChinaInfor.),\n", + "3-4\n", + "19\n", + "6-7\n", + "19\n", + ":“\n", + "》。\n", + "”。\n", + ":“\n", + "3000\n", + "17\n", + "”(\n", + "20%—30%\n", + "97%,\n", + "—13\n", + ":“\n", + ":“\n", + "1993\n", + ":“\n", + "1993\n", + "2、5\n", + "250\n", + "300\n", + "1、5\n", + ":“\n", + ",“\n", + "”。\n", + ",73\n", + ":“\n", + "”。\n", + ",5\n", + "200\n", + "600\n", + "1985\n", + "”,\n", + "”,\n", + "”。\n", + ":8\n", + "”?\n", + "1507\n", + "44\n", + "9000\n", + "44\n", + "4—6\n", + "(5\n", + "—5\n", + "21\n", + ",3\n", + "70\n", + "1.5\n", + ",3000\n", + "130\n", + "60\n", + ",“\n", + "1938\n", + "19\n", + "56\n", + "60\n", + "80\n", + "2000\n", + "……3\n", + "23\n", + "”,\n", + "300\n", + "43\n", + "25\n", + "”,5\n", + "21\n", + "140\n", 
+ "》,\n", + ",15\n", + "8.6\n", + "”,\n", + "”。\n", + "19\n", + "200\n", + "100\n", + "1000\n", + "17000\n", + ",14\n", + "、12\n", + "7.1\n", + "4.5\n", + "5180\n", + "5.3\n", + "19\n", + ":“\n", + "IC\n", + "21\n", + "37\n", + "、72\n", + "19\n", + "1998\n", + "22\n", + "28\n", + "19\n", + "1998\n", + "23\n", + "27\n", + "19\n", + "19\n", + "19\n", + "1997\n", + "124.7\n", + "8.4%。\n", + "C·\n", + ",1938\n", + "28\n", + "81\n", + "500\n", + ":“\n", + "15℃/27℃\n", + "16℃/28℃\n", + "16℃/27℃\n", + "13℃/29℃\n", + "11℃/26℃\n", + "13℃/27℃\n", + "16℃/23℃\n", + "14℃/24℃\n", + "9℃/25℃\n", + "20℃/30℃\n", + "17℃/29℃\n", + "20℃/31℃\n", + "19℃/29℃\n", + "22℃/30℃\n", + "20℃/28℃\n", + "23℃/30℃\n", + "16℃/26℃\n", + "18℃/27℃\n", + "20℃/32℃\n", + "20℃/31℃\n", + "25℃/32℃\n", + "25℃/32℃\n", + "26℃/35℃\n", + "22℃/30℃\n", + "22℃/32℃\n", + "18℃/29℃\n", + "17℃/26℃\n", + "6℃/23℃\n", + "16℃/25℃\n", + "13℃/29℃\n", + "10℃/22℃\n", + "14℃/23℃\n", + "3℃/8℃\n", + "23℃/29℃\n", + "24℃/30℃\n", + "24℃/30℃\n", + "16℃/24℃\n", + "27℃/31℃\n", + "15℃/22℃\n", + "26℃/34℃\n", + "19℃/30℃\n", + "9℃/18℃\n", + "9℃/24℃\n", + "14℃/24℃\n", + "13℃/23℃\n", + "12℃/20℃\n", + ",19\n", + "4—6\n", + "(5\n", + "19\n", + "—5\n", + "1996\n", + ",24\n", + ",17\n", + "』、\n", + "』(\n", + "3200\n", + "150\n", + "100\n", + "21\n", + "1995\n", + ":“\n", + "”“\n", + "150\n", + ":“\n", + ",“\n", + "1994\n", + "70\n", + "1994\n", + ":“\n", + "”1997\n", + "1992\n", + "”5\n", + ")、\n", + ")、\n", + ")、\n", + "1999\n", + "2000\n", + "1929\n", + "150\n", + "”,\n", + "”,\n", + "》、《\n", + "1991\n", + "3000\n", + "70\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》(\n", + "42%\n", + ",1995\n", + ":“\n", + "、3000\n", + "800\n", + "1500\n", + ",1500\n", + ",674\n", + "500\n", + "22\n", + "”,\n", + "50%\n", + "350\n", + "1.3\n", + "1.5\n", + "”。\n", + ":“\n", + "”。\n", + "1995\n", + "6.7%,\n", + "8%,\n", + "100\n", + "51%,100\n", + "300\n", + "9.5%,300\n", + "3%。\n", + ",1994\n", + "3385\n", + "47\n", + "70%\n", + "”,\n", + "”。\n", + "1962\n", + "1953\n", + "”,\n", + ",1947\n", + "1962\n", + "“1962\n", + "”,\n", + "”。\n", + "》,\n", + "48\n", + "14℃/27℃\n", + "17℃/26℃\n", + "15℃/27℃\n", + "13℃/29℃\n", + "10℃/23℃\n", + "15℃/28℃\n", + "16℃/25℃\n", + "13℃/25℃\n", + "12℃/26℃\n", + "18℃/27℃\n", + "17℃/30℃\n", + "18℃/29℃\n", + "19℃/29℃\n", + "22℃/30℃\n", + "20℃/29℃\n", + "21℃/33℃\n", + "14℃/20℃\n", + "16℃/31℃\n", + "18℃/30℃\n", + "19℃/28℃\n", + "25℃/31℃\n", + "26℃/34℃\n", + "26℃/34℃\n", + "24℃/32℃\n", + "21℃/32℃\n", + "18℃/29℃\n", + "16℃/27℃\n", + "8℃/21℃\n", + "16℃/26℃\n", + "13℃/26℃\n", + "10℃/23℃\n", + "13℃/27℃\n", + "9℃/12℃\n", + "23℃/29℃\n", + "25℃/31℃\n", + "25℃/30℃\n", + "16℃/25℃\n", + "30℃/37℃\n", + "19℃/24℃\n", + "27℃/36℃\n", + "15℃/31℃\n", + "12℃/21℃\n", + "11℃/23℃\n", + "13℃/25℃\n", + "12℃/26℃\n", + "17℃/27℃\n", + ":18\n", + "19\n", + "4—6\n", + ");\n", + "(5\n", + "—5\n", + "19\n", + "1938\n", + ",1929\n", + ",1931\n", + "1998\n", + "87\n", + "2000\n", + "190\n", + ",158\n", + ",32\n", + "———50\n", + "540\n", + "1/3\n", + "17\n", + "』。\n", + "--\n", + "2000\n", + "6000\n", + ",“\n", + "1994\n", + "”。\n", + "”,\n", + "21\n", + "1986\n", + "』,\n", + ",『\n", + "』,\n", + "』,\n", + ":『\n", + "』,\n", + ",『\n", + ",『\n", + "』,\n", + "』(\n", + "”,\n", + "”,\n", + "1988\n", + "1996\n", + "1962\n", + "60\n", + "”。\n", + "》,\n", + "”。\n", + "48\n", + "”。\n", + "296\n", + "108\n", + "19.8%。\n", + ":“\n", + ",10\n", + ",1000\n", + "”。\n", + ":“\n", + "”:\n", + "8%,\n", + "”,\n", + "”。\n", + ":“\n", + "”。\n", + "”,\n", + "80\n", + ":『\n", + 
"300\n", + "”,\n", + "45\n", + "80\n", + "300\n", + "”,50\n", + ",C—53\n", + "90\n", + "60\n", + "”,\n", + ",1943\n", + "1996\n", + "C—53\n", + "25\n", + "、“\n", + "”。\n", + ",“\n", + ",800\n", + "260\n", + "……\n", + "”,\n", + "———《\n", + "》”\n", + "78\n", + "”。\n", + "184\n", + "2∶0\n", + "1996\n", + "150\n", + "……\n", + "———\n", + "———\n", + "1997\n", + "25\n", + "40\n", + "———《\n", + "》。\n", + "B12\n", + "4000\n", + "”,\n", + "”,\n", + "B,\n", + "28\n", + "1998\n", + "1997\n", + "B。\n", + "1997\n", + "170\n", + "470\n", + "67\n", + "2916\n", + "92\n", + "“TEDA”\n", + "---\n", + ")。\n", + "1997—1998\n", + "51\n", + "、42\n", + "、38\n", + ":12\n", + "2∶0\n", + "2∶0\n", + "5∶0\n", + "5∶0\n", + "15℃/24℃\n", + "18℃/25℃\n", + "16℃/24℃\n", + "10℃/20℃\n", + "8℃/19℃\n", + "18℃/23℃\n", + "13℃/19℃\n", + "19℃/29℃\n", + "19℃/32℃\n", + "16℃/22℃\n", + "14℃/23℃\n", + "17℃/24℃\n", + "16℃/21℃\n", + "21℃/28℃\n", + "18℃/26℃\n", + "16℃/23℃\n", + "13℃/20℃\n", + "16℃/23℃\n", + "15℃/23℃\n", + "18℃/30℃\n", + "23℃/29℃\n", + "22℃/29℃\n", + "26℃/33℃\n", + "17℃/22℃\n", + "20℃/26℃\n", + "16℃/24℃\n", + "18℃/30℃\n", + "6℃/19℃\n", + "12℃/21℃\n", + "12℃/23℃\n", + "6℃/18℃\n", + "8℃/19℃\n", + "10℃/20℃\n", + "22℃/26℃\n", + "24℃/28℃\n", + "24℃/28℃\n", + "16℃/27℃\n", + "27℃/37℃\n", + "14℃/21℃\n", + "27℃/39℃\n", + "16℃/26℃\n", + "5℃/9℃\n", + "11℃/21℃\n", + "15℃/24℃\n", + "13℃/22℃\n", + "12℃/24℃\n", + "(5\n", + "—5\n", + "B19\n", + "B19\n", + "B19\n", + "VP2\n", + "B19\n", + "》,\n", + "———\n", + "22\n", + ")『\n", + "70\n", + "、80\n", + "100\n", + "60\n", + ",3\n", + "28\n", + "26\n", + "”。\n", + "》,\n", + "100\n", + "”,\n", + "”。\n", + "48\n", + "23\n", + "250\n", + "738\n", + "1976\n", + "》、《\n", + "1999\n", + "1999\n", + "),\n", + ":『\n", + "』,\n", + "』。\n", + "19\n", + "21\n", + "15℃/24℃\n", + "17℃/28℃\n", + "14℃/24℃\n", + "14℃/25℃\n", + "13℃/22℃\n", + "16℃/30℃\n", + "13℃/19℃\n", + "17℃/29℃\n", + "16℃/32℃\n", + "17℃/21℃\n", + "14℃/20℃\n", + "17℃/22℃\n", + "15℃/20℃\n", + "21℃/28℃\n", + "17℃/23℃\n", + "17℃/25℃\n", + "13℃/20℃\n", + "13℃/26℃\n", + "15℃/22℃\n", + "18℃/30℃\n", + "25℃/30℃\n", + "26℃/34℃\n", + "27℃/34℃\n", + "18℃/23℃\n", + "20℃/27℃\n", + "19℃/26℃\n", + "18℃/30℃\n", + "6℃/17℃\n", + "14℃/23℃\n", + "14℃/27℃\n", + "6℃/18℃\n", + "13℃/20℃\n", + "8℃/20℃\n", + "23℃/29℃\n", + "24℃/31℃\n", + "25℃/31℃\n", + "14℃/24℃\n", + "26℃/36℃\n", + "14℃/22℃\n", + "26℃/39℃\n", + "17℃/28℃\n", + "11℃/22℃\n", + "11℃/25℃\n", + "12℃/23℃\n", + "10℃/21℃\n", + "12℃/20℃\n", + "5—7\n", + "(5\n", + "—5\n", + "107.4%。\n", + "”、“\n", + "”、“\n", + "17\n", + "17\n", + "》、《\n", + "》;\n", + "》、《\n", + "》;\n", + "》、《\n", + "》;\n", + "》、《\n", + "》;\n", + "》、《\n", + "》;\n", + "》;\n", + "》、《\n", + "》;\n", + "》;\n", + "》;\n", + "》;\n", + "》。\n", + "———《\n", + "》,\n", + "300\n", + "(1898—1956)\n", + "1997\n", + "21\n", + "、《\n", + ":《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》。\n", + ":1997\n", + "3、\n", + "120\n", + "2、\n", + "1、\n", + "58\n", + "318\n", + "5000\n", + "”,\n", + ",5\n", + "3:\n", + "2:\n", + "1:\n", + "”。\n", + "5000\n", + "60%\n", + "》,\n", + "———\n", + "35\n", + "3800\n", + "100\n", + ";“\n", + ":“\n", + "17\n", + "70%。\n", + "———\n", + "---\n", + "80%\n", + "”,\n", + ",“\n", + "》、《\n", + "”。\n", + "1937\n", + "1938\n", + "1937\n", + "》,\n", + "1934\n", + "1923\n", + ",1925\n", + "”,\n", + "”,\n", + "1920\n", + ",1922\n", + ",15\n", + "100\n", + "15℃/26℃\n", + "14℃/28℃\n", + "15℃/24℃\n", + "11℃/28℃\n", + "10℃/23℃\n", + "13℃/28℃\n", + "13℃/19℃\n", + "14℃/27℃\n", + "11℃/27℃\n", + 
"18℃/22℃\n", + "16℃/20℃\n", + "16℃/22℃\n", + "14℃/19℃\n", + "21℃/25℃\n", + "18℃/24℃\n", + "16℃/26℃\n", + "12℃/17℃\n", + "15℃/22℃\n", + "14℃/18℃\n", + "19℃/25℃\n", + "24℃/31℃\n", + "24℃/32℃\n", + "27℃/35℃\n", + "17℃/23℃\n", + "19℃/27℃\n", + "16℃/21℃\n", + "20℃/29℃\n", + "5℃/20℃\n", + "13℃/18℃\n", + "11℃/22℃\n", + "10℃/19℃\n", + "14℃/24℃\n", + "5℃/16℃\n", + "23℃/28℃\n", + "25℃/31℃\n", + "25℃/30℃\n", + "11℃/19℃\n", + "29℃/35℃\n", + "12℃/23℃\n", + "25℃/39℃\n", + "16℃/31℃\n", + "9℃/18℃\n", + "12℃/25℃\n", + "13℃/27℃\n", + "12℃/22℃\n", + "11℃/17℃\n", + "5—6\n", + "(5\n", + "—5\n", + "(1998—2000)》,\n", + "ABB\n", + "”,\n", + "》,\n", + "”。\n", + ",《\n", + "60\n", + "》,\n", + "43\n", + "60\n", + ",《\n", + ",《\n", + "……\n", + "』,\n", + "』,\n", + "』,\n", + "1937\n", + ",1938\n", + ",1930\n", + "85\n", + "500\n", + "、4\n", + "13℃/27℃\n", + "15℃/26℃\n", + "14℃/26℃\n", + "11℃/24℃\n", + "8℃/22℃\n", + "11℃/25℃\n", + "12℃/19℃\n", + "13℃/24℃\n", + "10℃/25℃\n", + "15℃/23℃\n", + "13℃/23℃\n", + "16℃/25℃\n", + "14℃/19℃\n", + "22℃/29℃\n", + "17℃/26℃\n", + "16℃/28℃\n", + "11℃/18℃\n", + "12℃/23℃\n", + "15℃/18℃\n", + "15℃/25℃\n", + "24℃/30℃\n", + "26℃/33℃\n", + "26℃/32℃\n", + "17℃/23℃\n", + "18℃/26℃\n", + "17℃/24℃\n", + "17℃/28℃\n", + "10℃/20℃\n", + "15℃/20℃\n", + "13℃/24℃\n", + "9℃/20℃\n", + "11℃/22℃\n", + "7℃/18℃\n", + "23℃/31℃\n", + "24℃/29℃\n", + "24℃/29℃\n", + "15℃/19℃\n", + "28℃/36℃\n", + "9℃/23℃\n", + "25℃/39℃\n", + "18℃/27℃\n", + "6℃/14℃\n", + "13℃/28℃\n", + "14℃/29℃\n", + "12℃/24℃\n", + "11℃/16℃\n", + "5—6\n", + "5—7\n", + "(5\n", + "—5\n", + "309\n", + "21\n", + "———“\n", + ",“\n", + "”。\n", + "》。\n", + "———\n", + "”。\n", + ",《\n", + "》,\n", + ":『\n", + "』,\n", + "』、『\n", + "』,\n", + "),\n", + ")。\n", + "』。\n", + ":『\n", + "---\n", + ",20\n", + "8%\n", + "7.2%,\n", + "8%\n", + "8.2%,\n", + "12.8%,\n", + "”。\n", + ",8\n", + "—11\n", + "(5\n", + "—5\n", + "150\n", + "81\n", + "80%\n", + "2、\n", + "1、\n", + "256\n", + "1500\n", + "、5\n", + ":“\n", + "2/3,\n", + "1/2,\n", + "”,\n", + "108\n", + "”,\n", + "”、“\n", + "54\n", + "170\n", + "1.3\n", + ",78\n", + "38\n", + "1955\n", + ",1976\n", + "1942\n", + "1945\n", + "1933\n", + "1937\n", + ",1937\n", + ",1939\n", + ",1932\n", + ",1937\n", + "92\n", + "200\n", + "80\n", + "12341\n", + "10729\n", + "7255\n", + "5510\n", + "23\n", + ",5\n", + "28\n", + "21\n", + "2:\n", + ",1\n", + "1:\n", + ",3\n", + "40\n", + "———\n", + "》、《\n", + "》、《\n", + "1935\n", + "———\n", + "”,\n", + "”。\n", + "1930\n", + ",1947\n", + "98\n", + "』,\n", + "』,\n", + "』,\n", + "』,\n", + "』,\n", + "』;\n", + "』,\n", + "』。\n", + "』(\n", + "”、“\n", + "”、“\n", + "”,\n", + "500\n", + "1998\n", + "3000\n", + "38\n", + ")”\n", + "7740\n", + "80\n", + "1200\n", + "120\n", + "1800\n", + ",10\n", + "6000\n", + "”,2000\n", + ",500\n", + ",2000\n", + "4035\n", + "40\n", + "6.7\n", + "9902\n", + "75\n", + "100\n", + "”,\n", + "”。\n", + "”,\n", + ")5\n", + "》,\n", + "100\n", + "》,\n", + "”。\n", + "”,\n", + "、10\n", + "”,\n", + "2673\n", + "1800\n", + "CO\n", + "70.8%,SO2\n", + "87.6%,CO2\n", + "30%。\n", + "1·6\n", + "、30\n", + "850\n", + "200\n", + "60.33%,\n", + "26.74%\n", + "2·25\n", + "1.42%,\n", + "17%\n", + ",1·4\n", + "25\n", + "34\n", + "VTOS\n", + "1997\n", + "8200\n", + "”,\n", + ",1996\n", + ",122\n", + "1·5\n", + "25\n", + "122\n", + "122\n", + ",“\n", + "”———\n", + "”。\n", + "122\n", + "122\n", + "3、\n", + "1、\n", + ":“\n", + "117\n", + "886\n", + "561\n", + "”。\n", + "91.1%。\n", + "110\n", + "622\n", + ",637\n", + "”,\n", + "110\n", + "46\n", + "110\n", + "110\n", + "110\n", + "110\n", + 
",110\n", + "110\n", + "110\n", + "1·1\n", + "234\n", + "488\n", + "311\n", + "28\n", + "”。\n", + "、191\n", + "”,\n", + "60%,\n", + "”,\n", + "”。\n", + "1996\n", + "1995\n", + ":“\n", + "”。\n", + "———\n", + "113\n", + ",10\n", + "1260\n", + ")18:00\n", + "17:35\n", + "100\n", + "17:25\n", + "110\n", + "19\n", + "398.73\n", + "0.96\n", + "411.78\n", + "”(\n", + "———\n", + "NBA、\n", + "、NEC\n", + "NEC\n", + "NEC\n", + "31\n", + "45\n", + "300\n", + "1996—1997\n", + "27\n", + "60\n", + "100\n", + ",AMF\n", + "22\n", + ":1998\n", + "7.05\n", + ";200\n", + "22\n", + "57,\n", + ";400\n", + "30,\n", + "01\n", + "12℃/25℃\n", + "15℃/27℃\n", + "15℃/26℃\n", + "14℃/20℃\n", + "9℃/22℃\n", + "7℃/23℃\n", + "10℃/20℃\n", + "4℃/19℃\n", + "3℃/19℃\n", + "18℃/27℃\n", + "19℃/23℃\n", + "19℃/28℃\n", + "15℃/23℃\n", + "19℃/26℃\n", + "23℃/32℃\n", + "18℃/28℃\n", + "11℃/22℃\n", + "16℃/19℃\n", + "18℃/25℃\n", + "20℃/25℃\n", + "25℃/31℃\n", + "24℃/33℃\n", + "26℃/32℃\n", + "20℃/25℃\n", + "22℃/28℃\n", + "18℃/27℃\n", + "16℃/26℃\n", + "8℃/21℃\n", + "16℃/24℃\n", + "11℃/23℃\n", + "8℃/19℃\n", + "14℃/22℃\n", + "12℃/22℃\n", + "20℃/26℃\n", + "25℃/31℃\n", + "25℃/30℃\n", + "18℃/24℃\n", + "27℃/37℃\n", + "14℃/21℃\n", + "27℃/36℃\n", + "19℃/30℃\n", + "8℃/17℃\n", + "12℃/18℃\n", + "11℃/17℃\n", + "10℃/15℃\n", + "13℃/23℃\n", + ":8\n", + "(5\n", + "—5\n", + ":510620\n", + ":020—85512526\n", + "159\n", + "1997\n", + "1000\n", + "“1997\n", + "”。\n", + "“863”\n", + "28\n", + "105\n", + "4000\n", + "6000\n", + "2000\n", + ",1000\n", + "1000\n", + "2、\n", + "1、5\n", + "1000\n", + ",“\n", + "7000\n", + ",“\n", + "”1.5\n", + ",“\n", + "”,\n", + "”、“\n", + "”,\n", + "1990\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "』。\n", + "4—6\n", + "5—7\n", + "4—5\n", + "(5\n", + "—5\n", + "60\n", + ",“\n", + "510\n", + ",1999\n", + "1998\n", + "(1998\n", + "21\n", + "1996\n", + "21\n", + "21\n", + "6.\n", + "5.\n", + ";4.\n", + ";3.\n", + ";2.\n", + ";1.\n", + "2—3\n", + "5.\n", + ";4.\n", + ";3.\n", + ";2.\n", + "4.\n", + "3.\n", + ";1.\n", + "”)\n", + "12℃/24℃\n", + "13℃/21℃\n", + "15℃/24℃\n", + "8℃/21℃\n", + "8℃/19℃\n", + "13℃/22℃\n", + "11℃/18℃\n", + "9℃/17℃\n", + "10℃/21℃\n", + "18℃/25℃\n", + "18℃/26℃\n", + "19℃/26℃\n", + "19℃/29℃\n", + "22℃/29℃\n", + "22℃/30℃\n", + "16℃/24℃\n", + "11℃/17℃\n", + "17℃/23℃\n", + "20℃/28℃\n", + "20℃/28℃\n", + "25℃/32℃\n", + "24℃/33℃\n", + "25℃/32℃\n", + "19℃/26℃\n", + "22℃/28℃\n", + "17℃/23℃\n", + "16℃/27℃\n", + "7℃/23℃\n", + "15℃/24℃\n", + "13℃/25℃\n", + "9℃/22℃\n", + "6℃/24℃\n", + "7℃/20℃\n", + "22℃/27℃\n", + "25℃/31℃\n", + "24℃/31℃\n", + "15℃/24℃\n", + "27℃/36℃\n", + "14℃/18℃\n", + "26℃/35℃\n", + "23℃/37℃\n", + "6℃/16℃\n", + "8℃/15℃\n", + "7℃/14℃\n", + "8℃/16℃\n", + "15℃/21℃\n", + ",6\n", + "(5\n", + "—5\n", + ",5\n", + "60%\n", + "》,\n", + "1988\n", + ",1993\n", + "31\n", + "1987\n", + "1999\n", + ",16\n", + "100\n", + "1954\n", + "300\n", + ":『\n", + "25\n", + ",“\n", + "1999\n", + "1999\n", + "”、\n", + "”,\n", + "200\n", + "13℃/20℃\n", + "15℃/24℃\n", + "15℃/25℃\n", + "10℃/25℃\n", + "8℃/14℃\n", + "14℃/18℃\n", + "11℃/17℃\n", + "12℃/18℃\n", + "12℃/18℃\n", + "17℃/27℃\n", + "17℃/27℃\n", + "17℃/24℃\n", + "19℃/29℃\n", + "21℃/27℃\n", + "21℃/29℃\n", + "18℃/28℃\n", + "11℃/18℃\n", + "16℃/27℃\n", + "20℃/31℃\n", + "19℃/32℃\n", + "25℃/30℃\n", + "24℃/34℃\n", + "25℃/33℃\n", + "22℃/28℃\n", + "21℃/31℃\n", + "17℃/26℃\n", + "17℃/28℃\n", + "5℃/20℃\n", + "14℃/22℃\n", + "13℃/22℃\n", + "9℃/20℃\n", + "7℃/21℃\n", + "3℃/14℃\n", + "23℃/28℃\n", + "24℃/29℃\n", + "24℃/29℃\n", + "13℃/24℃\n", + "30℃/36℃\n", + "13℃/22℃\n", 
+ "25℃/34℃\n", + "20℃/36℃\n", + "13℃/21℃\n", + "11℃/17℃\n", + "7℃/15℃\n", + "6℃/15℃\n", + "11℃/17℃\n", + "5—6\n", + "(5\n", + "—5\n", + "37\n", + "25\n", + "”,\n", + "”、“\n", + "”。\n", + "1996\n", + "23\n", + "28\n", + "900\n", + "70\n", + "100\n", + "1986\n", + "81\n", + "84%,\n", + "23\n", + "》、《\n", + "301\n", + "301\n", + "”,\n", + "301\n", + "1997\n", + "28\n", + "77\n", + "21\n", + "275\n", + "65\n", + "68\n", + "2138\n", + "2160\n", + "194\n", + "2137\n", + ",400\n", + "100\n", + "800\n", + ",35\n", + "70%,\n", + "40\n", + "80\n", + "29\n", + "31\n", + "65\n", + "、“\n", + "”,“\n", + "1200\n", + "17\n", + "100\n", + ";“\n", + "”,\n", + "3、5\n", + "95\n", + "38\n", + ":“\n", + "……”2、\n", + "———\n", + "100\n", + "100\n", + "1、5\n", + "71\n", + "92\n", + "400\n", + ":“\n", + "4000\n", + "400\n", + "13℃/23℃\n", + "15℃/27℃\n", + "14℃/28℃\n", + "13℃/28℃\n", + "10℃/26℃\n", + "11℃/25℃\n", + "13℃/21℃\n", + "12℃/26℃\n", + "14℃/25℃\n", + "20℃/28℃\n", + "20℃/29℃\n", + "21℃/31℃\n", + "19℃/29℃\n", + "21℃/27℃\n", + "22℃/31℃\n", + "17℃/28℃\n", + "13℃/18℃\n", + "17℃/28℃\n", + "20℃/32℃\n", + "19℃/31℃\n", + "23℃/30℃\n", + "25℃/34℃\n", + "25℃/32℃\n", + "25℃/32℃\n", + "22℃/34℃\n", + "19℃/28℃\n", + "17℃/28℃\n", + "5℃/24℃\n", + "15℃/24℃\n", + "18℃/26℃\n", + "9℃/20℃\n", + "14℃/21℃\n", + "3℃/10℃\n", + "21℃/26℃\n", + "23℃/28℃\n", + "23℃/27℃\n", + "15℃/22℃\n", + "29℃/37℃\n", + "14℃/21℃\n", + "23℃/34℃\n", + "26℃/38℃\n", + "12℃/23℃\n", + "11℃/16℃\n", + "8℃/16℃\n", + "9℃/17℃\n", + "13℃/22℃\n", + "4—6\n", + "5—6\n", + "(5\n", + "—5\n", + "29\n", + "100\n", + "4000\n", + "600\n", + "27000\n", + "300\n", + "1948\n", + "(18cm×13cm)。\n", + "1948\n", + "”“\n", + "747—200\n", + "26\n", + ";『\n", + "』,\n", + "……\n", + ",『\n", + ",“\n", + "6000\n", + "20%\n", + "50%\n", + "29\n", + "》,\n", + "》、《\n", + "》、《\n", + "》,\n", + "》,\n", + "》,\n", + "》(\n", + "———《\n", + ":“\n", + ":5\n", + "……\n", + "”,\n", + "”,“\n", + "”,\n", + "———\n", + "”,\n", + "”。\n", + "”。\n", + ":“\n", + "”。\n", + "”,\n", + "”,\n", + "”;\n", + "”;\n", + ":“\n", + "”。\n", + "”,\n", + "”,\n", + "》,\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + ":《\n", + "、“\n", + "”,\n", + "1921\n", + "”,\n", + "———\n", + "”,\n", + ",“\n", + "”(1919\n", + "》)。\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "”。\n", + "》,\n", + "、《\n", + "》(\n", + "》(\n", + ":“\n", + "”1898—1998,\n", + "……\n", + ":《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》……\n", + ":“\n", + "”,“\n", + "”。\n", + "”。\n", + ":“\n", + "———\n", + "———\n", + "———\n", + "……\n", + ":“\n", + "”:\n", + "》,\n", + "1947\n", + "1948\n", + "”、“\n", + "1947\n", + "1948\n", + "》、《\n", + "》,\n", + "》。\n", + "”、“\n", + "》、\n", + "》、\n", + "1919\n", + ",1935\n", + "1945\n", + "、“\n", + "”;\n", + "、“\n", + "”。\n", + "1925\n", + ",“\n", + ";“\n", + "”,\n", + "1943\n", + "”,\n", + "”,\n", + "10、\n", + "9、\n", + "8、\n", + "7、\n", + "6、\n", + "627\n", + "5、\n", + "4、\n", + "3、\n", + "2、\n", + "1、\n", + "2/3。\n", + "7、\n", + "6、\n", + "5、\n", + "4、\n", + "1202\n", + "3、\n", + "2、\n", + "230\n", + "100\n", + "500\n", + "1、\n", + "———\n", + "”、“\n", + "400\n", + "”,\n", + "”。\n", + "1571\n", + "200\n", + "1892\n", + "”:\n", + "1640\n", + "1572\n", + ",4\n", + "785\n", + "485\n", + "1571\n", + "———\n", + "2000\n", + "1982\n", + "400\n", + "70\n", + "”。\n", + "”。\n", + "1993\n", + "”,\n", + "”,\n", + "”,\n", + "……\n", + "8000\n", + "600\n", + "500\n", + ":“\n", + "”36\n", + ":“\n", + "1997\n", + "———\n", + ":“\n", + ":“\n", + ":“\n", + ":“\n", + ":“\n", + "23\n", + "1997\n", + "1997\n", + 
":“\n", + "875\n", + "370\n", + "125\n", + "30%\n", + ")、\n", + "70\n", + "1000\n", + "1926\n", + "”。\n", + "”。\n", + "”,\n", + "”。\n", + "1200\n", + "300\n", + "426\n", + "2690\n", + "1070\n", + "29\n", + "40\n", + "”。\n", + "”。\n", + ",“\n", + "22\n", + "”。\n", + "29\n", + "27\n", + "29\n", + ":“‘\n", + "28\n", + "80\n", + "78\n", + "28\n", + "29\n", + "○,\n", + "138.70\n", + "1.57\n", + ",4\n", + "29\n", + "139.20\n", + "1991\n", + "2·2%,\n", + "4·8%\n", + "2·6%,\n", + "8970.2\n", + "33.63\n", + "28\n", + "28\n", + "1933\n", + "29\n", + "28\n", + "28\n", + "27\n", + "27\n", + "211.2\n", + "1.5\n", + "3.44\n", + "27\n", + "18.8\n", + "28\n", + "29\n", + "45%\n", + "50%\n", + "51%\n", + "56%,\n", + "27\n", + "150%。\n", + "27\n", + "60%\n", + "80%,\n", + "1∶6.2。\n", + "28\n", + "2010\n", + ",1997\n", + "55.7\n", + "1996\n", + "34.3%,\n", + "8.3%\n", + "1997\n", + "1272\n", + "41.54\n", + "19.83\n", + "27\n", + "1997\n", + "21\n", + "28\n", + "1999\n", + ",“\n", + "”。\n", + "28\n", + "”。\n", + ",“\n", + "”。\n", + ",“\n", + "”,\n", + "”。\n", + "”。\n", + ",“\n", + ":“\n", + "”。\n", + "”。\n", + ",“\n", + "”,\n", + ",“\n", + "》。\n", + "”。\n", + "160\n", + "”,\n", + "”。\n", + "”,\n", + ",“\n", + "28\n", + "28\n", + "\n", + "\n", + "19\n", + "28\n", + "”,\n", + "、16\n", + "”,\n", + "“202”\n", + "”“\n", + "200\n", + ":“\n", + ",2000\n", + "”1997\n", + "1996\n", + "———\n", + "810\n", + "3000\n", + "98%。\n", + "98%\n", + "1997\n", + "91\n", + "48%,\n", + "11.3%,“\n", + "”、“\n", + "70%。\n", + "110”\n", + "110”\n", + "”,\n", + "45\n", + "200\n", + "3000\n", + "———“\n", + "”,\n", + "40\n", + "27\n", + "2560\n", + "150\n", + "”。\n", + "5000\n", + "1996\n", + "42\n", + "1100\n", + ",3\n", + "”,\n", + "700\n", + "”。\n", + ",70%\n", + "230\n", + "“110”\n", + "、“\n", + "607\n", + "”,\n", + "36\n", + ",58\n", + "51\n", + "1996\n", + "1995\n", + "200\n", + "1997\n", + "275188\n", + ",1982\n", + "91091\n", + ",10\n", + "1993\n", + "191657\n", + "”,\n", + "1984\n", + ",8\n", + "500\n", + "1000\n", + "1979\n", + "》,\n", + "---\n", + "1994\n", + "28\n", + "8406\n", + "1173\n", + "400\n", + "113\n", + "100%。\n", + "1996\n", + "23\n", + "6080\n", + ",1638\n", + "”,\n", + "1578\n", + "”,\n", + "1996\n", + "”:\n", + "99.46%,\n", + "94.58%。\n", + "110\n", + "”、“\n", + "”;“\n", + "”。\n", + "》、《\n", + "》,\n", + "”。\n", + "2·\n", + "1·\n", + "1998\n", + ":4·\n", + "3·\n", + ";2·\n", + ";1·\n", + ";4·\n", + "(“189”\n", + ")。\n", + "3·\n", + ";2·\n", + ";1·\n", + "100\n", + "3000\n", + "”。\n", + "”。\n", + "”,\n", + "40\n", + "”,\n", + "1000\n", + "6.8\n", + "6·8\n", + ",“\n", + "……\n", + "”,\n", + "”。\n", + ":“\n", + "1000\n", + "”。\n", + "1997\n", + ":“\n", + "300\n", + "』---\n", + "204\n", + "45%\n", + "”,\n", + "”,\n", + "130\n", + "25\n", + ":“\n", + "25\n", + "———\n", + "26\n", + "———\n", + "26\n", + "”、\n", + "”、\n", + "26\n", + ",60\n", + "26\n", + "1993\n", + "1988\n", + "26\n", + "』,\n", + "、189\n", + ":“\n", + "5.1\n", + "60\n", + "23\n", + "23\n", + ",1948\n", + "25\n", + ",19\n", + "1998\n", + "“ISDN\n", + "”。\n", + "147\n", + "52\n", + "”,\n", + ",“\n", + "”。\n", + ",CT、\n", + "”,\n", + "162\n", + "46\n", + ")。\n", + "19\n", + "90%\n", + "530\n", + "59·4%。\n", + "60\n", + "》,\n", + "”、\n", + "25\n", + "23\n", + "25\n", + "60\n", + "”、\n", + "”,\n", + ":“\n", + "1995\n", + "1489705\n", + "53.29%,\n", + ":“\n", + "150\n", + ",250\n", + "6000\n", + ";12\n", + ";3\n", + "280\n", + "800\n", + "25\n", + "25\n", + "28\n", + "60\n", + ",81\n", + "60\n", + "166\n", + "60\n", + "800\n", + "496\n", + "22\n", + "4000\n", 
+ "》,\n", + "》,\n", + "”、“\n", + "”、\n", + "》、\n", + ",5\n", + "25\n", + "———\n", + "“110”\n", + "“122”\n", + "4150\n", + "105\n", + "1996\n", + "110\n", + "174\n", + "35\n", + "110\n", + "”、“\n", + "19\n", + "21\n", + "102\n", + "102\n", + "170\n", + "220\n", + "2200\n", + "”。\n", + ":“\n", + "”250\n", + "250\n", + "900\n", + "200\n", + "”,\n", + "21\n", + "”,\n", + ":“\n", + "”、\n", + "『\n", + "』\n", + "『\n", + "』\n", + "』,\n", + "』,\n", + "23\n", + "BP\n", + "28\n", + "300\n", + "25%,\n", + "”、“\n", + "”,\n", + "50%\n", + "1998\n", + "22\n", + "》,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,\n", + "———\n", + "”,\n", + "———\n", + "———\n", + "147\n", + "2115\n", + "500\n", + "……\n", + "40\n", + "120\n", + "1997\n", + "”。\n", + ",6000\n", + "28\n", + ":“\n", + "……\n", + "1996\n", + "”。\n", + ":“\n", + "”。\n", + ":“\n", + "41%、21%\n", + "29%\n", + ":“\n", + ":“\n", + ",3\n", + "25\n", + "40\n", + "23\n", + "31\n", + "23\n", + "26\n", + ",1\n", + "32\n", + "31\n", + ",365\n", + "400\n", + ":“\n", + "》,\n", + "1983\n", + "80\n", + "19\n", + "……\n", + ":“\n", + "118\n", + "80%\n", + "……\n", + ",50\n", + "34\n", + "70\n", + "”14\n", + ":“\n", + "……\n", + "……”\n", + "”,\n", + "”。\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "21\n", + "32\n", + "21\n", + "21\n", + "36\n", + "22\n", + "77\n", + "21\n", + "1620\n", + "1982\n", + "2.2\n", + "840\n", + "”。\n", + "91%\n", + "146\n", + "753\n", + "(1730\n", + "17\n", + "170\n", + "23\n", + "1948\n", + ",1945\n", + "1944\n", + "”、“\n", + "”、“\n", + "”,\n", + "1941\n", + "1938\n", + ",1940\n", + ",200\n", + "700\n", + "1937\n", + ",1940\n", + "1945\n", + "3000\n", + "———\n", + "”。\n", + "1935\n", + "1936\n", + "5900\n", + "1936\n", + "51\n", + "1935\n", + "1931\n", + "”,\n", + "1884\n", + ",1915\n", + "———\n", + "21\n", + "21\n", + "21\n", + ",21\n", + "1/5\n", + "1978\n", + "25\n", + "22\n", + "200\n", + "”,\n", + "”,\n", + "》、《\n", + "》、NHK\n", + ",1937\n", + "”。\n", + "”。\n", + "100\n", + "”。\n", + "100\n", + "”。\n", + "1946\n", + ":“\n", + ",1946\n", + "27\n", + ":“\n", + "”,\n", + "”。\n", + "66、67、69\n", + "22\n", + "———\n", + ",110\n", + "、110\n", + "3·\n", + "2·\n", + "、110\n", + "、110\n", + "、110\n", + "”、“\n", + "”。\n", + "“110”\n", + "“110”\n", + "”。\n", + "21\n", + ":“\n", + "1994\n", + "———\n", + "80\n", + "1995\n", + ",“\n", + "118\n", + "231500\n", + "1996\n", + "35\n", + "118\n", + "1995\n", + ",1995\n", + "---\n", + ",1995\n", + "120\n", + "1997\n", + ")35\n", + "285\n", + "2920\n", + "”,\n", + "”,\n", + ",1997\n", + "67\n", + "450\n", + "”,\n", + "1100\n", + "3·3\n", + "2·3\n", + "25\n", + "1995—2010\n", + "2·3\n", + "”。\n", + ":“\n", + "……\n", + "———\n", + ",1993\n", + "1.45\n", + "200\n", + "25\n", + ",28\n", + "8000\n", + "35\n", + "31\n", + "5000\n", + "5000\n", + "50%\n", + "》。\n", + "1988\n", + "100\n", + "5200\n", + "),\n", + "』;\n", + "』,\n", + "』,\n", + "》。\n", + "』。\n", + "》,\n", + "23\n", + "7·\n", + "6·\n", + ";4·\n", + ";3·\n", + ";2·\n", + "“110”\n", + "1994\n", + "8000\n", + "45\n", + "410\n", + "1995\n", + "1.25\n", + "1300\n", + "”、“\n", + "130\n", + ",33\n", + ",35\n", + ",78\n", + ",51\n", + "”:\n", + "”,\n", + ",“\n", + "”,\n", + "”,\n", + ":“\n", + "70\n", + "122\n", + "21\n", + "169\n", + ",1997\n", + "27.3\n", + "19\n", + "21\n", + "3501\n", + "300\n", + "21%。\n", + "”。\n", + "1996\n", + "35\n", + "2.6\n", + "29\n", + "23\n", + "60\n", + "19\n", + ",“\n", + ",5\n", + ",5\n", + "1994\n", + "1993\n", + "1986\n", + "1993\n", + "、1998\n", + "”,\n", + ",“\n", + "”“\n", + "11.7\n", + "36.2%。\n", + "31.3\n", + "48.9%。\n", 
+ "84\n", + "2500\n", + "140\n", + "3500\n", + "1100\n", + "1500\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "289\n", + "195\n", + "98\n", + ",《\n", + ",40\n", + "400\n", + "19\n", + "110\n", + "、VCD\n", + "』,\n", + "』,\n", + "110\n", + "48273\n", + "28481\n", + "3958\n", + "159\n", + "1998\n", + "45\n", + "59\n", + "35\n", + "1997\n", + "740\n", + "17.1\n", + "1993\n", + "”,\n", + "”。\n", + "、24\n", + "、11\n", + "、4\n", + "、8\n", + "”,\n", + "3.8\n", + "1996\n", + "8.3\n", + "36.5\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "19\n", + "),\n", + "1995\n", + "”———\n", + ":“\n", + "”“\n", + "600\n", + "803\n", + "292\n", + ",1994\n", + "1995\n", + "———\n", + "———\n", + "1987\n", + "1983\n", + "90%\n", + "1984\n", + "1/3,\n", + "12%\n", + "———\n", + "———\n", + "1997\n", + "3.26\n", + "25.16\n", + "80\n", + "2000\n", + "28\n", + "1983\n", + "1987\n", + "2475\n", + "”。\n", + "70\n", + "80\n", + ":“\n", + "40\n", + "1997\n", + "2837\n", + "1989\n", + ",6\n", + ",1981\n", + "49\n", + "———\n", + ":『\n", + ":『\n", + "……\n", + "5522.86\n", + "———\n", + "1979\n", + "1996\n", + "6100\n", + "1978\n", + "》,\n", + ",“\n", + "1856\n", + "1873\n", + "17\n", + "1839\n", + "”、“\n", + "”、“\n", + "”、“\n", + "200\n", + "22\n", + "26\n", + "89\n", + "60\n", + "86%,\n", + "10.8%,\n", + "3%,\n", + "60%\n", + ",“\n", + "”。\n", + "1995\n", + ",“\n", + "……\n", + "”。\n", + "”,\n", + ")、\n", + "”,\n", + "”,\n", + "0.1\n", + "48\n", + "48\n", + "31\n", + "100\n", + "100\n", + "……\n", + ",11\n", + ",1\n", + "21\n", + "25\n", + "90\n", + "95\n", + "85\n", + "———\n", + "90\n", + "80\n", + "900\n", + "900\n", + "900\n", + "1996\n", + "1994\n", + ",1997\n", + ":“\n", + "……\n", + "90\n", + "6.6\n", + "”。\n", + "”。\n", + "6.6\n", + "6.6\n", + "6.6\n", + "———\n", + ",“\n", + ",“\n", + "”。\n", + ",“\n", + "”“\n", + "”、“\n", + ",“\n", + "97%,\n", + "187\n", + "95\n", + "100%,\n", + "95%,\n", + "23%,\n", + "21.2%,\n", + "95%,\n", + "”138\n", + "73%,\n", + "250\n", + "39\n", + "19.2\n", + "”,\n", + "55\n", + ",49\n", + "”,\n", + "”、\n", + "”。\n", + "5000\n", + "25\n", + "77\n", + "150\n", + "”、“\n", + "”。\n", + "8000\n", + "300\n", + "215\n", + "1.58\n", + "”。\n", + "110\n", + ",1997\n", + ",28\n", + "1995\n", + "47\n", + "200\n", + "1.5\n", + "4200\n", + "70\n", + "200\n", + "1680\n", + "”:\n", + "1990\n", + "28\n", + "2.5\n", + "1.5\n", + ",1997\n", + "600\n", + "536\n", + "1996\n", + "1160\n", + "”、\n", + "”、\n", + "”。\n", + "”、“\n", + "”;\n", + "1991\n", + "”,\n", + "”。\n", + "---\n", + "100\n", + ",90\n", + ",100\n", + ",50\n", + "200\n", + "》。\n", + ",40\n", + "”,\n", + "”、“\n", + "95%\n", + "”。\n", + "”。\n", + ":“\n", + "80\n", + "》,\n", + "200\n", + "400\n", + "”,\n", + "”。\n", + "1995\n", + "1%\n", + "1%—2%,\n", + "8%—13%,\n", + "40%—50%,\n", + "70%—80%。\n", + "50%\n", + "100%\n", + "300\n", + "1978\n", + "1978\n", + ",1997\n", + "》,\n", + "1000\n", + "7%—10%\n", + "4000\n", + "1500\n", + "……\n", + "……\n", + "……\n", + "……\n", + "……\n", + ":『\n", + ":“\n", + "……\n", + "”,\n", + "”……\n", + "……\n", + "1997\n", + "1965\n", + "———\n", + "———\n", + "……\n", + "———\n", + "……\n", + "———\n", + "……\n", + ",“\n", + "”,\n", + ",“\n", + "”。\n", + "———\n", + "———\n", + "……\n", + "———\n", + ",T\n", + "———\n", + "———\n", + "———\n", + "……\n", + ",“\n", + "”,\n", + "”,\n", + "———\n", + "”,\n", + "……\n", + ")1996\n", + "”,\n", + ",《\n", + ",“\n", + "”,\n", + "”,\n", + "———“\n", + ":“\n", + ",《\n", + "1994\n", + "》。\n", + "———\n", + "》,\n", + "———\n", + "”。\n", + "”,\n", + "1000—50000\n", + "”,\n", + "”。\n", + "———\n", + "”、“\n", + 
"1947\n", + "》,\n", + "1.4\n", + "》。\n", + "---\n", + "”,\n", + ",“\n", + "———\n", + "———\n", + "———\n", + "”,\n", + ",“\n", + "99%\n", + "90%\n", + "1%\n", + "10%\n", + "5000\n", + "5000\n", + "4000\n", + "99%\n", + "90%\n", + "1%\n", + "10%\n", + "———\n", + "……\n", + "———\n", + "》。\n", + ",50\n", + "”。\n", + "---\n", + ")ChinaByte———“\n", + "SKY:www.sky.co.uk/worldcup\n", + ":www.fifa.com\n", + ":france98.srsnet.com\n", + ":worldcup98.sport.gov.cn\n", + ":www.worldcup98.net.cn\n", + ",ChinaByte\n", + "(hit)\n", + "45\n", + "ChinaByte\n", + ",“\n", + ";“\n", + ",ChinaByte\n", + "IT\n", + "ChinaByte,\n", + "———\n", + "(www.worldcup.com.cn)。\n", + "SportsLine\n", + "100\n", + "CBSSportsLine\n", + "Yahoo\n", + ",“\n", + "www.france98.com\n", + "www.worldcup98.com\n", + "、ESPN(\n", + "2000\n", + "PC\n", + "100\n", + "10.5\n", + "SYBASE\n", + "www.france98.com\n", + "170\n", + "57\n", + "5000\n", + "1996\n", + "Web\n", + "”。\n", + "1930\n", + "110\n", + "150\n", + "2.5\n", + "”。\n", + "……\n", + "……\n", + "4000\n", + "23\n", + "40\n", + "120\n", + "40\n", + "),\n", + "8000\n", + "6000\n", + "180\n", + "5000\n", + "9000\n", + "1000\n", + "150\n", + "29\n", + ",1996\n", + "62\n", + "”,\n", + "3000\n", + "40\n", + "1954\n", + "———\n", + "1950\n", + ",20\n", + "1948\n", + "1947\n", + "”。\n", + "70\n", + "●《\n", + "》(\n", + "●『\n", + ",『\n", + "》、《\n", + "》、《\n", + "》(\n", + ",《\n", + "》、《\n", + "》、《\n", + "》)\n", + "》、《\n", + "●“\n", + "》,\n", + ",《\n", + "、《\n", + "》(\n", + ")、\n", + "》、\n", + "》、\n", + "》、\n", + "”,\n", + "》,\n", + "》、《\n", + "》,\n", + "》,\n", + "》,\n", + "”,\n", + "》、《\n", + "》、《\n", + "》,\n", + "》,\n", + "863》,\n", + "》、《\n", + "》、\n", + "》、《\n", + "》、《\n", + "———\n", + "》,\n", + "———\n", + "”。\n", + "……\n", + ":《\n", + "1998\n", + "……\n", + "”。\n", + "……\n", + ":《\n", + "1998\n", + "……\n", + ":《\n", + "1998\n", + "、“\n", + "”、\n", + "……\n", + ":“\n", + "”。\n", + "”,\n", + "』,\n", + ":『\n", + "』,『\n", + "』,『\n", + "』,『\n", + "』,\n", + "……『\n", + "』。\n", + "》、\n", + "》。\n", + ":『\n", + "……』\n", + "』,\n", + "』、『\n", + "』、『\n", + "》,\n", + "》,\n", + "》。\n", + "』、『\n", + "》,\n", + "》,\n", + "》,\n", + "》、\n", + "》、\n", + "》、\n", + "》、\n", + "》、\n", + "》、\n", + "”、\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”、\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "1997\n", + "0.81%,\n", + "”,\n", + "26\n", + "1993\n", + "”,\n", + ":“\n", + "182\n", + "———\n", + ",“\n", + "”,\n", + "……\n", + "8%\n", + ":“\n", + "100\n", + "……\n", + "40\n", + ":“\n", + ":“\n", + "———\n", + "1996\n", + "……\n", + "1991\n", + "33\n", + "———“\n", + "1000\n", + "”。\n", + "———\n", + "1992\n", + "———\n", + "———\n", + "”,\n", + "680\n", + "”、“\n", + "”、“\n", + "”。\n", + "150\n", + "3000\n", + "》、《\n", + "》、《\n", + "77\n", + "』,\n", + ",1997\n", + "1995\n", + "150\n", + "31\n", + ",4\n", + "17\n", + "———\n", + "———\n", + ":(010)64265344\n", + "3—5\n", + "5%\n", + "5·76%6·30%\n", + ",4\n", + ",1\n", + "400\n", + "100\n", + "550\n", + "1000\n", + "5000\n", + "40\n", + "”,\n", + "”。\n", + "40\n", + "”,\n", + "100\n", + ",1996\n", + "40\n", + "”、“\n", + "1997\n", + "”,\n", + "1997\n", + "134\n", + "200\n", + "1996\n", + "1995\n", + ",3\n", + "1995\n", + "600\n", + "34\n", + "1995\n", + "1993\n", + "31\n", + ",1987\n", + "1985\n", + "1∶3\n", + "60\n", + "5000\n", + "1900\n", + "”,\n", + "600\n", + "1995\n", + ",11\n", + "---(\n", + ")(\n", + "”,\n", + "』(\n", + "”。\n", + "”,\n", + "15%。\n", + "”、“\n", + ",“\n", + ",2\n", + "6.5\n", + "”,\n", + "”,\n", + "”、“\n", + "80%—85%,\n", + "6.5\n", + ",“\n", + ",6.5\n", 
+ "”,\n", + "1994\n", + "6.5\n", + ",4\n", + ":“\n", + ":“\n", + "6.5\n", + "———\n", + "”;\n", + "1998\n", + "1997\n", + "400\n", + "719\n", + ",1996\n", + "647\n", + "1998\n", + "1997\n", + "4.6%,\n", + "4.4\n", + "1994\n", + "1992—1996\n", + "5.9%、3.2%、5%、5.6%、9%。\n", + "1985—1988\n", + "1.2%,\n", + "1989—1991\n", + "0.7%。\n", + "”,\n", + "10—15\n", + ")。\n", + "80\n", + "90\n", + "1985\n", + "1996\n", + "58.5%\n", + "43.5%,\n", + "75%\n", + "”。\n", + ",1996\n", + "2.5\n", + ",80\n", + "48%。\n", + ",1996\n", + "56.63%,\n", + "(1985—1996\n", + "56.33%—58.8%\n", + "1985—1996\n", + "397.6\n", + "1926.04\n", + "4.17%,\n", + "5.68%\n", + "80\n", + "62%,\n", + "1978—1984\n", + "133.6\n", + "355.3\n", + "15.1%。\n", + "80\n", + "●9\n", + ":“\n", + "”。\n", + "”。\n", + "200\n", + "”。\n", + "80\n", + "———\n", + "300\n", + "2:6\n", + "27\n", + "1:\n", + "0∶2\n", + "28\n", + "1/8\n", + ",4\n", + "1/8\n", + "”。\n", + "”,\n", + "1/8\n", + "0∶2\n", + "100\n", + "1∶4\n", + "3∶0\n", + "3∶0\n", + "2∶1\n", + "2∶1\n", + "2∶1\n", + "1∶2\n", + "2∶1\n", + "0∶2\n", + "27\n", + "500\n", + "1—6\n", + "17\n", + "……“\n", + ":“\n", + "”,\n", + "80%\n", + "3∶1\n", + "1/8\n", + "”。\n", + "0∶0\n", + "0∶0\n", + "”。\n", + "”———\n", + "1/8\n", + "1∶4\n", + "0∶1\n", + "2∶1\n", + "29\n", + "4、6\n", + "———“\n", + "3、\n", + "2、\n", + "』:\n", + "GNJ\n", + "1、\n", + "1994\n", + "4、\n", + "3、\n", + "2、\n", + ":“\n", + "”1、\n", + "100\n", + "》,\n", + "6000\n", + "4、\n", + "3、\n", + "2、\n", + "1、\n", + "”、“\n", + "1997\n", + "2600\n", + "”、“\n", + "”,\n", + "”。\n", + "1992\n", + "21\n", + "25\n", + ",“\n", + "”、“\n", + "”。\n", + "2000\n", + "2004\n", + "”。\n", + "6∶1\n", + "”,\n", + "0∶0\n", + ",D\n", + "、BBC\n", + ":“\n", + "1978\n", + "1994\n", + "”、“\n", + "---\n", + "40\n", + "7∶2\n", + ":“\n", + "”,\n", + "”。\n", + ":“\n", + ":“\n", + "6∶1\n", + ":“\n", + ",“\n", + "……”\n", + "A、B\n", + "23\n", + "17\n", + "A、B\n", + ",A\n", + ";B\n", + "27\n", + "1998\n", + "27\n", + "17\n", + "300\n", + "3∶0\n", + "32\n", + "1/8\n", + ",C、D\n", + ",8\n", + "6∶1\n", + "3∶1\n", + "26\n", + ",6\n", + "1∶1\n", + "2∶2\n", + "25\n", + "LG\n", + "0∶0\n", + "LG\n", + ")6\n", + "23\n", + "21∶12,20∶22,21∶14。\n", + "2∶1\n", + "2∶1\n", + "23\n", + "—60\n", + "—55\n", + "105328\n", + "1096\n", + "3007142\n", + "1997\n", + "19\n", + "1997\n", + "66.5\n", + "56.8\n", + "169.2\n", + "158.6\n", + "28.6%\n", + "1997\n", + "71.4%,\n", + "71.2%,\n", + "71.7%。\n", + ":“\n", + "”;“\n", + "”;“\n", + "……”\n", + ":“\n", + "”,“\n", + "”。\n", + "2∶1\n", + ",“\n", + "3∶0,\n", + "”。\n", + "”。\n", + "1%\n", + "”。\n", + "”,\n", + "23\n", + "23\n", + "200\n", + "1∶2\n", + "3∶0\n", + "17\n", + "2∶2\n", + "1∶2\n", + "600\n", + "25\n", + "10000\n", + "400\n", + "21\n", + "34\n", + "19\n", + "22\n", + "383.42\n", + "1543.38\n", + "N2\n", + ",555\n", + "22\n", + ":1998\n", + ",555\n", + ")6\n", + "23\n", + ",6\n", + "60\n", + "58\n", + "1966\n", + "1600\n", + "1/8\n", + "1∶0\n", + "22\n", + ",G\n", + ":D\n", + "32\n", + "22\n", + "32\n", + "”。\n", + "96\n", + ",15\n", + "21\n", + "112\n", + "19\n", + "”,\n", + "”,\n", + "43\n", + "2.5\n", + "22\n", + "2∶1\n", + ",10\n", + "44\n", + ",19\n", + ",10\n", + ",21\n", + ",4\n", + "21\n", + ",10\n", + "1∶1\n", + "2∶1\n", + "23\n", + "500\n", + "500\n", + "1998\n", + "”,\n", + "21\n", + "21\n", + "NEC\n", + "21\n", + "2∶1\n", + "27∶15,\n", + "6∶2。\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "”。\n", + "”、“\n", + "”。\n", + "”。\n", + "2∶1\n", + ";4\n", + ",3\n", + "———\n", + "H1,\n", + "1/8\n", + "E1,\n", + "0∶0,\n", + "B1,\n", + 
"1/8\n", + "B2\n", + "60\n", + "21\n", + "21\n", + ",15\n", + "100\n", + "39\n", + "40\n", + "2∶1\n", + "21\n", + "1∶0\n", + "22\n", + "36\n", + "2∶1\n", + "3∶0\n", + "2∶1\n", + "0∶2\n", + "2∶1\n", + "2∶1\n", + "2∶1\n", + "2∶1\n", + "2∶1\n", + ")。\n", + ",6\n", + "4000\n", + "101\n", + "26\n", + "”。\n", + ",“\n", + "”,“\n", + "”。\n", + "”。\n", + ",“\n", + "”。\n", + ":“\n", + "90\n", + ",8\n", + ",“\n", + "”。\n", + "21\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "”。\n", + ",“\n", + "”。\n", + "”,\n", + ":“\n", + "34\n", + "N2\n", + "555\n", + "22\n", + "555\n", + "21\n", + "19\n", + "0∶1\n", + "2∶2\n", + "5∶0\n", + "0∶0\n", + "34\n", + ",20\n", + "27\n", + "21\n", + "22\n", + "2∶0\n", + "2∶2\n", + "31\n", + "21\n", + ",H\n", + "5∶0\n", + "、《\n", + "1998\n", + "homesick。\n", + "”!\n", + ",《\n", + "”,\n", + ")(\n", + "》,\n", + "》、《\n", + "……\n", + "1950\n", + "1953\n", + ",1956\n", + "……\n", + ":“\n", + "……”\n", + "……\n", + "……\n", + "》,\n", + "》、《\n", + "……\n", + "》、《\n", + "》,\n", + "》,\n", + "》、《\n", + "》,\n", + "1950\n", + "1976\n", + ",1979\n", + "》。\n", + "1958\n", + "1956\n", + "26\n", + "》。\n", + ",《\n", + "》,\n", + "1∶0\n", + "92\n", + "”,\n", + "”,\n", + "1962\n", + "3∶0。\n", + ":“\n", + ",“\n", + "”。\n", + "1982\n", + "1∶1\n", + "1/4\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "5000\n", + "9800\n", + "1.4\n", + ",“\n", + "”。\n", + "3∶0\n", + ",B\n", + ",“\n", + "”。\n", + "1982\n", + "0∶3\n", + "1/4\n", + ",LG\n", + ",B\n", + ":1998\n", + "28\n", + "25、26\n", + "43\n", + ",5\n", + "3∶0\n", + ",6\n", + "17\n", + "1∶0。\n", + ",7\n", + ",C\n", + "1∶1\n", + "4∶0\n", + "6∶4\n", + "0∶0\n", + "3∶1\n", + "442\n", + "433\n", + "60\n", + "1938\n", + "”。\n", + "32\n", + "19\n", + "600\n", + ",A\n", + "”。\n", + "23\n", + ",《\n", + "”。\n", + "”、“\n", + "”,\n", + "3∶0\n", + "3∶0\n", + "23\n", + ",9\n", + ",11\n", + "1∶1\n", + "17\n", + "19\n", + "160\n", + "90\n", + "105\n", + "21\n", + "2∶0。\n", + "33\n", + "2∶0\n", + "21\n", + "17\n", + "1∶1\n", + "1∶1\n", + ")(\n", + ",“\n", + "”。\n", + "32\n", + ",6\n", + ",“\n", + ":“\n", + "》,\n", + ",16\n", + "50%。\n", + "1992\n", + "40%;1994\n", + "31%;1996\n", + "28%。\n", + "”。\n", + "1994\n", + "2.5\n", + ",1990\n", + "2.25\n", + ",16\n", + "37\n", + "2.31\n", + "”。\n", + "32\n", + "———\n", + "”、“\n", + "”、“\n", + "(www.worldcup98.net.cn)\n", + "200\n", + "1400\n", + "49\n", + "37\n", + "2.31\n", + "3、\n", + "2、\n", + "1、\n", + "……\n", + "5000\n", + "4000\n", + "1000\n", + "2000\n", + "3700\n", + ",5\n", + "40\n", + ":“\n", + ":“\n", + ",20\n", + "3、\n", + ",\n", + "○\n", + "1、\n", + "110\n", + "110\n", + "1997\n", + "110\n", + "2、\n", + "1、\n", + "3、\n", + "2、\n", + ",3000\n", + "7000\n", + "21\n", + "34\n", + "28\n", + "NBA\n", + "86∶82\n", + "3∶1\n", + "100\n", + "704\n", + "4∶2\n", + "3∶1,\n", + ":“\n", + "”1·5\n", + "”。\n", + "”,\n", + "———\n", + "32\n", + "”,\n", + "32\n", + "19\n", + "”。\n", + "58\n", + ",9\n", + "37\n", + ",7\n", + "2∶2\n", + "40\n", + "2∶2\n", + ",10\n", + "700\n", + "1999\n", + "1999\n", + "“9”\n", + ",1999\n", + "500\n", + "46\n", + ",“\n", + ":“\n", + ":《\n", + "———\n", + "15%\n", + "27\n", + "1997\n", + "1998\n", + "18.7\n", + "94\n", + "1904\n", + "”。\n", + ":6\n", + "26\n", + "2∶1\n", + "28\n", + "38\n", + ",7\n", + "39\n", + ",5\n", + "2∶1\n", + "1995\n", + "2∶1\n", + "3∶0\n", + ",“\n", + "45\n", + "”,\n", + ":“\n", + "8000\n", + "———\n", + "”,\n", + "32\n", + ",1982\n", + "1996\n", + "———\n", + "41.8%\n", + "5625\n", + ":“\n", + "22\n", + ",“\n", + "”。\n", + ":“(\n", + "”。\n", + "21\n", 
+ ",1200\n", + "———\n", + "”。\n", + "———\n", + "2006\n", + ",2002\n", + "2000\n", + "40\n", + "2002\n", + "23\n", + "”。\n", + "”———\n", + "”,\n", + ",“\n", + "2、\n", + "———“\n", + "”。\n", + "1、\n", + "———“\n", + "”。\n", + ":“\n", + ":“\n", + "88∶93\n", + "NBA\n", + "2∶1\n", + "NBA\n", + "43\n", + "54∶96\n", + ",“\n", + "”。\n", + "32\n", + "40\n", + "“4”,\n", + "70\n", + "———\n", + "———\n", + "70∶59\n", + "83∶81\n", + "41\n", + "37∶28\n", + "72∶67\n", + "71∶65\n", + "No.1(\n", + ")。\n", + "”,\n", + "---\n", + ":“\n", + ":“\n", + "70\n", + "191\n", + "111\n", + "80\n", + "———\n", + "21\n", + "62\n", + "2/3\n", + "111\n", + "80\n", + "191\n", + "10∶21、21∶19\n", + "21∶9\n", + "18∶21、21∶17\n", + "21∶11\n", + "79∶93\n", + "45∶44\n", + "17\n", + "100\n", + "180\n", + "2/3\n", + "(24\n", + "94\n", + "83227\n", + "66124\n", + "57122\n", + "47219\n", + "54419\n", + "53518\n", + "53518\n", + "45417\n", + "36415\n", + "42714\n", + "35514\n", + "26512\n", + "32811\n", + "17510\n", + "50。\n", + "62\n", + "56。\n", + "74。\n", + "10000\n", + "32\n", + "57\n", + "49。\n", + "5000\n", + "10000\n", + "5000\n", + "46\n", + "23,\n", + "53\n", + "76\n", + "1500\n", + "61。\n", + "800\n", + "01\n", + "48。\n", + "200\n", + "22\n", + "96。\n", + "100\n", + "95\n", + "17\n", + "36\n", + "2∶1\n", + "2∶1\n", + "21∶10\n", + "14∶22\n", + "13∶17\n", + "21∶19\n", + "3∶0\n", + "1∶1\n", + "0∶0\n", + "1∶1\n", + "0∶1\n", + "4∶2\n", + "0∶0\n", + "37\n", + "0∶1\n", + "1∶1\n", + "WNBA\n", + "WNBA\n", + "WNBA(\n", + "3∶0\n", + "0∶3\n", + ",3\n", + "7∶15、12∶15、7∶15。\n", + "———\n", + "》,\n", + ";“\n", + "”,“\n", + "”。\n", + ",“\n", + "”。\n", + "”,\n", + "1984\n", + "……”\n", + ",“\n", + ",“\n", + ",“\n", + "”。\n", + "”。\n", + "”,\n", + "”。\n", + "》,\n", + "》(\n", + "),\n", + "”,\n", + "”。\n", + "”。\n", + ":“\n", + "……\n", + "……\n", + "1952\n", + "》,\n", + "1972\n", + "———\n", + "、、\n", + "》,\n", + "”。\n", + "1947\n", + "”;\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,\n", + "1994\n", + ",“\n", + "”。\n", + "”。\n", + ":“\n", + "2∶1\n", + "0∶2(3∶6、2∶6)\n", + "3000\n", + "2000\n", + "32\n", + "”,\n", + "7000\n", + "8000\n", + "80\n", + "45\n", + "2.5\n", + "”,\n", + "26\n", + ",56\n", + ";6\n", + ",“\n", + "”。\n", + "1∶1\n", + "1∶0\n", + "1∶2\n", + "1998\n", + "1∶1\n", + "”。\n", + ",“\n", + "”。\n", + "”,\n", + "”。\n", + "1988\n", + "3∶1\n", + "15∶11\n", + "15∶7\n", + "16∶14\n", + "3∶0\n", + "15∶7\n", + "3∶1\n", + "31\n", + "35\n", + "83∶83,\n", + "45∶40\n", + "71∶88\n", + "93∶95\n", + "NBA\n", + "”,\n", + "3.5\n", + "85∶88\n", + "(NBA)\n", + ",300\n", + "13267\n", + "12483\n", + "17\n", + "354\n", + ",218\n", + "218\n", + "1997\n", + "600\n", + "”。\n", + ":“\n", + "”……\n", + "”。\n", + ":“\n", + "”。\n", + "”———\n", + "”,\n", + "100\n", + ":“\n", + "63%,\n", + "15%\n", + ":“\n", + ":“\n", + ":“\n", + "1/8\n", + ",1/8\n", + ":“D\n", + ",13\n", + ",“\n", + "”。\n", + ":“\n", + ",“\n", + "”。\n", + ",“\n", + "1∶1\n", + ":“\n", + "”。\n", + ":“\n", + ":“\n", + "———\n", + ")(\n", + "82\n", + "62\n", + "”。\n", + "、5\n", + "88∶83\n", + ",7\n", + "NBA\n", + "NBA\n", + "、10\n", + "65∶54\n", + "2∶2\n", + "2∶3\n", + "4∶1\n", + "138\n", + "4∶1。\n", + ",“\n", + "138\n", + "NBA\n", + "“ILOVETHISGAME”。\n", + "NBA\n", + ",“\n", + "”,\n", + "6000\n", + "1/3\n", + "”。\n", + "200\n", + "22\n", + "2000\n", + "○○○\n", + "83,\n", + "”,\n", + "”,\n", + "1996\n", + "、1997\n", + "90\n", + "”。\n", + "1990\n", + ",32\n", + "75%—90%\n", + ":3200\n", + ",11\n", + ",12\n", + "”。\n", + "0∶3\n", + "”。\n", + "”,\n", + "”。\n", + "1∶0\n", + "、2∶2\n", + "25\n", + "”。\n", 
+ "22\n", + "”。\n", + "173\n", + "23\n", + "38\n", + "33\n", + "233\n", + "38\n", + "33\n", + "”,\n", + "”……\n", + "”、“\n", + "”、“\n", + "”……\n", + "”,\n", + "”:“\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "———“\n", + "54∶60\n", + "96∶89\n", + "40\n", + "76∶75\n", + "66∶63\n", + "———26\n", + "96∶60。\n", + ":E\n", + ",F\n", + "15%\n", + "27\n", + "32\n", + "90%\n", + "”。\n", + "2500\n", + "3.3\n", + ",6\n", + "370\n", + "480\n", + "4000\n", + ",850\n", + "1.3\n", + "7500\n", + "64\n", + "、32\n", + "1998\n", + "1994\n", + "———“\n", + "”,\n", + "(14)、\n", + "(9)、\n", + "(7)、\n", + "(6)、\n", + "(2)、\n", + "(2)。\n", + "4∶1\n", + "40\n", + "31\n", + "26\n", + "……\n", + "60—65\n", + ":“\n", + "……”\n", + "100\n", + "、400\n", + "、1500\n", + "、5000\n", + "17\n", + "35\n", + "90\n", + "———\n", + "70\n", + "80\n", + "19\n", + "85\n", + "90\n", + "80\n", + "90\n", + "95\n", + "90\n", + "88\n", + "(http∶//www.peopledaily.com.cn)\n", + ":http∶//www.worldcup.net.cn。\n", + ",E\n", + "79∶68\n", + ";E\n", + "103∶76\n", + ";F\n", + "82∶57\n", + ";F\n", + "88∶79\n", + "74\n", + "49∶37\n", + "40%。\n", + "83%。\n", + "94∶93\n", + "110—120\n", + "200\n", + "192\n", + "”2002\n", + "31\n", + "40\n", + "80%\n", + ":“\n", + "———\n", + "”,\n", + "”。\n", + "90\n", + "”,\n", + "1992\n", + "67%。\n", + "58∶97\n", + "61∶52\n", + "28∶42\n", + "20%,\n", + "54∶72\n", + "60%,\n", + "22\n", + "40\n", + "67∶72\n", + "70∶72,\n", + "32∶28\n", + "31\n", + "70∶72\n", + "100\n", + "1∶1\n", + "1∶1\n", + "3∶1\n", + "1∶1\n", + "37\n", + ",6\n", + "82226\n", + "65123\n", + "57022\n", + "46218\n", + "52517\n", + "44416\n", + "43515\n", + "35414\n", + "35414\n", + "42614\n", + "34513\n", + "26412\n", + "31810\n", + "1659\n", + "31\n", + "“2\n", + "”、“\n", + "360\n", + "9.800\n", + "0.38\n", + "360\n", + "9.575\n", + "“SE(\n", + "E)”\n", + "1998\n", + "、1\n", + "nef\n", + ",nef\n", + "nef\n", + "11%\n", + "49%。\n", + "CCR5\n", + "gp120,\n", + "“AIDSVAX”\n", + "25\n", + "”。\n", + "》28\n", + "”。\n", + "28\n", + "29\n", + "1996\n", + "25\n", + ",1975\n", + "7000\n", + "400\n", + "1995\n", + ",1996\n", + "17\n", + "39\n", + ",65\n", + ",20\n", + "1996\n", + "348\n", + "300\n", + "40%,\n", + "25%。\n", + "15%,\n", + "60%。\n", + "8%\n", + "50%。\n", + "1995\n", + ";1961\n", + ";1962\n", + ",1996\n", + "7700\n", + "8999\n", + ",1996\n", + "3600\n", + ",6.6\n", + "9.1\n", + "1998\n", + "1997\n", + "3000\n", + "2100\n", + ",100\n", + "1996\n", + "135\n", + "1·2\n", + "28\n", + "27\n", + ",90%\n", + "100%\n", + "2003\n", + "1995\n", + ",1996\n", + "23\n", + "27\n", + "(SOHO)\n", + "3000\n", + "20%。\n", + "80%。\n", + "80%\n", + "70\n", + "27\n", + "80%。\n", + "》25\n", + "1000\n", + "76\n", + "17\n", + "25\n", + "6000\n", + "3200\n", + ",“\n", + ",“\n", + "”,\n", + "2、\n", + "”。\n", + "1、\n", + "2000\n", + "3000\n", + ":“\n", + "’,\n", + "’。\n", + "1998\n", + "370\n", + "40,\n", + ",1997\n", + "39\n", + "39\n", + "”。\n", + ",“\n", + "1993\n", + "”,\n", + "———“\n", + "”。\n", + "1994\n", + "1993\n", + "25000\n", + "1991\n", + "”,\n", + "400\n", + "、3\n", + "、8000\n", + ",7\n", + "1996\n", + "1995\n", + "”、“\n", + ",1996\n", + "”,\n", + "”、\n", + "”,\n", + "1990\n", + "26\n", + "“122”\n", + ":“\n", + "”4\n", + "40\n", + "“122”\n", + "”,\n", + ":“\n", + "”4\n", + "09\n", + ",“122”\n", + ",0\n", + "35\n", + "“122”\n", + "“122”\n", + ",“122”\n", + "“122”\n", + ",“\n", + "“122”\n", + ")。\n", + "”。\n", + "1998\n", + "”。\n", + "95%,\n", + "80%,\n", + "”。\n", + "———\n", + ":“\n", + "600\n", + "124\n", + "126\n", + "1995\n", + ":“\n", + "……\n", + 
"1000\n", + "300\n", + "40\n", + "1997\n", + "600\n", + "3/5,\n", + "———\n", + "”,\n", + "”。\n", + ",“\n", + ",6\n", + ":“\n", + ",“\n", + "”,\n", + "3000\n", + "2%\n", + "”,\n", + "”,\n", + "”。\n", + "1994\n", + "1994\n", + "5000\n", + "1993\n", + ":1991\n", + "1989\n", + "1991\n", + "2000\n", + "),\n", + "1992\n", + "5000\n", + "3.3\n", + "、1\n", + "、5000\n", + "、1\n", + "8.8\n", + "”———\n", + "600\n", + "”,\n", + "1.765\n", + "1.25\n", + "1800\n", + "1250\n", + "1998\n", + "9.5%\n", + "4.9%\n", + ":“\n", + "1998\n", + "26.16\n", + "9%,\n", + "1989\n", + "236\n", + "234\n", + "———\n", + ",“\n", + "215\n", + "260\n", + "185\n", + "222\n", + ",“\n", + "522\n", + "100\n", + "3.5\n", + "25\n", + ",27600\n", + ",20\n", + ",3787\n", + "1000\n", + "(1\n", + "38000\n", + "100\n", + "150\n", + "”,\n", + "”。\n", + "0.3%\n", + ",2\n", + "19.9%,3、4\n", + "6.4%\n", + "7%,5\n", + "3%\n", + "6.6%。\n", + "140\n", + "140.34\n", + "2.60\n", + "25\n", + ":“\n", + "8000\n", + "8000\n", + "7500\n", + "32\n", + "30%。\n", + "20%—30%。\n", + "1996\n", + "8000\n", + "40\n", + "),\n", + "1.5\n", + "39\n", + "1000\n", + "3000\n", + "60%,\n", + "90\n", + "8%,\n", + "1990\n", + "390\n", + "1995\n", + "640\n", + "5.5%\n", + "7.6%。\n", + "21\n", + "33%\n", + "68%,\n", + "26%\n", + "5%。\n", + "34\n", + "75%,50\n", + "50%,60\n", + "10%。\n", + "70%,\n", + "1.5\n", + "11%。\n", + "1976\n", + "9%\n", + "1996\n", + "32%。\n", + "6.5\n", + "1.65\n", + "3400\n", + "1994\n", + "1995\n", + "3000\n", + ",1996\n", + "7000\n", + "1.3\n", + ",3000\n", + "997\n", + "25\n", + "2000\n", + "124.5\n", + "2690\n", + "2650\n", + "260\n", + ")24\n", + "135.5\n", + "60%\n", + "0·25%,\n", + "1/4\n", + "23\n", + "1·3\n", + "23\n", + ",1996\n", + "1998\n", + ",12\n", + "1996\n", + "6570\n", + "6000\n", + "6369\n", + "23\n", + "23\n", + "44%,\n", + "26%。\n", + "41%,\n", + ",“\n", + "”。\n", + "23\n", + "23\n", + "295\n", + "2500\n", + "1.3\n", + "2010\n", + "2020\n", + "1966\n", + "25\n", + "25\n", + "26\n", + "39\n", + "1974\n", + "、1979\n", + "、1981\n", + "1987\n", + "1/3\n", + "2/5。\n", + "56.7\n", + "12063.5\n", + ")。\n", + "35%,\n", + "162\n", + "(1\n", + "1.20635\n", + "218.7\n", + "75\n", + "1300\n", + "23\n", + "22\n", + "100\n", + "2000\n", + ":“\n", + "90\n", + "1800\n", + "1998\n", + "22\n", + "2、6\n", + "21\n", + "3000\n", + "1、\n", + "1997\n", + "16·9\n", + "1880\n", + "1997\n", + "0·25\n", + ",1998\n", + "124·5\n", + "13·16\n", + "12·76\n", + "21\n", + "18·68\n", + "13·02\n", + "22\n", + "11·92\n", + "10·55\n", + ",“\n", + "”。\n", + "22\n", + "”。\n", + ",1997\n", + "131\n", + "31.8\n", + "1994\n", + "0.7%\n", + ",1997\n", + "10%。\n", + "”,\n", + "1/3\n", + ",1/3\n", + "1997\n", + "5000\n", + ",160\n", + ",10\n", + "1800\n", + ",1997\n", + "80\n", + "31\n", + "”。\n", + "2005\n", + "10·8%,\n", + "2010\n", + "2020\n", + "22\n", + "2.5\n", + "(1\n", + "1997—2001\n", + "4.85\n", + "1.5\n", + "1996\n", + "2002\n", + "232\n", + "7%\n", + "75\n", + "73\n", + "22\n", + "138·53\n", + "13600\n", + "12000\n", + "17\n", + "40%。\n", + "26\n", + "145\n", + "2670\n", + "1600\n", + "17\n", + "224\n", + ",1\n", + "1·9\n", + "530\n", + "70%。\n", + "2020\n", + "22\n", + "1998\n", + "3000\n", + ",1000\n", + "600\n", + "4·5\n", + "200\n", + "31\n", + "35\n", + ",122\n", + "21\n", + "1999\n", + "5000\n", + "8·3\n", + "(LaNina)\n", + "70%—80%。\n", + "”,\n", + "25\n", + "25\n", + "40\n", + "”,\n", + ",X\n", + "”,\n", + "1200\n", + "12%,\n", + "43\n", + "1/3\n", + "8%\n", + "3500\n", + "1500\n", + "750\n", + ",1996\n", + "18.25\n", + "1995\n", + "5.25%。\n", + 
",1996\n", + "1995\n", + "9.4\n", + "---\n", + "22\n", + "25\n", + "22\n", + "”。\n", + "15.19%。\n", + "7.08%,\n", + "79.55%;\n", + "67.39%;\n", + "9.81%。\n", + "195.64\n", + "3.2%,\n", + "5.4%\n", + "2、\n", + "1、\n", + ",21\n", + ",21\n", + "21\n", + "21\n", + "1000\n", + "21\n", + "1996\n", + ";1997\n", + "”。\n", + "28\n", + "1996\n", + "21\n", + ",6\n", + "8AFE\n", + "1.3\n", + "1.6\n", + "1.0\n", + ";6\n", + "3,\n", + "5·897\n", + "6·934\n", + "200\n", + "122·07\n", + "100·14\n", + "8712·87\n", + "19\n", + "”。\n", + "”。\n", + "72\n", + "28\n", + ",1996\n", + "”,\n", + "1985\n", + "100%。\n", + "19\n", + "11%\n", + "200\n", + "160\n", + "19\n", + "420\n", + "40\n", + "60\n", + "55%\n", + "100%\n", + "10%、\n", + "12·5\n", + "25\n", + "17\n", + "95\n", + "6·7\n", + "19\n", + ",6·187\n", + "36\n", + "1962\n", + "4%、\n", + "1·6%\n", + "”,\n", + ",“\n", + "”。\n", + "”,\n", + "”,\n", + "800\n", + "400\n", + "1996\n", + "”。\n", + "65%。\n", + "17\n", + ",《\n", + ":55%\n", + ",60%\n", + ",“\n", + "”“\n", + ":“\n", + ",“\n", + "———\n", + "’。\n", + ":“\n", + "……”\n", + "”。\n", + "”,\n", + "57\n", + ":“\n", + ":“\n", + "》,\n", + ":“\n", + "……\n", + ":1987\n", + ";1988\n", + ";1990\n", + "5000\n", + ";1991\n", + ";1991\n", + "—1993\n", + ";1995\n", + ";1996\n", + "37\n", + "”。\n", + "》(\n", + "---\n", + "》(\n", + "1999\n", + ",2000\n", + "”,\n", + "1925\n", + "》(5\n", + "19\n", + "),\n", + "100733。\n", + "”,\n", + "、《\n", + "):\n", + "1994\n", + "》,\n", + ":“\n", + "———“\n", + ":“\n", + "):\n", + "”,\n", + "):\n", + "BISS\n", + "”,\n", + "”,\n", + "……\n", + "”。\n", + "):\n", + "---\n", + "④)\n", + "90\n", + "40\n", + "1100\n", + "……\n", + "60\n", + "1959\n", + "43\n", + "28\n", + "120\n", + "1997\n", + "27\n", + ",77\n", + "1994\n", + "1985\n", + "’”。\n", + "60\n", + ":“\n", + "”:1959\n", + "》,\n", + "”。\n", + "、《\n", + "170\n", + ",《\n", + "1946\n", + "60\n", + "1942\n", + "1944\n", + "76\n", + "17\n", + ":“\n", + "17\n", + "17\n", + ",“\n", + "》),\n", + "》)\n", + "1992\n", + "”。\n", + "》17\n", + ",100\n", + ",88\n", + "17\n", + "313\n", + "55\n", + "、LG\n", + "5.3%。\n", + "0.7%,\n", + "200\n", + "17\n", + ",17\n", + "137\n", + "17\n", + "3.15\n", + "141\n", + ",“\n", + "”,\n", + "60\n", + ",80\n", + "3000—3500\n", + "4000\n", + "3000\n", + "4000\n", + "2000\n", + "2000\n", + "1200\n", + "300\n", + "400\n", + "1000\n", + "80\n", + "90\n", + "80\n", + "3000\n", + "1991\n", + "1984\n", + "1985\n", + "1933\n", + ",1956\n", + ")(\n", + "135·50\n", + "41.00\n", + "41.50\n", + "1。\n", + "22.21\n", + "295.15\n", + "143.25\n", + "137.89。\n", + "———\n", + "23.28\n", + "1776.41\n", + "500\n", + "19.52\n", + "1107.14\n", + "37\n", + "164.17\n", + "8829.46\n", + "17\n", + "17\n", + ",17\n", + "143.47\n", + "136.37。\n", + "17\n", + "”。\n", + "356\n", + "747—200\n", + "17\n", + "1982\n", + ":“\n", + ":“\n", + "》16\n", + "1972\n", + "TRW\n", + "7500\n", + "140\n", + "”,\n", + "200\n", + "17\n", + "141.98\n", + "3.15\n", + "“NO”。\n", + "150\n", + "”,\n", + "”,\n", + "”,\n", + "”;\n", + ":“\n", + "”。\n", + "”。\n", + "220\n", + "、1\n", + "1600\n", + "1985\n", + "2025\n", + "GDP\n", + ",65\n", + "1991\n", + "12%\n", + "2010\n", + "20%\n", + "2020\n", + "25%,\n", + ";1986\n", + "1997\n", + "90\n", + "2020\n", + "150\n", + "),\n", + "“140\n", + "146.43\n", + "1990\n", + ",“\n", + "”。\n", + "29\n", + ",“\n", + "”。\n", + "1991\n", + "25\n", + "”———\n", + "VOA\n", + "1997\n", + ":“\n", + "”,\n", + ",VOA\n", + "”,\n", + "120\n", + "1993\n", + ",VOA\n", + "1987\n", + ",VOA\n", + "VOA\n", + "1896\n", + "1987\n", + "VOA\n", + 
"VOA\n", + "6000\n", + "VOA\n", + "(VOA)\n", + "———\n", + "———\n", + "36\n", + "65\n", + "100\n", + "———\n", + "90%\n", + "———\n", + "”。\n", + "———\n", + ",“\n", + "”。\n", + "》、《\n", + "、80\n", + "90\n", + "1997\n", + "1983\n", + "1982\n", + "1975\n", + "1929\n", + "---\n", + "80\n", + "”。\n", + ",1997\n", + "1996\n", + ":“\n", + "1995\n", + "1997\n", + "60—70\n", + "1991\n", + "1988\n", + "1986\n", + "5000\n", + "1986\n", + ",12\n", + ",20\n", + "1982\n", + "———\n", + "62\n", + "1964\n", + "”,\n", + "』---\n", + "……\n", + "1901\n", + "10%\n", + "15%\n", + "60\n", + "150\n", + "1000\n", + "945\n", + "40\n", + "(DVD)\n", + "”(SACD)\n", + "660\n", + "1996\n", + "380\n", + "38·79\n", + "12%。\n", + "263·39\n", + "15·98\n", + "5·7%。\n", + "42·45\n", + "5·2%,\n", + "140∶1\n", + "1434∶1\n", + "300\n", + "288.21\n", + "1987\n", + "13·8%。\n", + "2·4%,\n", + "33\n", + "11699\n", + "10·3%。\n", + "15000\n", + "14825.17\n", + "197.16\n", + "“140\n", + ",6\n", + "146.43\n", + "2.42\n", + "1990\n", + "3·\n", + "《2000\n", + "2·\n", + "”,\n", + ":1·\n", + ")6\n", + ":“\n", + ",“\n", + "”。\n", + ",“\n", + "”,“\n", + "”。\n", + "》12\n", + "21\n", + "1992\n", + "———\n", + "80%\n", + "100%,\n", + ",“\n", + "”。\n", + ",“\n", + "200\n", + "90\n", + "37\n", + ",5\n", + "”(\n", + "),\n", + ",30\n", + "2、\n", + "1、\n", + "48\n", + "1∶143.79\n", + "144.72\n", + ",1998\n", + "1.9%\n", + "21.1%,\n", + "1.2%,\n", + "1955\n", + "23\n", + ",0.7%\n", + "1974\n", + "0.5%,\n", + "1.3%,\n", + "5.3%。\n", + "1997\n", + "0.7%。\n", + "2100\n", + "3100\n", + "1995\n", + "5%\n", + "10%\n", + "1998\n", + "5·5\n", + "7000\n", + "140\n", + "150\n", + "160\n", + "———\n", + "———\n", + "1997—1998\n", + "75000\n", + "570\n", + "),\n", + "12900\n", + "98\n", + "70%,\n", + "240000\n", + "150∶1,\n", + "2933\n", + "90%\n", + "1998\n", + "46%\n", + "1999\n", + ";18%\n", + "1998\n", + "”;25%\n", + "1998\n", + "———\n", + ",4\n", + "3.9%\n", + "4.1%,\n", + "1953\n", + "7.2%,\n", + "1995\n", + "80%。\n", + "144.72∶1,\n", + "”。\n", + ",“\n", + "”(\n", + "),\n", + "”,\n", + "”。\n", + "1997\n", + "2.9%,\n", + "-0.7%。\n", + "”。\n", + "1998\n", + "1997\n", + "”,\n", + "1997\n", + "”(\n", + "”(\n", + "),\n", + "”,\n", + ",“\n", + "”。\n", + ",1998\n", + "0.6%。\n", + "”。\n", + ",《\n", + ",《\n", + "”,\n", + "”。\n", + "ATM\n", + "(ATM)\n", + "2—12\n", + "40%\n", + "”:\n", + "IBM\n", + "30%—50%。\n", + "6%,\n", + "”,\n", + "”,\n", + "30%\n", + "1000\n", + "70%\n", + "1/3,\n", + "1/3。\n", + "“WINTELCO”。\n", + "770\n", + "”。\n", + "40\n", + "(SANFRANCISCO),\n", + "“FRANCISCO”。\n", + "———\n", + "(CISCO)\n", + "”,\n", + "———\n", + "(CISCO)\n", + "……\n", + "237\n", + "———\n", + "124\n", + "21\n", + ":“\n", + "”1996\n", + "VCD\n", + "VCD\n", + "———\n", + "———\n", + "”,\n", + "VCD,\n", + "VCD\n", + "C-Cube\n", + "OK、\n", + "VCD\n", + "VCD\n", + "VCD\n", + "VCD\n", + "VCD\n", + "VCD\n", + "VCD\n", + "VCD\n", + "DVD\n", + "VCD\n", + "300\n", + "VCD\n", + ",1997\n", + "1000\n", + "60%—70%\n", + "---\n", + "VCD\n", + "……\n", + "100\n", + "100\n", + ",《\n", + "100\n", + ",“\n", + "100\n", + "》,\n", + "《“\n", + "》,\n", + "》,\n", + "》,\n", + "……\n", + "》,\n", + "》,\n", + "》,\n", + "》,\n", + "1000\n", + "”———\n", + "”,\n", + "”,\n", + "100\n", + ":“\n", + "”,“\n", + "”。\n", + ",《\n", + "100\n", + ",3\n", + "49\n", + "43%,\n", + "1∶140\n", + "1993\n", + "28%,\n", + "18%,\n", + "9.1\n", + "1987\n", + "4.5%\n", + ",1997\n", + "7400\n", + "1998\n", + "1996\n", + "25\n", + "637\n", + "17\n", + "450\n", + "1996\n", + ",“\n", + "”。\n", + "1998\n", + ":“\n", + "”,“\n", + "”。\n", + 
"20,\n", + "”。\n", + "36\n", + "190\n", + "280\n", + "150\n", + "141\n", + "“145\n", + ":10\n", + "412\n", + "、11\n", + ",“\n", + "”、“\n", + "10%,\n", + "0.56%,\n", + "1.31%。\n", + ":10\n", + ",8\n", + "“140\n", + ",11\n", + "141\n", + "142\n", + ":“\n", + "2000\n", + "400\n", + "),\n", + "1700\n", + "1997\n", + "1995\n", + "”———\n", + "80%\n", + "500\n", + "2.5%\n", + "4.5%。\n", + "9.1%。\n", + "1.6%\n", + "6%。\n", + ",4\n", + "1.3%。\n", + "40\n", + "1128.57\n", + "1073.47\n", + "15014.04\n", + "325.22\n", + "141∶1,\n", + "142∶1,\n", + "141.67∶1,\n", + "0.84\n", + "5250\n", + "134.6\n", + "1336\n", + "34.3\n", + "),\n", + "51%。\n", + "15%—18%\n", + "0.5\n", + "(AMD)\n", + "(NSI)\n", + "1000\n", + "7%,\n", + "20%—30%\n", + "80%\n", + "90%\n", + "“X86”\n", + "”。\n", + "(DEC)、\n", + "40\n", + "”,\n", + "21\n", + "140·67\n", + "1991\n", + "140·30\n", + "1∶141·35\n", + "40∶1\n", + "2·5%,5\n", + "9·2%。\n", + "40%\n", + "50%\n", + "8·27%。\n", + "》。\n", + "150\n", + "90\n", + "1·6\n", + "6000\n", + "70\n", + "100\n", + "100\n", + "1·5\n", + "100\n", + "662\n", + "2340\n", + "2、\n", + "35\n", + "1、1998\n", + "”,\n", + ",“\n", + "”,\n", + "100\n", + "150\n", + ",“\n", + "250\n", + "”。\n", + "40%,\n", + "2500\n", + "2750\n", + "74\n", + "99·99%\n", + "60%,\n", + "”。\n", + "120\n", + ",“\n", + "39\n", + "39%\n", + "4%。\n", + "———\n", + "(DNA),\n", + "200\n", + "1997\n", + "70\n", + ",90\n", + "“RVA4EV”\n", + "3.985\n", + "3.990\n", + "1.6934\n", + "1.7095\n", + "43.12\n", + "43.95\n", + ",8\n", + "140\n", + "5·79\n", + "312·37\n", + "311·70\n", + "68\n", + "1998\n", + "1997\n", + "2、\n", + "1、\n", + "200\n", + ",1997\n", + "430\n", + "8.4%,\n", + "1—4\n", + "135.4\n", + "14.1%。\n", + "1997\n", + "1997\n", + ",1997\n", + ",1997\n", + "1997\n", + "9%\n", + ",1997\n", + "1992\n", + ":“\n", + "1996》\n", + "”,\n", + "”;\n", + "21\n", + "80\n", + "11·5\n", + "40\n", + "25\n", + "35\n", + "0·9\n", + "1899\n", + "1894\n", + "”,\n", + "》7\n", + "29\n", + "”。\n", + "”。\n", + "27\n", + "2000\n", + "31\n", + "90%\n", + "———\n", + "1400\n", + "2200\n", + "25%\n", + "1200\n", + "1960\n", + ",“\n", + "”。\n", + ":“\n", + "”,\n", + "5000\n", + "31\n", + "140∶1。\n", + "”,“\n", + "”。\n", + ",“\n", + "”。\n", + "9000\n", + "“140\n", + ",9\n", + "140∶1\n", + "”,\n", + "140·70\n", + "1·13\n", + "1996\n", + "84001”\n", + "84001”\n", + "1799\n", + "199\n", + "19\n", + "》6\n", + "40\n", + "2/3\n", + ",1/3\n", + "5000\n", + "DNA\n", + "DNA\n", + "DNA(\n", + "2、\n", + "1、6\n", + ",140\n", + "60\n", + "170\n", + "170\n", + "1965\n", + "1968\n", + "”,\n", + ",30\n", + "2006\n", + "2006\n", + ",“\n", + ":“\n", + ",‘\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1975\n", + "85\n", + "1974\n", + "34\n", + ",29\n", + "1720\n", + "13.5\n", + "1750\n", + ")。\n", + ":“\n", + "”。\n", + "”;\n", + ",“\n", + "”。\n", + "”;\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "29\n", + "”,\n", + "1969\n", + "”。\n", + ":“\n", + "200\n", + "、60\n", + "1940\n", + "”……\n", + "25\n", + ":“\n", + "”。\n", + "1998—1999\n", + "41·78\n", + "41·80\n", + "1,\n", + "268·50\n", + "130·20\n", + "1.2\n", + "3.5\n", + "4.3\n", + ")。\n", + "29\n", + "11.75\n", + "3.8\n", + ")。\n", + "2.7\n", + "),\n", + "3.4\n", + ")。\n", + "1000\n", + "80\n", + "46\n", + "1/3。\n", + "80\n", + "1984\n", + "1975\n", + ",3\n", + ",1957\n", + "5000\n", + "1935\n", + "”(ORANGINA)\n", + "”,\n", + "75\n", + "6000\n", + "1992\n", + "4000\n", + ",1992\n", + "300\n", + "1600\n", + "1997\n", + "1996\n", + "46\n", + "45\n", + "100\n", + "1995\n", + "2000\n", + 
"300\n", + "2000\n", + "40\n", + "2600\n", + "1996\n", + "1000\n", + "188.3\n", + "61.4%。\n", + "6000\n", + "1995\n", + "100\n", + "100\n", + "1300\n", + ":“\n", + "200\n", + "”。\n", + "200\n", + "1993\n", + "”。\n", + "”。\n", + "1992\n", + "2000\n", + "96%,\n", + "1800\n", + ",30\n", + "”,\n", + "”,\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”……\n", + "8000\n", + "2.6\n", + "68%。\n", + "80\n", + "”。\n", + ":“\n", + "1800\n", + "……\n", + "”18\n", + "237\n", + "19\n", + "200\n", + "”,\n", + "……\n", + "610\n", + "40\n", + ",19\n", + "”。\n", + "……\n", + "1994\n", + "45\n", + "2500\n", + "35\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "2/3\n", + "”,\n", + "”。\n", + ":“\n", + "”,\n", + ",“\n", + ",“\n", + "”,\n", + ":“\n", + ",80\n", + "”(\n", + ")(\n", + "21\n", + "”、“\n", + "”。\n", + "”,\n", + "”,\n", + ",70\n", + "KU\n", + "1.9\n", + "540\n", + ",“\n", + ",“\n", + ",1999\n", + "3·5%\n", + "70\n", + "”。\n", + "150\n", + "1.6\n", + "10·1%。\n", + "3.0976\n", + "18·9%。\n", + "139.70\n", + "138.83。\n", + ",2\n", + "“140\n", + "139.75∶1,\n", + "9000\n", + "”,\n", + ",“\n", + "1997\n", + "50%;4\n", + "4.1%,\n", + "4%\n", + "”;0.5%\n", + "……\n", + "1995\n", + "80∶1,3\n", + "40%\n", + "130\n", + "135∶1,\n", + "139∶1,\n", + "“140\n", + "”,\n", + "“140\n", + "”———\n", + ",14\n", + "410\n", + "759\n", + "100\n", + "3512\n", + "6.5\n", + "1991\n", + "280\n", + "200\n", + "19\n", + "884\n", + ",13\n", + "120\n", + "”,\n", + "”,\n", + ":“\n", + "150%,\n", + "10%,\n", + "139\n", + "150\n", + "》,\n", + "2·73\n", + "700\n", + "1000\n", + "2·25\n", + "1/3\n", + "”。\n", + "8·65\n", + "2·064\n", + "1·3\n", + "7640\n", + "1973\n", + ",25\n", + "25\n", + "90%。\n", + "),\n", + "150\n", + "2002\n", + "19\n", + ",”\n", + ",90%\n", + "1996\n", + "1.6\n", + "1.2\n", + "4000\n", + ",”\n", + "1998\n", + "29\n", + ",”\n", + "1997\n", + "1995\n", + "1/2\n", + ",1996\n", + ",1997\n", + "1994\n", + ",“\n", + "37\n", + "200\n", + "———\n", + "”(\n", + "21\n", + "250—300\n", + "900\n", + "100\n", + "250\n", + "21\n", + "60%\n", + "300\n", + "30%—35%。\n", + "450\n", + "550\n", + "450—600\n", + "1997\n", + "”,\n", + "500\n", + "21\n", + "500\n", + "350—400\n", + "280\n", + "ICE\n", + "TGV\n", + "1832.5\n", + "270\n", + "300\n", + "515.3\n", + "160\n", + "200\n", + "300—350\n", + "20.9\n", + "39.2\n", + "4000\n", + "90%\n", + "1964\n", + "210\n", + "0.65%。\n", + "3.14\n", + "1.77\n", + "4.13\n", + "3.43\n", + "19\n", + "120\n", + "70%\n", + ",1825\n", + "38\n", + "600\n", + "34\n", + "”。\n", + "14%—15%,\n", + "10%—20%,\n", + "20%,\n", + "40%。\n", + "90\n", + "350\n", + "1991\n", + "300—350\n", + "AAR\n", + "250\n", + "21\n", + "1983\n", + "”。\n", + "1994\n", + "1991\n", + "80\n", + "1961\n", + "31\n", + "150\n", + ",150\n", + "17\n", + "200\n", + ",130\n", + "5·32%\n", + "40·6%。\n", + "-6·21%。\n", + ",1998\n", + "-10·1%,\n", + "85%。\n", + "200\n", + "200\n", + ")。\n", + "21\n", + "98%。\n", + "———\n", + "(SAT)\n", + ";14\n", + "85\n", + "138.1\n", + "62%。\n", + "3609.9\n", + "4120\n", + "107\n", + "),\n", + "14%;\n", + "82.8\n", + "139.1\n", + "68%,\n", + ":6\n", + "1998—1999\n", + "1/4\n", + "2、6\n", + "1、5\n", + "31\n", + "71\n", + "”,\n", + "”。\n", + ",6\n", + "1200\n", + "60%\n", + "”。\n", + "1999\n", + "1997\n", + "381\n", + "1400\n", + "2200\n", + "25%\n", + ",“\n", + "”,\n", + "”,\n", + "1996\n", + "5.29\n", + ",44%\n", + "》,\n", + ",76%\n", + ";91%\n", + "1997\n", + "1996\n", + "”,\n", + "1994\n", + ",5\n", + "115\n", + "1984\n", + "、80\n", + "”5\n", + "1997\n", + "160\n", + "37\n", + "》5\n", + "29\n", + "《〈\n", + 
"》。\n", + "49·5\n", + "44\n", + "45\n", + "100\n", + "27\n", + "47\n", + "700\n", + "26\n", + "100\n", + "300\n", + "318\n", + "1994\n", + "1863\n", + "《20\n", + "100\n", + ":1960\n", + ",“\n", + "500\n", + "150\n", + "),\n", + "”。\n", + "2000\n", + "300\n", + "19\n", + ",5\n", + "27\n", + "500\n", + "”。\n", + ",6.8\n", + "2500\n", + "2025\n", + "4800\n", + "22\n", + "800\n", + "30%,\n", + "7%。\n", + "150\n", + "90%\n", + "2/3\n", + "50%,\n", + "40%,\n", + "1/3\n", + ":1995\n", + "39.2\n", + ",1996\n", + "40.3\n", + ",1997\n", + "150\n", + "、200\n", + "1991\n", + ",“\n", + "31\n", + "1600\n", + "31\n", + ",5\n", + "400\n", + "”。\n", + "140\n", + "”。\n", + ",70%\n", + "200\n", + "60\n", + "46.8%,\n", + "1700\n", + "1991\n", + "1/3\n", + "”。\n", + "600\n", + "700\n", + "1300\n", + "27\n", + "140\n", + "”。\n", + "”,\n", + "150%。\n", + "27\n", + "60%\n", + "80%;\n", + "6.20\n", + "1.2\n", + "27\n", + "6·7\n", + "31\n", + "31\n", + "31\n", + ",“\n", + ",“\n", + ":“\n", + ":“\n", + "”,\n", + "”。\n", + "”。\n", + "”,\n", + ",“\n", + "2500\n", + "22\n", + "1∶10。\n", + ":“\n", + "27%—33%,\n", + "60%\n", + "……\n", + ",“\n", + ":“\n", + "28\n", + ",1995\n", + "10·8\n", + "31\n", + "90%\n", + "14%\n", + "20%\n", + "5%\n", + "12%。\n", + "”,\n", + "4000\n", + "2·5\n", + "31\n", + "28\n", + "1989\n", + "29\n", + "31\n", + "6·4\n", + "3500\n", + "250\n", + "7·1\n", + "64000\n", + "800\n", + "2840\n", + ",1·23\n", + ",2·5\n", + "6·014\n", + "255.2\n", + "31\n", + "200\n", + "41·75—41·78\n", + "100(BSE—100)\n", + "200(BSE—200)\n", + "———\n", + "3897·10\n", + "210·71\n", + "》30\n", + "140\n", + "29\n", + "6·7\n", + "50%\n", + "150%。\n", + "10%。\n", + "27\n", + "80%,\n", + "1∶6·2,\n", + "1∶6·188\n", + "380%。\n", + "29\n", + "80%;\n", + "29\n", + "27\n", + "1∶6·2\n", + "1∶6·133;\n", + "28\n", + "28\n", + "23\n", + "21\n", + "4.1\n", + "1.6\n", + "5.9\n", + "A。\n", + "A。\n", + "23\n", + "29\n", + "1500\n", + "1988\n", + ",1990\n", + "97%\n", + "”,\n", + "”。\n", + ":“\n", + "120\n", + "1227\n", + ",“\n", + "100%”。\n", + "1.8\n", + "100%,\n", + ":“\n", + "23\n", + "”,\n", + ",1997\n", + "22\n", + "———\n", + "19\n", + "”,\n", + "1995\n", + ":“\n", + "1995\n", + ":“\n", + "1995\n", + "1997\n", + ":“\n", + "1996\n", + ":“\n", + "1995\n", + "”。\n", + "35\n", + "”。\n", + "28\n", + "28\n", + "1993\n", + "27\n", + "833\n", + "833\n", + "》29\n", + "833\n", + "833\n", + "21\n", + "2000\n", + "12%。\n", + ",1992\n", + "2010\n", + "1990\n", + "80%。\n", + ",“\n", + "2500\n", + "30%。\n", + "21\n", + "》。\n", + "ISO14000\n", + "200\n", + "593\n", + "、400\n", + "、209\n", + "1970\n", + "365\n", + "100\n", + "230\n", + "25%,\n", + "1990—1995\n", + "12%,\n", + "17\n", + "61%。\n", + "1000\n", + ",95%\n", + "2500\n", + "1/4\n", + "1/3;\n", + "600\n", + "15%\n", + "270\n", + "9000\n", + "21\n", + "100\n", + "60\n", + "1945\n", + "60\n", + "(6\n", + "……\n", + "28\n", + ",3\n", + "3000\n", + "72\n", + ";90\n", + "251\n", + "22\n", + "”。\n", + "』(\n", + ",“\n", + "630\n", + "40\n", + "』(\n", + "28\n", + "27\n", + "27\n", + "29\n", + "25\n", + "80\n", + "108\n", + "74%,\n", + "28\n", + "”,\n", + "28\n", + "28\n", + "28\n", + "21\n", + "”。\n", + "100\n", + "”。\n", + "”。\n", + "1929\n", + "8%—10%,\n", + "3·85\n", + "168\n", + "2400\n", + "2000\n", + "》27\n", + "》26\n", + "》27\n", + "”。\n", + "28\n", + "29\n", + "27\n", + "1987\n", + ":“\n", + ":“\n", + ":“\n", + "28\n", + ",27\n", + ":“\n", + "40\n", + "》、\n", + "》、\n", + ",1000\n", + "28\n", + ",《\n", + "28\n", + "》26\n", + "》26\n", + "》25\n", + "》26\n", + "》25\n", + "》26\n", + ",“\n", + 
"”。\n", + "》28\n", + ",21\n", + "28\n", + "”。\n", + "28\n", + "27\n", + "28\n", + "”,\n", + "”。\n", + "1996\n", + "”,\n", + "13.1%\n", + "40%\n", + "30%,\n", + "9%。\n", + "1997\n", + ",1996\n", + "85%\n", + "1997\n", + "1995\n", + "1996\n", + "40%\n", + "56\n", + "56\n", + "101\n", + ",650\n", + "27\n", + "6.3\n", + ",1991\n", + "”。\n", + "28\n", + "1999\n", + "27\n", + "27\n", + ",30\n", + ",160\n", + "》、\n", + "2000\n", + "80\n", + "28\n", + "27\n", + "”,\n", + "40%,\n", + "———\n", + "300\n", + "108\n", + "1972\n", + "”。\n", + "75%\n", + "108\n", + "28\n", + ")24\n", + "25\n", + "27\n", + ")(\n", + ",1993\n", + "26\n", + "27\n", + "26\n", + ":“\n", + "》50\n", + ",“\n", + "”。\n", + "27\n", + "1999\n", + "17\n", + "———\n", + "25\n", + "70\n", + "600\n", + "”4\n", + "1200\n", + "》。\n", + "90\n", + "1995\n", + "33\n", + "1950\n", + "1997\n", + "2.1\n", + "72.32\n", + "7.3\n", + "6828\n", + "64%,\n", + "”。\n", + "“KCC”\n", + "63.2\n", + "“KCC”\n", + "2:\n", + "1:\n", + ":“\n", + "80%,\n", + "600\n", + "56%。\n", + "”。\n", + "、“\n", + ",1997\n", + "4000\n", + ":“\n", + "98%\n", + "100%,1997\n", + "3000\n", + "1000\n", + "70\n", + "—80\n", + "—30\n", + ":“\n", + "70\n", + "1990\n", + "1000\n", + "200\n", + "”。\n", + "”。\n", + "500\n", + "1997\n", + "2.5\n", + "19.1%。\n", + "350\n", + "90\n", + "K,\n", + "”。\n", + "”:\n", + "”,\n", + "1992\n", + "2000\n", + "90\n", + "40\n", + "1993\n", + "”,\n", + "”。\n", + "”,\n", + "1997\n", + "1.2\n", + "600\n", + "K”\n", + "90\n", + "……\n", + "K”\n", + "K”,\n", + "1/4\n", + "1/3\n", + "1/4\n", + "2/3\n", + ",9\n", + ",1997\n", + "2090\n", + "5140\n", + "8%\n", + "1996\n", + "9·47%,\n", + "1997\n", + "10·8%。\n", + "1997\n", + "1996\n", + "1994\n", + "”,\n", + "120\n", + "5000\n", + "1·26\n", + "1000\n", + "1500\n", + "1996\n", + "3000\n", + "1·5\n", + "5000\n", + "15%\n", + "1·5\n", + "1/3\n", + ",5\n", + "5.15\n", + "2000\n", + "1992\n", + "500\n", + "1/3\n", + "———\n", + ",“\n", + "”。\n", + "21—23\n", + ";30%\n", + "27\n", + "66\n", + "1988\n", + "2000\n", + "25\n", + "1997\n", + "1992\n", + "1.2\n", + "CDMA(\n", + "1987\n", + "22\n", + "”。\n", + ",“\n", + "”。\n", + "21\n", + "》,\n", + "”、\n", + "600\n", + "……\n", + ":“\n", + "100\n", + "》、《\n", + "》、《\n", + "》3\n", + "”。\n", + ",“\n", + "”,\n", + "”。\n", + "40\n", + "25\n", + ")(\n", + "25\n", + "25\n", + "21\n", + "23\n", + "25\n", + "23\n", + "25\n", + "VX\n", + "23\n", + "122\n", + ")、\n", + "25\n", + ")。\n", + "25\n", + ",39\n", + "25\n", + "26\n", + "23\n", + "22\n", + "”,\n", + "”,\n", + ":“\n", + "23\n", + "26\n", + "23\n", + ",《\n", + ",《\n", + "2045\n", + "200\n", + ",《\n", + "40%\n", + "1.5\n", + "35\n", + "75%\n", + "80%\n", + "20%\n", + "25%。\n", + "1101\n", + "》,\n", + "153\n", + "75%\n", + "82%\n", + "”、“\n", + "1999\n", + "2500\n", + ",“\n", + "”。\n", + "600\n", + "21\n", + "”,\n", + "13·1%\n", + "1948\n", + "1967\n", + "”,\n", + "2020\n", + "70%\n", + "6000\n", + "28\n", + ",28\n", + "1998\n", + ":“\n", + ",“\n", + ",《\n", + "”,“\n", + "”。\n", + "》21\n", + "27\n", + "5.4\n", + "23\n", + "22\n", + ",400\n", + "200\n", + "2600\n", + "1100\n", + "539\n", + "98%。\n", + "”。\n", + "23\n", + "”。\n", + "22\n", + "2、6\n", + "23\n", + ",1983\n", + "1、\n", + "100\n", + "23\n", + "23\n", + "2/3\n", + "”。\n", + "23\n", + "23\n", + "”,\n", + "”,\n", + "21\n", + "”。\n", + "”。\n", + "70\n", + "22\n", + ":“\n", + "”。\n", + "1998\n", + "1991\n", + "23\n", + "19\n", + "1996\n", + "22\n", + "22\n", + "”。\n", + "21\n", + "21\n", + "”。\n", + "22\n", + "”,\n", + "”。\n", + "”;\n", + "”。\n", + "”。\n", + ":“\n", + 
"”。\n", + "17\n", + "”。\n", + "”。\n", + "”,\n", + "21\n", + "……\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”,\n", + "73\n", + "300\n", + ":“\n", + "21\n", + "21\n", + "16%\n", + "12%,\n", + "25\n", + "2000\n", + "”“\n", + "”。\n", + "”,\n", + "14.5%,\n", + "37%。\n", + "1994\n", + "1986\n", + "”,\n", + "282\n", + "50%。\n", + "31\n", + "1994\n", + "46·24%。\n", + "40\n", + "96%\n", + "50·64%\n", + "21\n", + ",1200\n", + "21\n", + ",3\n", + "500\n", + "21\n", + "21\n", + ",1991\n", + "1954\n", + "21\n", + "21\n", + "31\n", + "265\n", + ",137\n", + ",117\n", + ",11\n", + "21\n", + ",“\n", + "35\n", + "22\n", + "———“\n", + "17\n", + ")、\n", + "),\n", + "22\n", + ")、\n", + "”,\n", + "”。\n", + "”。\n", + "”,\n", + "”。\n", + ",“\n", + "”,\n", + "25\n", + ",5\n", + "21\n", + ",“\n", + "21\n", + "21\n", + "19\n", + "1980\n", + "21\n", + ":“\n", + "》。\n", + "400\n", + "600\n", + "83\n", + "4000\n", + "1881\n", + "1885\n", + "1.7\n", + "100\n", + "19\n", + "19\n", + "1974\n", + "”。\n", + "1993\n", + "1997\n", + "1146\n", + "1146\n", + "”,\n", + "”。\n", + "21\n", + "19\n", + "40\n", + "25\n", + "1000\n", + "”,\n", + "”,\n", + "”,\n", + ":“\n", + "19\n", + "19\n", + ",“\n", + "21\n", + "22\n", + "45\n", + "52\n", + "1175\n", + "52\n", + "1175\n", + "”,\n", + "18%\n", + "21\n", + ":“\n", + "”,\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "”。\n", + "1963\n", + ",17\n", + ",“\n", + "”。\n", + "……\n", + "1993\n", + "1979\n", + "21\n", + "90\n", + ")。\n", + "100\n", + "47\n", + ":“\n", + ":“\n", + ":“\n", + "1/4,\n", + "21\n", + "19\n", + "”,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + ",《\n", + ",《\n", + "———\n", + ",“\n", + "、“\n", + "”,\n", + "1950\n", + "》(1972),\n", + "”。\n", + "》(1964—1965),\n", + ",《\n", + ":“\n", + "》(1948.11)\n", + "1948\n", + ":“\n", + "》。\n", + "(1905—1951)\n", + "》(1965—1966)、\n", + "》(1970—1972)、\n", + "》(1960—1963)\n", + "》(1965)\n", + "》(1975)\n", + "》(1963)\n", + "》(1947)\n", + "》(1948)\n", + "1945\n", + "”,\n", + "”)\n", + "1945\n", + "”,\n", + "”,\n", + ",“\n", + "”。\n", + "”。\n", + "———\n", + "”。\n", + "》(\n", + "),\n", + "”,\n", + "”,\n", + "》、《\n", + "》、《\n", + "》(\n", + "1971\n", + "1997\n", + ")。\n", + "”(\n", + ")。\n", + ",“\n", + ":“\n", + ",“\n", + "”。\n", + "》(1989)\n", + "”。\n", + ")、\n", + "”。\n", + "”!\n", + ":“\n", + "”;“\n", + ")、\n", + "”。\n", + "1953\n", + "(1890—1969)\n", + "”、“\n", + "”、“\n", + "”,\n", + "”,\n", + "”,\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "75%\n", + "13·1%\n", + "1948\n", + "1967\n", + ",“\n", + "”,\n", + "”。\n", + "”、\n", + "15%。\n", + "13·1%\n", + "”。\n", + "……\n", + "300\n", + "800\n", + "21\n", + "19\n", + ":“\n", + "200\n", + "47\n", + "21\n", + ",“\n", + "”。\n", + ",1700\n", + "200\n", + "”。\n", + "1856\n", + "17\n", + "),\n", + "),\n", + "1000\n", + "3000\n", + "60\n", + "80\n", + "1996\n", + "17\n", + ",89\n", + "1999\n", + "1999\n", + "1995\n", + ",43\n", + "4、\n", + "”。\n", + "2、\n", + "———\n", + "1、\n", + "1823\n", + "”。\n", + "6163\n", + "3—5\n", + "2700\n", + "1997\n", + ")2:\n", + "1:\n", + ",1997\n", + "389\n", + "』,\n", + "』。\n", + "3、\n", + "2、\n", + "1、\n", + "(TBM)\n", + "1998\n", + "166\n", + "1998\n", + "112\n", + "1995\n", + "9505\n", + "8951\n", + "264\n", + "237\n", + "456\n", + "18456\n", + "23\n", + "TBM\n", + "1996\n", + "267.8\n", + "97\n", + "160\n", + "60%,\n", + "”,\n", + "1997\n", + "36\n", + "、36\n", + ":“\n", + "40\n", + ":“\n", + ":“\n", + 
":“\n", + "17\n", + "70\n", + "1/4\n", + ",4\n", + "“9”\n", + ",“CC”\n", + "“9”\n", + "———\n", + "40\n", + "、62\n", + "64\n", + "1987\n", + "、1995\n", + "1999\n", + "19\n", + "———\n", + "”。\n", + "》;\n", + "……\n", + ")。\n", + "”、\n", + "”。\n", + "---\n", + "3∶3\n", + ",“\n", + "5∶2\n", + "4∶3\n", + "9∶5\n", + ",10\n", + "29\n", + "109∶94\n", + "40\n", + ":105\n", + ",19\n", + "4∶6\n", + "6∶3\n", + "1∶6\n", + "1∶2\n", + ":“\n", + "1300\n", + "2002\n", + ":“\n", + "2002\n", + "10080\n", + "70\n", + "100\n", + "2100\n", + "1999\n", + "OTC\n", + "),\n", + "1998\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ");\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ")、\n", + ");\n", + ");\n", + ");\n", + ");\n", + ")。\n", + ",1997\n", + "1997\n", + ",1\n", + ",1997\n", + "3540\n", + "1647\n", + "1893\n", + "21\n", + "0.6%。\n", + ",1997\n", + "……\n", + "”,\n", + "207\n", + "Jeep\n", + ",1999\n", + "—2001\n", + ",“\n", + "”。\n", + "15000\n", + "———\n", + ",7\n", + "”。\n", + "35\n", + "15000\n", + "21\n", + ",“\n", + "250\n", + "2000\n", + "”。\n", + "400\n", + "”。\n", + "”。\n", + "90\n", + "40\n", + ",3\n", + "),\n", + "1123\n", + "880\n", + "60\n", + "45\n", + "),\n", + "),\n", + "60\n", + "60\n", + "45\n", + "———5\n", + "32\n", + "2∶0\n", + "21∶19\n", + "3∶1\n", + "23∶21\n", + "17∶9\n", + "20∶15\n", + "15∶18\n", + "21∶20。\n", + "”;\n", + "……\n", + "79\n", + "200\n", + "”,\n", + "4∶1\n", + "”,\n", + "”。\n", + "1/4\n", + "2∶1\n", + "5∶6\n", + "1/4\n", + "2∶0\n", + "2∶2。\n", + "90\n", + "———\n", + "1∶0\n", + ":“\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "》,\n", + "》、《\n", + "》,\n", + "———《\n", + "》、《\n", + "》、《\n", + "”。\n", + "……\n", + "1999\n", + "1992\n", + "1994\n", + "1998\n", + ";1996\n", + ";1997\n", + "……\n", + ":“\n", + "”;\n", + ":“\n", + "”;\n", + ":“\n", + "……”\n", + "1995\n", + "”,\n", + "”,\n", + "”(\n", + "”、\n", + "”。\n", + "1995\n", + "1996\n", + ",“\n", + "”。\n", + "”。\n", + "”,\n", + "”,\n", + "1993\n", + ",“\n", + "1994\n", + "’。\n", + "”,\n", + "“92\n", + "”,\n", + "1992\n", + "1993\n", + "”,\n", + "”,“\n", + ":“\n", + "1976\n", + "……\n", + "1992\n", + "———\n", + "”。\n", + "”,\n", + "……\n", + "”,\n", + ":“\n", + "”。\n", + "』(\n", + "……\n", + "———\n", + "———\n", + "……\n", + "”,\n", + "……\n", + "……\n", + "”,\n", + "1993\n", + "347\n", + "250\n", + "22\n", + ",4000\n", + "1000\n", + "×8\n", + "1968\n", + ",4\n", + "1.3\n", + ",23\n", + "17\n", + "2000\n", + "25\n", + "(18\n", + "25\n", + "3∶3\n", + "1975\n", + "1985\n", + "1993\n", + "47\n", + "”。\n", + "22\n", + "』,\n", + "”,\n", + "”。\n", + "1997\n", + "3000\n", + "28\n", + "300\n", + ",A、B、C\n", + "”,\n", + "”。\n", + ",7\n", + "”。\n", + "”。\n", + "0∶3\n", + "”,\n", + "”,\n", + ":“\n", + "”?\n", + ",12\n", + "———\n", + ":“\n", + "29\n", + "29\n", + "200\n", + "200\n", + "79\n", + "76\n", + "28\n", + "31\n", + ",1998\n", + "25\n", + "”。\n", + ",“\n", + "31\n", + "23\n", + ",3\n", + "1998\n", + "1999\n", + "101』\n", + "28\n", + "6500\n", + "28\n", + "26\n", + "28\n", + "42\n", + "60\n", + "429\n", + "4100\n", + "200\n", + "”。\n", + "”。\n", + "”。\n", + "”,“\n", + "”,“\n", + ",“\n", + ",“\n", + ":“\n", + "”,\n", + "200\n", + "400\n", + "800\n", + "2000\n", + "5.6\n", + "1.6\n", + "39\n", + "1872\n", + "125\n", + "57\n", + ",1992\n", + "27\n", + "1998\n", + "———\n", + "1966\n", + "33\n", + "1966\n", + "1993\n", + "1993\n", + "33\n", + "500\n", + "1991\n", + "35\n", + ",28\n", + "———\n", + "1966\n", + "1967\n", + "2000\n", + "29\n", + "15.7%\n", + 
"1979\n", + "1997\n", + "71\n", + "”。\n", + ":“\n", + "”。\n", + "27\n", + "17\n", + "”。\n", + "34\n", + "———\n", + "2004\n", + "———\n", + "1999\n", + ",1999\n", + ",2000\n", + "1997\n", + "35\n", + "26\n", + "283\n", + "2·6\n", + "2000\n", + "110\n", + "49\n", + "7·3%,\n", + "6·7%,\n", + "1965\n", + "1996\n", + "31\n", + "9·2%,\n", + "44350\n", + "1533\n", + "7096\n", + "、5699\n", + "5632\n", + "11521\n", + "11405\n", + "15336\n", + "23646\n", + "1996\n", + "74335\n", + "51492\n", + "1998\n", + "129\n", + "600\n", + "27\n", + "1997\n", + "17\n", + "28\n", + "28\n", + "1996\n", + "、1997\n", + "39\n", + "70\n", + "1997\n", + "157\n", + "1996\n", + "224\n", + "19770\n", + "COSCO(\n", + "1995\n", + "1995\n", + "1996\n", + "1997\n", + "1996\n", + "1997\n", + "1996\n", + "ISO9002\n", + ";1996\n", + "1997\n", + "14·66\n", + "1997\n", + "18·57\n", + ",3\n", + "25%\n", + ",“\n", + "1·7\n", + "160\n", + "27\n", + "100\n", + "2%,\n", + "11%。\n", + "1850\n", + "150\n", + "25\n", + "23\n", + "2、\n", + "1、\n", + "”,\n", + "1978\n", + "1996\n", + "450\n", + ",“\n", + "”。\n", + "27\n", + ",27\n", + "”。\n", + "38\n", + "600\n", + "250\n", + "350\n", + "100\n", + "735\n", + "”,\n", + "”。\n", + "22\n", + "40%。\n", + ")4\n", + "13.22\n", + "12.59\n", + ",“\n", + "23\n", + "1996\n", + "”,\n", + "1996\n", + "”,\n", + "20%,\n", + "20%\n", + "”,\n", + "1992\n", + "500\n", + "1991\n", + "”,\n", + ",”\n", + ",“\n", + "42\n", + ",5\n", + "1990\n", + "100\n", + ",“\n", + "”,\n", + "2000\n", + "”———\n", + "5000\n", + "1.5\n", + ":“\n", + "20%\n", + "”。\n", + "1999\n", + "90\n", + "23\n", + "”。\n", + "25\n", + "”,\n", + "4000\n", + ";3000\n", + "17\n", + "25\n", + "25·3\n", + "6·41\n", + "44·28\n", + "8·27\n", + "10·6\n", + ")。\n", + "54\n", + ",4000\n", + "22\n", + "16466\n", + "421\n", + "40\n", + "1·02\n", + "31\n", + "22\n", + "28\n", + "29\n", + ",9\n", + "9000\n", + "1995、1996\n", + "1997\n", + "21\n", + "1179\n", + "12·1%,\n", + "1/3\n", + ",1997\n", + "10·7%。\n", + "2、\n", + "”(\n", + ")。\n", + "1、\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + ":“\n", + "”。\n", + "2000\n", + "”。\n", + "4%\n", + "30%\n", + "35\n", + "3000\n", + "”,\n", + "300\n", + "60%\n", + "70\n", + "”、“\n", + "”,\n", + "560\n", + "3000\n", + "、300\n", + "21\n", + "”。\n", + ",4\n", + "1991\n", + "”、“\n", + "”、“\n", + ",3\n", + "8000\n", + "1986\n", + "”,\n", + ",83\n", + "”,\n", + "1985\n", + "1979\n", + ":“\n", + "39\n", + "77\n", + ",1979\n", + "”:\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + ":“\n", + "”,\n", + "1991\n", + "》,\n", + "”,\n", + "”,\n", + "873\n", + "1980\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + ":“\n", + "”1941\n", + "1937\n", + "”,\n", + ",“\n", + "”,“\n", + "”。\n", + "”。\n", + "34\n", + "”,\n", + "”。\n", + "》。\n", + "ABC》。\n", + ":“\n", + "”。\n", + ",“\n", + "60\n", + ",1/4\n", + "”。\n", + ",“\n", + ":“\n", + "1992\n", + ":“\n", + "70\n", + ",1937\n", + "1997\n", + "26\n", + "23\n", + "40\n", + "———\n", + "1989\n", + "1990\n", + "》。\n", + "100\n", + "40\n", + "BOT\n", + "1988\n", + "90\n", + "1949\n", + "1939\n", + "1937\n", + "”。\n", + "1931\n", + "100\n", + "---\n", + "1991\n", + "ISO9002\n", + "21\n", + "”。\n", + "》,\n", + "1998\n", + "”,\n", + "”。\n", + "1997\n", + "2003\n", + "3%\n", + "1998\n", + "260\n", + "40\n", + "23\n", + "21\n", + "22\n", + "2、22\n", + "1、\n", + "3%,\n", + "2.8%,\n", + "3%。\n", + "1800\n", + "10.6%。\n", + "1.7%\n", + "2.9%。\n", + "2.9%,\n", + "3.1%,\n", + "1998\n", + "1997\n", + "1996\n", + "4.4%,1998\n", + "1999\n", + "6.7%\n", + "7.0%。\n", + "5%,1999\n", + "4%。\n", + "1997\n", + 
"1996\n", + "8.5%,\n", + "195\n", + "1.8793\n", + ",1997\n", + "2.2%,1998\n", + "1999\n", + "2.7%\n", + "2.8%。\n", + "”,\n", + "”,\n", + "1996\n", + "0.3\n", + "60%\n", + "25%,\n", + "2.4%。\n", + "”。\n", + "”,\n", + "1998\n", + "”。\n", + "”。\n", + ":“\n", + ",1998\n", + "25\n", + "1998\n", + "》,\n", + "1998\n", + "1999\n", + "2.8%\n", + "3%。\n", + "1924\n", + "1931\n", + "150\n", + "200\n", + ",30\n", + "140\n", + "1996\n", + "22\n", + "3·2%\n", + "9%\n", + ",1998\n", + "8·8%\n", + "6%\n", + "7·1%\n", + "3·9%,\n", + "6·8%\n", + "4·8%。\n", + ",1997\n", + "《1998\n", + ",1997\n", + "7·5%\n", + "6·1%,\n", + "4%\n", + "22\n", + "53\n", + "22\n", + "727—200\n", + "53\n", + "21\n", + "5.5%\n", + "5%,\n", + "1973\n", + "1998\n", + "698·6\n", + "1111·6\n", + ",3\n", + "1179\n", + "1317\n", + "138\n", + "213\n", + "35·2%。\n", + "21\n", + "21\n", + "1/3\n", + "98”\n", + "8%,“\n", + "21\n", + "”,\n", + "500\n", + "21\n", + "1/5\n", + ":“\n", + "400\n", + "”。\n", + "———\n", + ",CAC40\n", + "25·29\n", + "3860·40\n", + "———\n", + "4%\n", + "5%,\n", + "DAX30\n", + "53·06\n", + "5388·94\n", + "22\n", + "21\n", + "500\n", + "2000\n", + "16·73\n", + "1903·87\n", + "6%,\n", + "115\n", + "43·10\n", + "9184·94\n", + "22\n", + "28\n", + "26\n", + ":“\n", + "80%\n", + "9000\n", + "1%。\n", + "19\n", + "———\n", + "77\n", + "GNP\n", + "2%,\n", + "5%—6%;\n", + "9%—10%,\n", + "5%\n", + "39\n", + "44\n", + "IMF\n", + "”,\n", + "23\n", + "21\n", + "33\n", + "67\n", + "57\n", + "IMF\n", + "4·\n", + "3·\n", + "2·\n", + "1·\n", + "”。\n", + "”。\n", + "IMF\n", + "1998\n", + "10%\n", + "IMF\n", + ",IMF\n", + "570\n", + "IMF\n", + "(IMF)\n", + "”(\n", + "”。\n", + "110\n", + "2、\n", + "1、4\n", + "2000\n", + "6500\n", + "8·5\n", + "6600\n", + "7600\n", + "8500\n", + "26\n", + "450\n", + "190\n", + "70\n", + ",1993\n", + "1·13\n", + ",1988\n", + "4000\n", + "330\n", + "1994\n", + "1995\n", + "1996\n", + "2005\n", + "1650\n", + "3.1\n", + "1.3\n", + "1990\n", + "13.3\n", + "1997\n", + "55.57\n", + "1990\n", + "41\n", + "1997\n", + "202\n", + "400%。\n", + "90\n", + "2000\n", + "”,\n", + "21\n", + "”。\n", + "33·7\n", + "4·3\n", + "1998\n", + "9·2\n", + "18·3\n", + "5·7\n", + "1996\n", + "117·2\n", + "116·2\n", + "418·2\n", + "9·79%。\n", + "19\n", + ",1997\n", + "534·4\n", + "1996\n", + "7·28%,\n", + "416·7\n", + "1996\n", + "2·9%。\n", + ":“\n", + ":“\n", + ",24\n", + "1.1\n", + "6500\n", + "2.3\n", + "”。\n", + "CNN\n", + ":“\n", + "26\n", + ",“\n", + "”。\n", + "1、\n", + "170\n", + "40%\n", + ",3\n", + "53%。\n", + "35\n", + "40\n", + ",3\n", + "25%。\n", + ",3\n", + "52·4\n", + "2·2\n", + "2000\n", + "50%\n", + "2000\n", + "1996\n", + "2001\n", + "60\n", + "62\n", + "2009\n", + "55\n", + "62\n", + "65\n", + "300\n", + "60\n", + "”),\n", + "0.5%\n", + "11%—12%\n", + "40\n", + "75%\n", + "24%,\n", + "22%。\n", + "6%,\n", + "9%。\n", + "1996\n", + "9.7%,\n", + "15.2%。\n", + "10%。\n", + "2%\n", + "3%。\n", + "1997\n", + "55%。\n", + "1995\n", + "6000\n", + "1995\n", + "20%\n", + "380\n", + "3997\n", + "”,\n", + ",80%\n", + "500\n", + "240\n", + ",6\n", + "103\n", + ":“\n", + "5%\n", + "”,\n", + ",1996\n", + "200\n", + "300\n", + "2000\n", + ",1989\n", + ",700\n", + "139\n", + ",540\n", + "173\n", + "26\n", + "13—16\n", + "26\n", + "1935\n", + "”,\n", + "4.4%,\n", + "3.8%;\n", + "14.1%;\n", + "4%,\n", + "9.3%;\n", + "7.2%,\n", + "3.9%,\n", + "3.2%,\n", + "1.9%。\n", + "4.7%,\n", + "640\n", + "37\n", + "0.8%\n", + "30%\n", + "60%。\n", + "45\n", + "45\n", + "60%\n", + "50%\n", + "45\n", + "50%,\n", + "66.7%,\n", + ")、\n", + "”、“\n", + "”、“\n", + 
"”、“\n", + "”、“\n", + "”,\n", + "》、《\n", + "》(\n", + "---\n", + "”、“\n", + "”、“\n", + "”。\n", + "1996\n", + ",《\n", + "———《\n", + "》,\n", + ",“\n", + "”。\n", + "1992\n", + "1996\n", + ",1995\n", + ":“\n", + "1979\n", + ",30\n", + "---\n", + "60\n", + "60\n", + "61\n", + "64\n", + "1827\n", + "31\n", + ":“\n", + "”,“\n", + "”。\n", + "78\n", + "1927\n", + "”。\n", + "》、《\n", + "》、《\n", + "》、\n", + "、《\n", + "”,\n", + "”,\n", + ",《\n", + "1981\n", + "———\n", + "2007\n", + "———\n", + ",11\n", + "830\n", + "60\n", + "”,\n", + "40%,\n", + "……\n", + "300\n", + "38\n", + "38\n", + "”。\n", + "700\n", + ",“\n", + "”,\n", + "”。\n", + "”。\n", + "74\n", + "1941\n", + "1987\n", + "”,\n", + "17\n", + ",1500\n", + ",1300\n", + "”!\n", + "19\n", + "———\n", + ",“\n", + "”……\n", + ",1000\n", + "……(\n", + ")(\n", + "”。\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1979\n", + ":“\n", + ",“\n", + "”,\n", + "80\n", + "70\n", + "60\n", + ":60\n", + ",“\n", + "”,\n", + "200\n", + "》。\n", + ":“\n", + "300\n", + ",“\n", + "”。\n", + "(1898—1989)\n", + "5.4\n", + "100\n", + "40\n", + "750\n", + "1996\n", + "1997\n", + "1997\n", + "100\n", + "600\n", + "45%\n", + "60\n", + "3%。\n", + "8%\n", + "1500\n", + "1/4\n", + "22\n", + "22\n", + "277\n", + "163\n", + "10%。\n", + "50%\n", + "1.57\n", + "617\n", + "1.73\n", + "769\n", + "834\n", + "284\n", + "90\n", + "1/3,\n", + "1.5\n", + ",15\n", + "1%\n", + "15%。\n", + "1973\n", + "60\n", + "59%。\n", + "———\n", + "60\n", + "21\n", + "”。\n", + "2001\n", + "2200\n", + "EDI、\n", + "1996\n", + "”,\n", + "”,\n", + "”,\n", + "60\n", + "41\n", + "33\n", + ",1996\n", + "”。\n", + "”,“\n", + "”。\n", + ",7\n", + "1·2\n", + "31\n", + "28\n", + "2·02\n", + "(1\n", + "0·54\n", + "85%\n", + "1000\n", + "27%,\n", + "36%,\n", + "———\n", + "3000\n", + "———\n", + "22\n", + "15·82\n", + ",14\n", + "13·45\n", + "55\n", + "13·29\n", + "21\n", + "13·22\n", + "12·59\n", + "”,\n", + "”,\n", + "”,\n", + "15·1203\n", + "64·5%,\n", + ",1997\n", + "1000\n", + "17439\n", + "17·4%。\n", + "1030·4\n", + "1154·3\n", + "124\n", + "(1\n", + "38\n", + "),\n", + "85%。\n", + "”。\n", + "1998—1999\n", + ",1998\n", + "6.93\n", + ",1999\n", + "90\n", + "500\n", + "1997\n", + "700\n", + "25%。\n", + "150\n", + "1980\n", + "28%。\n", + "4.6%,\n", + "3%\n", + "250\n", + "330\n", + "90\n", + "7.5\n", + "1996\n", + "15%\n", + "”。\n", + "3%\n", + "22\n", + ",3\n", + "40%。\n", + "2、\n", + ",4\n", + "1、\n", + "9000\n", + "298\n", + "2300\n", + "648\n", + "5700\n", + "128\n", + "87\n", + "7·5\n", + "3·3\n", + "108\n", + ",“\n", + "1·3%\n", + "2·7%\n", + "4·4%,\n", + "6·7%\n", + "4·1%\n", + "2·4%。\n", + "2·9%。\n", + "3·1%,\n", + "3·5%。\n", + "1974—1975\n", + "、1980—1983\n", + "1990—1991\n", + "90\n", + "1970\n", + ",1997\n", + "29\n", + "1998\n", + "-0.3%,\n", + ",10\n", + "132∶1\n", + "129∶1。\n", + "”,\n", + ",4\n", + "”。\n", + ",“\n", + "”。\n", + "2003\n", + "1.75%\n", + "”,\n", + ";1998\n", + ",1999\n", + "1998\n", + "”。\n", + "34%\n", + "3%。\n", + "1997\n", + "575\n", + "7500\n", + "NEC\n", + "3·2\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + "4000\n", + "454\n", + "”,\n", + "2006\n", + "2010\n", + "1999\n", + "”。\n", + "”。\n", + "60—70\n", + ",“\n", + "2005\n", + ";2007\n", + "2011\n", + "”。\n", + "2015\n", + "50—100\n", + "2000\n", + "299\n", + "1800\n", + "COLIA1\n", + "”。\n", + "P53\n", + "P53\n", + "1995\n", + "1500\n", + "27\n", + "1967\n", + "CBFI\n", + "COR。\n", + ",1988\n", + "》,\n", + ",1986\n", + "———\n", + "2、\n", + "1、4\n", + "34\n", + "12—18\n", + "1400\n", + "1.5\n", + "17\n", + 
"1996\n", + "》,\n", + "”。\n", + ",“\n", + "”,10\n", + "2.5\n", + "1400\n", + "700\n", + "5·9%\n", + "1·9%,\n", + "1995\n", + "18·4%。\n", + "17%,\n", + "12·8%\n", + "9·7%。\n", + "31·1%\n", + "36·3%,\n", + "2·3%\n", + "7·1%。\n", + "1383\n", + "45·6%,\n", + "41·7%\n", + "3030\n", + "2090\n", + "45%。\n", + ",《\n", + ";《\n", + "”。\n", + "”;《\n", + ";《\n", + "……\n", + "1982\n", + "humulin,\n", + "140\n", + "300\n", + "100\n", + "52·5%。\n", + "400\n", + "1400\n", + "29%\n", + "31%。\n", + "127\n", + "67%。\n", + "500\n", + "90\n", + "414\n", + "36\n", + "900\n", + "100\n", + "414\n", + "300\n", + "90\n", + "C—150\n", + ")1997\n", + "3000\n", + "75%\n", + "68%。\n", + "1996\n", + "9%\n", + "13%。\n", + "1997\n", + "81\n", + "1300\n", + "730\n", + "90%\n", + "———\n", + "》、《\n", + "》,\n", + "1998\n", + "”。\n", + "1850\n", + "1863\n", + "200\n", + "8%\n", + ",15\n", + "19\n", + "1997\n", + "(1997\n", + "1998\n", + "102190\n", + "163890\n", + "10%\n", + "30%,\n", + "1·6\n", + "19\n", + "1200\n", + "———\n", + "1998\n", + "59\n", + "39\n", + "71\n", + "4300\n", + "(2346\n", + "),\n", + "4530\n", + "249\n", + "65.7%、36.2%\n", + "33.7%;\n", + "11%,\n", + "5.3%。\n", + "7900\n", + "8700\n", + ",3\n", + "8904.44\n", + "4300\n", + "5009\n", + "5200\n", + "5911\n", + "2998\n", + "3800\n", + "8000、5000、6000\n", + "3000\n", + "1998\n", + "2.5%,\n", + "9000\n", + "、38\n", + "、3.550\n", + "、37\n", + "、1300\n", + "、1.59\n", + "———\n", + ",1\n", + ";2\n", + "1∶12000\n", + ";3\n", + "1∶133\n", + "1∶1.80\n", + "1∶1.84,\n", + "2.2%。\n", + "1957\n", + ",40\n", + "V2\n", + "V2。\n", + "1970\n", + "———\n", + "2006\n", + "236\n", + "、730\n", + "28\n", + ",1996\n", + "708\n", + "8000\n", + "400\n", + ",26\n", + ",300\n", + "”。\n", + ":18\n", + "———\n", + ",“\n", + "9.2\n", + "1990\n", + "32\n", + ",80\n", + "”,“\n", + "”。\n", + "1985\n", + "80\n", + "95%。\n", + "“2·15”\n", + "122\n", + "44\n", + "256\n", + "1997\n", + "、10\n", + "17\n", + "100%\n", + "10%\n", + "DPA)\n", + "DPA\n", + "70%\n", + "28\n", + "88%,\n", + ")、\n", + "200,\n", + "95%。\n", + ",“\n", + "28\n", + "1970\n", + "———\n", + "26\n", + "28\n", + "”。\n", + ":“\n", + "60\n", + "40\n", + "”,\n", + "36\n", + "25\n", + ",“\n", + "”。\n", + "4.3\n", + "”。\n", + "”,\n", + "———\n", + ",1997\n", + "31%,\n", + "30%,\n", + "18%、11%、10%。\n", + "1998\n", + "”。\n", + ",1997\n", + "1997\n", + "9.065\n", + "1996\n", + "2.669\n", + "1997\n", + "1998\n", + "110\n", + "1997\n", + "1998\n", + "240\n", + "28.7\n", + "33\n", + "2000\n", + ",25\n", + "”。\n", + "2.6\n", + "270\n", + "1995\n", + "40\n", + ")。\n", + "39\n", + "44\n", + "(ISO)\n", + "———\n", + "”,\n", + "”。\n", + "1994\n", + "(1998—2001)\n", + "27.5\n", + "30%。\n", + "500\n", + "1991\n", + "164\n", + "32\n", + "2000\n", + "1998\n", + "1998\n", + "1980\n", + "850\n", + "2350\n", + "70\n", + "45\n", + "28\n", + "1997\n", + "20660\n", + "2350\n", + "3%,\n", + "10%。\n", + "718\n", + "165\n", + "26%,\n", + "90\n", + "6600\n", + "1990\n", + "4080\n", + "61·8%。\n", + "2、\n", + "1、\n", + "442\n", + "1976\n", + ",20\n", + "1998\n", + "1·6\n", + "134∶1\n", + ",3\n", + "”,\n", + "”(\n", + "”)\n", + ",1998\n", + ",2\n", + "60\n", + ",3\n", + "———\n", + "———\n", + "———\n", + "———\n", + "4.3%,\n", + "700\n", + "5000\n", + "29\n", + "———\n", + ",3\n", + "430\n", + "70%\n", + "1998—1999\n", + "4%\n", + "20%\n", + "17%;\n", + "”,\n", + ",“\n", + "》,\n", + "》,7\n", + "23\n", + "3000\n", + "1993\n", + "”。\n", + ",1995\n", + "300\n", + "1992\n", + ",“\n", + "———“\n", + "”。\n", + ",3\n", + "1991\n", + ",3\n", + "”(\n", + "1990\n", + "90\n", + 
":“\n", + "34\n", + "……\n", + "———\n", + "3.\n", + "2.\n", + "1.\n", + "),\n", + "C、A、E\n", + "9—10\n", + "1200\n", + "”,\n", + "(WHO)\n", + ":“\n", + ",1/3\n", + "”1992\n", + ",1991\n", + "1995\n", + ",1994\n", + "”,1994\n", + ",1995\n", + "”,\n", + ",1996\n", + "”,1996\n", + "CAD\n", + "”,\n", + "”,\n", + "、T\n", + "1990\n", + "”。\n", + "1990\n", + "、T\n", + "80\n", + "100\n", + "”。\n", + "”。\n", + "”。\n", + "』,\n", + "』,\n", + "』,\n", + "』。\n", + ":『\n", + "”,\n", + "3、\n", + "”,\n", + "2、\n", + "1、\n", + "32\n", + "29\n", + "466\n", + "……\n", + "120\n", + ":“\n", + ":“\n", + "”10\n", + ":“\n", + "、20\n", + "……\n", + ":“\n", + ":“\n", + ":“\n", + "1.8\n", + "1998\n", + "28\n", + "”:\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + "1994\n", + "”,\n", + "”,\n", + "』:\n", + "』、『\n", + "』、『\n", + "』,\n", + "』,\n", + "』,\n", + "”,\n", + "”,\n", + "19\n", + ")。\n", + "》,\n", + ",7\n", + "17\n", + "—19\n", + "32.662\n", + "22.383\n", + "A19.114\n", + "17.095\n", + "10.581\n", + "16.112\n", + "A15.663\n", + "A15.524\n", + "A13.895\n", + "A13.221\n", + "23.222\n", + "22.143\n", + "20.204\n", + "15.585\n", + "12.571\n", + "21.172\n", + "15.643\n", + "15.154\n", + "14.505\n", + "13.80\n", + "1988\n", + ",10\n", + "636\n", + "24.5\n", + "23\n", + "”。\n", + "2241\n", + "1228\n", + "282.49%。\n", + "26\n", + "110%,\n", + "1000\n", + "108\n", + "100\n", + "1000\n", + "50、55、60、65\n", + "———\n", + "Ku\n", + "———\n", + "1997\n", + "2.35\n", + "1998\n", + "17\n", + "35786\n", + "600\n", + "19\n", + ")。\n", + "”,\n", + "”,\n", + "140\n", + "146.75\n", + ",6\n", + "17\n", + "60\n", + "136\n", + "(IMF)\n", + "56\n", + "1.83\n", + "100%,\n", + "150%,\n", + "1996\n", + "1.64\n", + "25\n", + "7.5%,\n", + "1.67\n", + "5.5%\n", + "9000\n", + "17\n", + "、5\n", + "、10\n", + "、10\n", + "3000\n", + "500\n", + "700\n", + "1700\n", + "800\n", + "1000\n", + "1995\n", + "80\n", + "1997\n", + "200\n", + "1994\n", + "1993\n", + "1.2\n", + "80\n", + "80\n", + "140\n", + "1985\n", + "1995\n", + "250\n", + "79.83\n", + "1994\n", + "7%,\n", + "1981\n", + "18.5%,\n", + "1992\n", + "3.1875%。\n", + "1997\n", + "1300\n", + "1979\n", + "》、《\n", + "》,\n", + "》、《\n", + "》(\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + "———\n", + "《“\n", + ":〈\n", + ":“\n", + "》,\n", + ":“\n", + "———\n", + "》,\n", + "”“\n", + "》,\n", + "》,\n", + "1954\n", + "》,\n", + "》(\n", + ",1979\n", + "),\n", + "》,\n", + "———\n", + "》(\n", + "),\n", + "———\n", + "”、“\n", + "———\n", + "、“\n", + "”,\n", + ")、\n", + "”、“\n", + "”、“\n", + "”。\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + ":“\n", + "”,“\n", + "”。\n", + "”。\n", + "”,\n", + ":“\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + "”;《\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "》,\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "”。\n", + ":“\n", + "”,\n", + "1992\n", + ":“\n", + "……\n", + "”。\n", + "”、\n", + "8%,\n", + "”。\n", + "”。\n", + ",『\n", + "』,\n", + ":“\n", + "”,\n", + "”,\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + ",“\n", + "”。\n", + "”。\n", + "”,\n", + ",“\n", + "”,\n", + "”———\n", + "ISO9001\n", + "UL、\n", + "VDE、\n", + "CSA\n", + "EEC、CSA\n", + "UL\n", + "———\n", + "”,\n", + ",“\n", + "1996\n", + "”,\n", + "”。\n", + ":“\n", + "0.01\n", + "GE\n", + "”,\n", + "”。\n", + "156\n", + "、545\n", + "”。\n", + "“OEC”\n", + "”,\n", + "”。\n", + "“OEC”\n", + "”,\n", + ",“\n", + "76\n", + "1984\n", + "———\n", + "”。\n", + "”。\n", + "1997\n", + "108\n", + "47\n", + "120\n", + "———“\n", + "70\n", + ":“\n", 
+ "25\n", + "125\n", + ":“\n", + "25\n", + ":“\n", + "”5\n", + "』。\n", + "100\n", + "———“\n", + "”。\n", + "”(\n", + "17\n", + "2.2\n", + "40\n", + "60\n", + "40\n", + ",50\n", + "40\n", + ",40\n", + "”。\n", + "”,\n", + "”。\n", + "”。\n", + "),\n", + "35\n", + "54\n", + "1997\n", + "120\n", + "90\n", + "240\n", + "32920\n", + "48770\n", + "23690\n", + "58000\n", + "81690\n", + "15.5\n", + "7.5\n", + "”,\n", + "”(\n", + "159\n", + "1997\n", + "47\n", + "47\n", + ",7\n", + "27\n", + ",8\n", + "31\n", + "1996\n", + "27\n", + "1995\n", + "1993\n", + ",1992\n", + "”。\n", + ",1996\n", + "1997\n", + "3%\n", + "“21\n", + "70\n", + "80\n", + "80\n", + "“21\n", + "”。\n", + "80\n", + "”:\n", + ";2\n", + "”,\n", + "”;\n", + "1/4\n", + ",40%\n", + "1/10\n", + "1%\n", + "』,\n", + "”,\n", + "”,\n", + "500\n", + "23·3%,\n", + "7·8%。\n", + "500\n", + "1996\n", + "500\n", + "500\n", + "1997\n", + "1780\n", + "6980\n", + "100\n", + "16.2\n", + "1987\n", + "5%\n", + "25%,\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "4∶3,\n", + "9000\n", + "9033.23\n", + "90\n", + "———\n", + "6000\n", + "100\n", + "1996\n", + "370\n", + "———\n", + "817\n", + "33.875\n", + "176.75\n", + "10.06\n", + "71.75\n", + "27\n", + "550\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "”。\n", + "8000\n", + "14%。\n", + "7900\n", + "9000\n", + "700\n", + "9000\n", + "9033.23\n", + "18%\n", + "16%\n", + "15%。\n", + "4·5%,\n", + "5·5%,\n", + "6%,\n", + "8%。\n", + ",1998\n", + "2001\n", + ",1997—1998\n", + "2·81\n", + "9%。\n", + "4·5%\n", + "8%\n", + ",11\n", + ",1997\n", + "1·1\n", + "1·9\n", + ":“\n", + "”。\n", + "43\n", + "”,\n", + "1968\n", + "58\n", + "3000\n", + "1500\n", + "100\n", + "1985\n", + "89\n", + "”。\n", + "3911\n", + "0.8\n", + "0.3\n", + "1.1\n", + "1995\n", + "7.2\n", + "”。\n", + "”,\n", + "40\n", + "100\n", + "”。\n", + "35\n", + "1.12\n", + "290\n", + "127\n", + "180\n", + "297\n", + "80\n", + "60\n", + "1988\n", + "5000\n", + "80\n", + "8.5\n", + "110\n", + "80\n", + "3911\n", + "1991\n", + "1410\n", + "13.1\n", + "200\n", + "———\n", + ")(\n", + ":“\n", + "”。\n", + "1600\n", + "58.5\n", + "90%\n", + ",25%—33%\n", + "8%\n", + "15%\n", + "”,\n", + ",95%\n", + ":“\n", + "”。\n", + ":“\n", + "”。\n", + "———\n", + "1996\n", + "95%。\n", + "60\n", + "1/4\n", + "80\n", + "10%。\n", + "”,\n", + "300\n", + "500\n", + "”,\n", + ",1998\n", + "7510\n", + "7675\n", + "40%,\n", + "124·5\n", + "22\n", + ",“\n", + "”。\n", + "2.2%。\n", + "2003\n", + "100%,2011\n", + "60%。\n", + "(1996\n", + "15.7%),\n", + "4700\n", + "8000\n", + "10%,\n", + "”,\n", + "122%,\n", + "5.1%,\n", + "23.8%,\n", + "14%。\n", + "60%,\n", + "121%。\n", + ":“\n", + "”,\n", + ",“\n", + ":“\n", + ")。\n", + "”。\n", + "”。\n", + "25\n", + "2000\n", + "100\n", + "33\n", + "10%\n", + "30%\n", + "17\n", + "700\n", + "》4\n", + "280\n", + "M—8\n", + "54\n", + "1996\n", + "716\n", + "212\n", + "928\n", + "1993\n", + "454\n", + ",1994\n", + "720\n", + ",1995\n", + "1025\n", + ",1996\n", + "6893\n", + "899\n", + "1993\n", + "1997\n", + "9781\n", + "54\n", + "(1\n", + "6.3\n", + "”。\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + "”,\n", + "”。\n", + "225\n", + "185·12\n", + ",“\n", + "”。\n", + "3A\n", + ",1997\n", + "1800\n", + ",“\n", + "”。\n", + "”,\n", + "119\n", + "601\n", + "12.46\n", + ",5\n", + "1994\n", + "1991\n", + "1992\n", + "1.1\n", + "1990\n", + "3333\n", + "1993\n", + "1350\n", + "1913\n", + ",1971\n", + ",10\n", + "”,\n", + "9000\n", + "90\n", + "1904\n", + ",“\n", + "”;\n", + ":“\n", + ":“\n", + ":“\n", + "”3\n", + "———\n", + "3.4\n", + "---\n", + "M81\n", + ",1963\n", + 
"100\n", + "1000\n", + "X—\n", + "X—1\n", + "X—\n", + "X—\n", + "70\n", + "X—\n", + "60\n", + "1054\n", + "1.4\n", + ");1967\n", + "127\n", + "”(\n", + "rc,\n", + "rc=2GM/C\n", + "),\n", + ",G=6.67×10\n", + ",C\n", + "2.997×10\n", + "rc\n", + ",1938\n", + ",1963\n", + ",1991\n", + "FundamentalofCosmicPhysics\n", + "88\n", + "1998\n", + "1200\n", + "1997\n", + "700\n", + "GMP\n", + "6000\n", + "70%\n", + "1995\n", + ",53\n", + "、98%\n", + "1300\n", + ",2\n", + ",1998\n", + ":“\n", + ",“\n", + "29\n", + ":“\n", + "(97·12·30)\n", + ")。\n", + "1995\n", + "1997\n", + "32\n", + "---\n", + "BP\n", + ":“\n", + ":“\n", + "”1996\n", + ",1994\n", + "1998\n", + "”,\n", + "1997\n", + "”,\n", + ",7\n", + "1987\n", + "2000\n", + "2、24\n", + "114\n", + "1、\n", + "”!\n", + ":“\n", + "”,\n", + "1995\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”。\n", + "650\n", + "1987\n", + "36\n", + "”3\n", + ")(\n", + "1400\n", + "70\n", + "1994\n", + ",46\n", + "100\n", + "———\n", + "”,\n", + "……\n", + ",1993\n", + ",5\n", + "”,\n", + "100\n", + ",39\n", + ",“\n", + "600\n", + "……\n", + "70\n", + "22\n", + ":“\n", + "300\n", + "1995\n", + "———(\n", + "28\n", + "31\n", + "490\n", + "15%\n", + "2.3\n", + "2.8\n", + "2、\n", + "1、\n", + "737—700\n", + "737\n", + "144\n", + "737—700\n", + "7000\n", + "27\n", + "737—700\n", + "1997\n", + "》,\n", + "”,\n", + "》,\n", + "250\n", + "70\n", + "),\n", + "100\n", + "……\n", + "(1997—2002\n", + "1996—1997\n", + "150\n", + "110\n", + "2000\n", + "1997\n", + ")15\n", + "30%。\n", + "16.8\n", + "1990\n", + "18%\n", + "1997\n", + "1%;\n", + "1990\n", + "21%\n", + "1997\n", + "6.2%。\n", + "90\n", + "1996\n", + "40\n", + ":1997\n", + "480\n", + "5.3%;\n", + "80\n", + "200\n", + ");\n", + "14.7%,\n", + "2006\n", + "(1\n", + "3.4\n", + ")。\n", + ",1998—1999\n", + "6.2%。\n", + "1%\n", + "2%,\n", + "”。\n", + "”,\n", + "》2\n", + "”,\n", + "88%\n", + "12%,\n", + "8.16\n", + "5.3\n", + "3056\n", + "300\n", + "”,\n", + "1996\n", + "1.7\n", + "650\n", + "771\n", + "1620\n", + "2400\n", + "87\n", + "1996\n", + "5.56\n", + "5480\n", + "1.8\n", + "6.46\n", + "5487\n", + "1971\n", + "70\n", + "”,\n", + "、DDT\n", + "1996\n", + "”,\n", + "”,\n", + "”。\n", + "5.5∶1,\n", + "3.3∶1\n", + "6∶1。\n", + "3500\n", + "3000\n", + "2·3\n", + "1·5\n", + "2·2\n", + "2150\n", + "1914\n", + "4000\n", + "1·1\n", + "200\n", + "320\n", + "51\n", + "65\n", + "624\n", + "”,\n", + "”。\n", + "”(MERLIN)\n", + "”。\n", + "80\n", + "31\n", + "3000\n", + "109\n", + "29\n", + "39·4\n", + "40·6\n", + "》。\n", + ":“\n", + ",1997\n", + "17\n", + ")(\n", + "32\n", + "70%\n", + "30%\n", + "40%\n", + "),\n", + "”。\n", + "60\n", + ":“\n", + ",“\n", + "70\n", + "87\n", + "———\n", + "31\n", + "2800\n", + "1500\n", + "48\n", + "19\n", + "(6·8\n", + ")。\n", + "”。\n", + "7000\n", + "1·16\n", + "”。\n", + ",“\n", + "”。\n", + "2698·7\n", + "2574\n", + "1998\n", + "31\n", + "31\n", + "31\n", + "124·5\n", + "29\n", + ",《\n", + ",《\n", + "》,\n", + "150\n", + "1%,\n", + "150\n", + "1200\n", + "》。\n", + "———\n", + "》,\n", + "”,\n", + "2001\n", + "1997\n", + "1998\n", + "1997\n", + ");\n", + "1998\n", + "”(3\n", + "),\n", + "1999\n", + "2001\n", + "1996\n", + "80\n", + "”,\n", + "1986\n", + "BIGBANG\n", + "》、《\n", + "1996\n", + "2001\n", + "BIGBANG(\n", + "),\n", + "”。\n", + "2%。\n", + "48\n", + "33\n", + "80\n", + "29\n", + "21\n", + ":“\n", + "”64\n", + ",1972\n", + "”17\n", + "”。\n", + "1986\n", + "28\n", + "72\n", + ",7\n", + "”。\n", + "100\n", + "1000\n", + "———\n", + "”,\n", + "———\n", + "”。\n", + "———\n", + ")(\n", + ",300\n", + "……\n", + ":“4\n", + 
"”。\n", + "1992\n", + ",45\n", + "300\n", + "……4\n", + "27\n", + "”,\n", + "』(\n", + "1957\n", + ",1997\n", + "28\n", + "28\n", + "2000\n", + "29\n", + "9%\n", + "28\n", + "”。\n", + "30%,\n", + "13·1%。\n", + "40%。\n", + "29\n", + "13·1%\n", + ",“\n", + "”。\n", + "”。\n", + "”、“\n", + "”。\n", + "”,\n", + "70\n", + "83\n", + "27\n", + ",“\n", + "”,\n", + "29\n", + "1962\n", + "1996\n", + "156\n", + ",1993\n", + ":“\n", + ":“\n", + "72\n", + "27\n", + "29\n", + ")4\n", + "29\n", + "27\n", + ",15\n", + ",“\n", + "”。\n", + "”,\n", + "27\n", + "300\n", + "28\n", + "1995\n", + "》,\n", + "1996\n", + "1997\n", + "25\n", + "1993\n", + "1980\n", + "1925\n", + "》,\n", + "168\n", + "107\n", + ",《\n", + "》(\n", + "),\n", + "1993\n", + ",1997\n", + "29\n", + "1925\n", + "》。\n", + "》,\n", + ")。\n", + "》。\n", + ",“\n", + "28\n", + "”。\n", + "27\n", + "90\n", + "27\n", + "22\n", + "1997\n", + "28\n", + ",27\n", + "27\n", + "28\n", + "28\n", + "27\n", + "”,\n", + "27\n", + "21\n", + "55\n", + "27\n", + "2000\n", + ":“\n", + "”。\n", + "100\n", + ",21\n", + "2013\n", + "2015\n", + "》,\n", + "”,\n", + ",17\n", + "”,\n", + "255\n", + "”。\n", + "60\n", + "40\n", + "1969\n", + "---\n", + ")(\n", + "21\n", + "28\n", + "27\n", + "21\n", + ":“\n", + "”。\n", + "”,\n", + "2·5\n", + "300\n", + "27\n", + "”。\n", + "36%\n", + "22%,\n", + "1/3。\n", + "26\n", + "13%\n", + ")、\n", + "130\n", + "20%\n", + "80\n", + "112\n", + "32\n", + "、17\n", + "、14\n", + "27\n", + "112\n", + "550\n", + "1997\n", + ",24\n", + "470\n", + "820\n", + "45%\n", + "20%\n", + "30%,\n", + "4·6\n", + "10%\n", + "1996\n", + "”,\n", + "”。\n", + "”,“\n", + "”。\n", + "”、“\n", + "”。\n", + "1996\n", + "”,\n", + "1996\n", + "22\n", + "26\n", + "9%\n", + "13·1%\n", + "26\n", + ":“\n", + "1986\n", + ",1991\n", + "1915\n", + ",1930\n", + ",1936\n", + ")。\n", + "28\n", + "29\n", + "29\n", + "70\n", + "”,“\n", + "”。\n", + "83\n", + "27\n", + ",28\n", + "29\n", + "26\n", + "26\n", + "、LG\n", + "21\n", + "27\n", + "27\n", + "》2000\n", + "75\n", + "251\n", + ",4\n", + "27\n", + "35\n", + "23\n", + "2、\n", + "131\n", + "1、4\n", + "27\n", + "1991\n", + "44\n", + "1997\n", + "240\n", + "21\n", + "8%,\n", + "3%,\n", + "8·8%,\n", + "0·8%。\n", + "11%,\n", + "27\n", + ",“\n", + "700\n", + "35\n", + "206\n", + "36\n", + ",5000\n", + "75\n", + "、160\n", + "、110\n", + "—8\n", + "”。\n", + "”,\n", + "”。\n", + "”(\n", + ",12\n", + "———\n", + ")(\n", + ")(\n", + "80\n", + ",1995\n", + ",1996\n", + "5000\n", + ",12\n", + "400\n", + "23\n", + "25\n", + ",5\n", + "25\n", + ",“\n", + "”。\n", + "26\n", + "(4\n", + "25\n", + ")66\n", + "25\n", + "604\n", + "347\n", + "———\n", + ",“\n", + "———\n", + "”。\n", + "2000\n", + "、“\n", + "———\n", + "25\n", + "2000\n", + "11%\n", + "9%\n", + "13·1%\n", + "25\n", + "1946\n", + "60\n", + "76\n", + "1942\n", + "1944\n", + "22\n", + "425\n", + "25\n", + "500\n", + "17\n", + "3500\n", + "22\n", + "21\n", + "25\n", + "”。\n", + "”,\n", + "60\n", + "26\n", + ",“\n", + "、“\n", + "、“\n", + "———\n", + "32\n", + "251\n", + "、25\n", + ":25\n", + "25\n", + "”。\n", + "”。\n", + "26\n", + "21\n", + ",3\n", + "1995\n", + "26\n", + "2、\n", + "1、\n", + "”、“\n", + "”,\n", + "1997\n", + "4500\n", + "80%,\n", + "1997\n", + "1991\n", + "1991\n", + "250\n", + "3500\n", + "4.1\n", + "———\n", + "1993\n", + "3120\n", + "1995\n", + "、3\n", + "220\n", + "400\n", + "400\n", + "53%,\n", + "77·56%。\n", + "3383·7\n", + "32·35%。\n", + "8000\n", + "”!\n", + "1000\n", + "1000\n", + "28\n", + "40\n", + "60\n", + "80\n", + "500\n", + "40\n", + "1.5\n", + "40\n", + "”,\n", + 
"3.8\n", + "2.5\n", + "2200\n", + "”。\n", + "』、『\n", + "……\n", + "”。\n", + ":“\n", + "6%,\n", + "3900\n", + "94%,\n", + "6%\n", + ":“\n", + "”,\n", + "TITANIC\n", + "》。\n", + "80\n", + "”,\n", + "”,\n", + "”。\n", + "1.5%、10%、1.3%,\n", + "”,\n", + "1997\n", + "15%\n", + "1997\n", + "600\n", + "98.4%,\n", + "1.6%。\n", + "1984\n", + "1984\n", + "”,\n", + "23\n", + ")(\n", + "”。\n", + "63\n", + ":“\n", + ":“\n", + ":“\n", + "”,\n", + "9835\n", + "700\n", + "”。\n", + "”,“\n", + "”。\n", + "1853\n", + ",1884\n", + ",1946\n", + ",10\n", + "》,\n", + "22\n", + ":4\n", + "21\n", + "———“\n", + "”,\n", + "1995\n", + "”,\n", + "1997\n", + "》、\n", + "26\n", + "70\n", + "81\n", + "216\n", + "》、《\n", + "》、\n", + "》、\n", + "23\n", + "1996\n", + "1996\n", + "1990\n", + "1963\n", + "61\n", + "、6\n", + "23\n", + "22\n", + "”。\n", + "23\n", + "200\n", + "22\n", + "23\n", + "23\n", + "22\n", + "”。\n", + ",“\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + ",“\n", + "”。\n", + "”,“\n", + "”。\n", + "”,\n", + "“‘\n", + "”。\n", + ",《\n", + "”。\n", + "》。\n", + "300\n", + ":“\n", + "21\n", + "21\n", + "22\n", + ",21\n", + ",“\n", + "21\n", + "21\n", + "100\n", + ",20\n", + ",21\n", + "》。\n", + "26\n", + "21\n", + ",13\n", + "23\n", + "21\n", + ":“\n", + "700\n", + "350\n", + "),\n", + "43%。\n", + "———3\n", + "”。\n", + "2000\n", + "7.5\n", + "—180\n", + ",1\n", + ",2\n", + "1991\n", + ",3\n", + "6000\n", + ",13\n", + ",300\n", + "1437\n", + "1986\n", + "、2\n", + "、3\n", + "1977\n", + "、1978\n", + "、1983\n", + "1984\n", + "1970\n", + "1000\n", + "———\n", + ")(\n", + ")(\n", + "21\n", + "60\n", + "1/10。\n", + ",1997\n", + "6455\n", + "1996\n", + "6664\n", + "21\n", + "1997\n", + "58\n", + "9800\n", + "23\n", + "22\n", + "23\n", + "1997\n", + "7.75\n", + "17·6%,\n", + "60%\n", + "”。\n", + "”,\n", + "”。\n", + "1991\n", + "250\n", + "70\n", + "55\n", + "21\n", + "22\n", + "3000\n", + "4000\n", + "4000\n", + "800\n", + "”,\n", + "21\n", + ",“\n", + "”。\n", + "22\n", + "59\n", + "、1\n", + "”。\n", + "1990\n", + "19\n", + ",18\n", + "22\n", + "21\n", + "1983\n", + "21\n", + "21\n", + "21\n", + "22\n", + ",350\n", + "1999\n", + "9%\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "17\n", + "21\n", + "21\n", + "”,\n", + "17\n", + ",14\n", + ")21\n", + "21\n", + "21\n", + "22\n", + ")。\n", + "”。\n", + "26\n", + "25\n", + ",“\n", + "”,\n", + "”、“\n", + "”,\n", + ":“\n", + "21\n", + "”,\n", + "”。\n", + "”。\n", + "500\n", + "2000\n", + "2000\n", + "1956\n", + "1993\n", + ",《\n", + "1956\n", + "1993\n", + "1993\n", + "”。\n", + "0.3%,\n", + "”,\n", + "2∶0\n", + "19\n", + "66\n", + "73%。\n", + "19\n", + "63·5%,\n", + "14%。\n", + "19\n", + "150\n", + "1998\n", + "19\n", + "19\n", + "55\n", + "6000\n", + "1980\n", + "1988\n", + "400\n", + "19\n", + "17\n", + "17\n", + "22\n", + ",26\n", + "17\n", + "1972\n", + "1960\n", + "17\n", + "17\n", + "CNN、《\n", + "》、《\n", + "200\n", + "……\n", + "WTO、\n", + "』---\n", + "”。\n", + "17\n", + "17\n", + "17\n", + ",6\n", + "17\n", + "17\n", + "17\n", + "17\n", + "17\n", + "17\n", + "17\n", + "17\n", + "”,“\n", + "”。\n", + "17\n", + "1991\n", + "1993\n", + "1996\n", + ",“\n", + "80\n", + "200\n", + "42\n", + ";《\n", + "……\n", + "》,\n", + "———\n", + ",74%\n", + "1948\n", + "1967\n", + "17\n", + ",540\n", + "97%\n", + "39\n", + "122\n", + ":1·\n", + ";2·\n", + ";3·\n", + "《2000\n", + "》。\n", + "》,\n", + ")6\n", + ",15\n", + "”、“\n", + "2000\n", + "1984\n", + "17\n", + ",“\n", + ",“\n", + "”,\n", + "30%\n", + "”,\n", + "”。\n", + "”。\n", + ",15\n", + "4%\n", + 
"6%\n", + "”;\n", + ",15\n", + "”。\n", + "17\n", + "”,\n", + "》17\n", + "17\n", + "》、《\n", + "》、《\n", + "1972\n", + "17\n", + "、5\n", + "、5\n", + "”,\n", + ",15\n", + ",“\n", + "83\n", + "2000\n", + "2000\n", + ",1997\n", + "30%,\n", + "2%。\n", + "1500\n", + ",2000\n", + "3/4\n", + "200\n", + "—300\n", + "2000\n", + "37\n", + "1993\n", + "2000\n", + "---\n", + "”。\n", + "19\n", + "”。\n", + "”,\n", + "27\n", + "”。\n", + "”。\n", + ",100\n", + "100\n", + "22\n", + "38.7\n", + "350\n", + "100\n", + "”,\n", + "”。\n", + "60\n", + "200\n", + "687\n", + "22\n", + "”。\n", + "”。\n", + "”、“\n", + "1995\n", + "1989\n", + "80\n", + "”。\n", + ":“\n", + ",70—80\n", + "90\n", + "1.2\n", + "8000\n", + "”,\n", + "F—16\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + ",1991\n", + "”。\n", + ",“\n", + "”。\n", + ",13\n", + "80\n", + "”,\n", + "3000\n", + ":“\n", + "”,\n", + "28\n", + "175\n", + "35\n", + ")35\n", + "3000\n", + "75\n", + ")。\n", + "207\n", + "、273\n", + ",“\n", + "———\n", + "”。\n", + "”。\n", + "”。\n", + ",“\n", + "”。\n", + "”。\n", + ":13\n", + ":“\n", + "、《\n", + "》、《\n", + "……\n", + "”。\n", + "1200\n", + "———\n", + "”。\n", + ",20\n", + "1998\n", + "93\n", + "500\n", + "1990\n", + "100\n", + "100\n", + "21\n", + "100\n", + "21\n", + "”。\n", + "36\n", + "1988\n", + "1996\n", + "1988\n", + "2:\n", + "67\n", + "、30\n", + "1:\n", + "1.18\n", + ",80%\n", + "),\n", + "———\n", + "———\n", + "1.2\n", + "130\n", + "100\n", + "1995\n", + "1997\n", + "2.6\n", + "1.3\n", + "”,\n", + "31\n", + "4600\n", + "1997\n", + "》、《\n", + "19\n", + "1.3\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "”。\n", + "1995\n", + "1997\n", + "10.5\n", + "1997\n", + "5.6\n", + "53%,\n", + "2%\n", + "90\n", + "80%\n", + "24%\n", + "”、“\n", + "”、“\n", + "2010\n", + "”,\n", + "1/4,\n", + "1997\n", + "52%;\n", + "8%,\n", + "600\n", + "74%,\n", + "26%,\n", + "7.2%,\n", + "8%,\n", + "3%,\n", + ",“\n", + "25\n", + "”,\n", + "”。\n", + "”。\n", + "”。\n", + ":“\n", + "70\n", + "”。\n", + ",“\n", + "”,\n", + "”,“\n", + "”。\n", + "”,“\n", + "25\n", + "”。\n", + "42\n", + "1998—1999\n", + "”,\n", + "”,\n", + "21\n", + "1172\n", + ":5\n", + "1172\n", + "》;\n", + "1172\n", + ":5\n", + ":“\n", + ",“\n", + ",10\n", + "88·7\n", + "55%,\n", + "70·3\n", + "”,\n", + "”,\n", + "”。\n", + "21\n", + "100\n", + "21\n", + "100\n", + "100\n", + "1993\n", + ",150\n", + "23\n", + ",1\n", + ",8\n", + "》、《\n", + "2008\n", + "400\n", + "34\n", + "》。\n", + "31\n", + "34\n", + ",“\n", + "”。\n", + ":“\n", + "”、“\n", + "”。\n", + "2000\n", + ",“\n", + "”。\n", + "25\n", + ",“\n", + "———\n", + "”。\n", + "1994\n", + "164\n", + "1997\n", + "230\n", + ",12\n", + "3.9%\n", + "1994\n", + "1993\n", + "1978\n", + "《1997\n", + "1994\n", + ",7\n", + "”。\n", + ";“\n", + "”。\n", + "”。\n", + "”。\n", + "6.8\n", + "7.2\n", + "1993\n", + "1991\n", + "”。\n", + ";5\n", + ";6\n", + "44\n", + ",135\n", + "”。\n", + "53\n", + "”。\n", + "1991\n", + "”。\n", + ",100\n", + "”。\n", + ",“\n", + "”。\n", + ",1/3\n", + "21\n", + "”,\n", + "》。\n", + "1994\n", + "”。\n", + "”,\n", + ",“\n", + "1996\n", + "1988\n", + "42\n", + "1993\n", + "17%\n", + ",50%\n", + ",33%\n", + "”。\n", + "”。\n", + "40\n", + "100\n", + "62\n", + "21\n", + ":“\n", + "———\n", + "”。\n", + "》。\n", + "25\n", + "1942\n", + "1993\n", + "1990\n", + "1963\n", + ",1990\n", + "1943\n", + "54\n", + "”。\n", + "”。\n", + "1985\n", + ",1987\n", + "》,\n", + "1991\n", + "1997\n", + "41.3\n", + "26.9\n", + "14.4\n", + "55\n", + ",1997\n", + "54\n", + "80\n", + "2000\n", + "21\n", + "”。\n", + "、《\n", + "》、\n", + "1979\n", + 
"21\n", + "43\n", + ",“\n", + "”。\n", + "1970\n", + "200\n", + "),\n", + ":7\n", + "”,\n", + "”。\n", + "51\n", + "48\n", + "1172\n", + "———\n", + "”。\n", + "”。\n", + "”。\n", + "”、“\n", + "”;\n", + "125\n", + ":“\n", + "”。\n", + "”,\n", + ",“\n", + "”。\n", + "2003\n", + "2008\n", + ",130\n", + "》、《\n", + "21\n", + "21\n", + ",15\n", + "19\n", + "4000\n", + ",1300\n", + "1.4\n", + "1.9\n", + "3.3%\n", + "4.1%。\n", + ":“\n", + "25\n", + "21\n", + "”。\n", + ",“\n", + "”,“\n", + "”。\n", + ",185\n", + "》、《\n", + "》、《\n", + "”。\n", + "”,\n", + "”;\n", + "2.9\n", + "20.4%,\n", + "18.6%,\n", + "20.6%。\n", + "”。\n", + "21\n", + "60%\n", + "40%\n", + "50%,\n", + "30%\n", + "40%。\n", + "60%。\n", + "”,\n", + ":“\n", + "》,\n", + "”。\n", + "2008\n", + ",“\n", + "”。\n", + "5·6\n", + "》6\n", + ",“\n", + ",“\n", + "”,\n", + "”。\n", + "5000\n", + "97\n", + "500\n", + "5000\n", + "7000\n", + "200\n", + "400\n", + "60\n", + "3000\n", + "2000\n", + "”。\n", + ",“\n", + "1800\n", + "500\n", + "800\n", + "”。\n", + "———“\n", + "1996\n", + "———\n", + "”,\n", + "130\n", + "———\n", + "”。\n", + "1998\n", + "2.7\n", + "3100\n", + "3.8\n", + ",6\n", + ",6\n", + "”A300\n", + "190\n", + "C130\n", + "40\n", + "60\n", + "”A310\n", + "210\n", + "70\n", + "80\n", + "C130\n", + "90\n", + "1993\n", + "1997\n", + "25\n", + "1992\n", + "0.7%\n", + "0.36%\n", + "77.7%\n", + "98%。\n", + "78%\n", + ",1993\n", + "2/3\n", + "7000\n", + ",1/10\n", + "60%;\n", + "60%\n", + "15%\n", + "50%\n", + "3.4\n", + "100\n", + "2100\n", + "95\n", + "25\n", + "35\n", + ",20\n", + "70\n", + "”,\n", + "70·8%\n", + "”。\n", + "”,\n", + "23\n", + "、30\n", + "》,\n", + "1172\n", + "》。\n", + "……\n", + "”,\n", + "”,\n", + "”,\n", + ":“\n", + "VCD\n", + "1995\n", + "1997\n", + "———\n", + "1995\n", + "1993\n", + ":“\n", + "1996\n", + ":“\n", + "”。\n", + "1996\n", + "”。\n", + "1994\n", + "———\n", + "……\n", + "———\n", + "……\n", + "———\n", + "……\n", + "……\n", + "……\n", + "……\n", + "4:\n", + "3:\n", + "2:\n", + ",6\n", + "1:\n", + "”。\n", + "”。\n", + "”。\n", + "”,\n", + "”。\n", + "3824\n", + "3642\n", + "5.6%,\n", + "”,\n", + "3609.9\n", + "4120\n", + "14%;\n", + "82.8\n", + "139.1\n", + "68%;\n", + "62%。\n", + "”。\n", + "”,\n", + "3.91\n", + "0.5\n", + "1998—1999\n", + ",2\n", + ",“\n", + "”,\n", + "”。\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "”,\n", + "”。\n", + "”。\n", + "40\n", + "”,\n", + "”,\n", + "”,“\n", + "”,“\n", + "”。\n", + ",“\n", + "”。\n", + "1998—1999\n", + "14%,\n", + "107\n", + "”。\n", + "28\n", + "”。\n", + "1996\n", + "1967\n", + "17\n", + "15%。\n", + "15%\n", + ":“\n", + "21\n", + ":“\n", + ":“\n", + "》。\n", + "”。\n", + "1/4\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + ",“\n", + "1999\n", + "1995\n", + "27\n", + "”,\n", + "27\n", + "1994\n", + "”,\n", + "”,\n", + "”;\n", + "”,\n", + ":“\n", + ",“\n", + "”。\n", + ")3\n", + "1995\n", + "1990\n", + "1982\n", + "”。\n", + "》、《\n", + "》、《\n", + "》3\n", + "25\n", + ",《\n", + "1·6\n", + "28\n", + "F—16\n", + "1997—1998\n", + "1995\n", + "2/3,\n", + "1948\n", + "1530\n", + "111\n", + "75\n", + "1993\n", + "77\n", + "1991\n", + "”,\n", + "1948\n", + "1988\n", + "40\n", + "1988\n", + "1998\n", + "36\n", + ",80\n", + "49\n", + "17\n", + "29\n", + "1948\n", + "29\n", + "---\n", + "”,\n", + ",“\n", + "”。\n", + "38000\n", + "”,\n", + "”。\n", + ":“\n", + "”,\n", + ",“\n", + "”,\n", + "28\n", + "3·3%\n", + "2·3%。\n", + "300\n", + "260\n", + "1·15\n", + "9%,\n", + "14·8\n", + "11%。\n", + "40\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + ",“\n", + "”,\n", + "”,\n", + "”。\n", + "200\n", + "28\n", + 
"2002\n", + "1993\n", + "50%\n", + "20%\n", + "22%。\n", + "200\n", + "220\n", + "12%\n", + "2010\n", + "80\n", + ":“\n", + ",5\n", + "31\n", + "2000\n", + "29\n", + "”。\n", + "”。\n", + ",1962\n", + "1948\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "”。\n", + "21\n", + "34\n", + "2、6\n", + "90\n", + "1、\n", + "28\n", + ":“\n", + "21\n", + "250\n", + "1980\n", + "5000\n", + "”。\n", + "”,\n", + "200\n", + "3.2\n", + "180\n", + ",1978\n", + "21\n", + "61\n", + "”,\n", + "”。\n", + "”。\n", + "46\n", + ",5\n", + ":“\n", + "》,5\n", + "1999\n", + ",5\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "2000\n", + ",“‘\n", + "”。\n", + "1998\n", + ",1999\n", + ",1999\n", + "2000\n", + "”。\n", + "21\n", + ":“\n", + "1998\n", + "”。\n", + "14·6%\n", + "40%,\n", + "21\n", + "132\n", + "47\n", + "132\n", + "———\n", + "1995\n", + "10%;\n", + "1948\n", + "23\n", + "19\n", + "132\n", + "31\n", + "1991\n", + "74\n", + "1964\n", + "1991\n", + "25\n", + "”。\n", + "70\n", + "5117\n", + "53\n", + "7654\n", + "23\n", + "”。\n", + "”,\n", + ",4\n", + "100\n", + "40\n", + "33\n", + ",17\n", + "29\n", + ",“\n", + "”。\n", + ",“\n", + ",“\n", + "113\n", + "3%,\n", + "78\n", + "50.43%,\n", + "34.38%\n", + "6·33%\n", + "72.77%\n", + ",31\n", + ",31\n", + "31\n", + "31\n", + ",“\n", + ",“\n", + "29\n", + "”、\n", + "》。\n", + "2、\n", + "1、5\n", + "”,\n", + "”,\n", + "》,\n", + "”,\n", + "21\n", + "”,\n", + "”。\n", + ":“\n", + ":“\n", + "》,\n", + "31\n", + "4000\n", + "29\n", + "31\n", + "425\n", + "”,\n", + "”。\n", + "1992\n", + ",“\n", + "”。\n", + "60\n", + "”5\n", + "28%\n", + "33%,\n", + "70\n", + "54\n", + "1070\n", + "40%,\n", + "”,\n", + "”。\n", + "”,\n", + ",“\n", + "”。\n", + "”,\n", + "29\n", + "29\n", + "19\n", + "31\n", + "29\n", + "28\n", + "27\n", + "31\n", + "27\n", + "31\n", + "2、5\n", + "29\n", + "1、\n", + "29\n", + "29\n", + "》,\n", + "》,\n", + "”。\n", + "》,\n", + "31\n", + ",“\n", + "”。\n", + "”。\n", + "31\n", + "》。\n", + "》。\n", + "21\n", + "21\n", + "31\n", + "31\n", + "552\n", + "10%\n", + "444\n", + ",90\n", + "》5645\n", + "390\n", + "28\n", + ":《\n", + "》。\n", + "40\n", + "4.8\n", + "50%。\n", + "500\n", + "28\n", + "500\n", + "2000\n", + "”,6\n", + "29\n", + "2000\n", + "70\n", + "21\n", + "56\n", + "1000\n", + "100\n", + "300\n", + "500\n", + "6000\n", + "),\n", + "”。\n", + "”。\n", + "1990\n", + ",“\n", + "”。\n", + "29\n", + "580\n", + "1164\n", + "5%,\n", + "200\n", + "28\n", + "15%\n", + "20%\n", + "70%,\n", + ")。\n", + "29\n", + "1997\n", + "160\n", + "13.6%。\n", + ",1996\n", + "25\n", + "……(\n", + "28\n", + ",1/4\n", + "(1/4\n", + ")。\n", + "……\n", + "1/8\n", + "27\n", + "1/8\n", + ",A、B\n", + "”,“\n", + "”。\n", + ",“\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "90\n", + "”。\n", + ",“\n", + "”。\n", + "1/8\n", + "———\n", + "1/8\n", + "1998\n", + "”。\n", + ",“\n", + "”。\n", + "1/8\n", + "1/8\n", + "1/8\n", + "———\n", + ",“\n", + "”。\n", + ":“\n", + ":“\n", + "1/8\n", + "1/8\n", + "26\n", + ",G\n", + "2∶0\n", + "1∶1\n", + "———\n", + "1∶0\n", + "4∶1\n", + ",20\n", + "1/8\n", + "28\n", + "115\n", + "』。\n", + ",10\n", + ":6\n", + ",40\n", + "”,\n", + "85%\n", + "62%,\n", + "1822\n", + "1100\n", + "90%,\n", + "24237\n", + "3000\n", + "”,\n", + "1996\n", + "240\n", + "80%。\n", + ":122\n", + "95%,\n", + "1000\n", + "”。\n", + "27\n", + "1994\n", + "500\n", + "1998\n", + "》,\n", + "”,\n", + "』:\n", + "』,\n", + "———\n", + "———\n", + "70\n", + "》,\n", + ",『\n", + ",《\n", + "”。\n", + ",80%\n", + "”,\n", + "、(\n", + "),\n", + "”,\n", + "1644\n", + "”,\n", + "”。\n", + "》,\n", + "”、“\n", + "”、“\n", + "1900\n", + 
"》、《\n", + ",“\n", + "”,\n", + ":“\n", + "”,\n", + "———\n", + "———\n", + "”。\n", + ",“\n", + "”,\n", + "”,\n", + "”。\n", + "1898\n", + "1998\n", + "》,\n", + "》,\n", + "》。\n", + "1997\n", + "1997\n", + "9711\n", + ",1680\n", + "、850\n", + "3300\n", + "2000\n", + "400\n", + "476\n", + "1997\n", + "6—8\n", + "5—7\n", + "(1996\n", + "—1997\n", + "1987\n", + "1997\n", + "1997\n", + "1997\n", + "(%)\n", + "》。\n", + "1997\n", + "”,3\n", + "”。\n", + "1000\n", + "870\n", + ",171\n", + "111\n", + "6000\n", + "1000\n", + "200\n", + "926\n", + "7698\n", + "7.64%(\n", + "3)。\n", + "1997\n", + "124\n", + "4000\n", + "1000\n", + "28\n", + "63\n", + "433\n", + "17300\n", + "667\n", + "30000\n", + "10%,\n", + "250\n", + "29.4%,\n", + "6347\n", + "14%,\n", + "1244\n", + "3862\n", + "212\n", + "36\n", + "113\n", + "55\n", + "77\n", + "52\n", + "37\n", + "17\n", + "599\n", + "》。\n", + "30%。\n", + "3.69\n", + "0.45\n", + "4.04\n", + "7.02\n", + "43\n", + "1780\n", + "),\n", + "3185.1\n", + "1.2\n", + "1997\n", + "375\n", + "90%\n", + "1.3\n", + "200\n", + "0.5%,\n", + "0.3%,\n", + "3.9\n", + "40%,\n", + "0.33\n", + "1/2。\n", + "19.9\n", + "1996\n", + "1997\n", + "25.8\n", + "(“\n", + "206.9\n", + "168.6\n", + "1997\n", + "25\n", + "421.2\n", + "100\n", + "386.7\n", + "1997\n", + "2465\n", + "35440\n", + "733.7\n", + "1.3\n", + "13.92%,\n", + "3%—4%;\n", + "0.114\n", + "160\n", + "105\n", + "6932.1\n", + "38%;\n", + "551.3\n", + "71.4%;\n", + "375.9\n", + "2100\n", + ",70%\n", + "4210\n", + "23.22\n", + "12.83\n", + ",1997\n", + "11.65\n", + "6.28\n", + "53.9%。\n", + "1993\n", + ",1994\n", + "1/3。\n", + "1000\n", + "5313\n", + "39.0%;\n", + "8310\n", + "61.0%。\n", + "40%\n", + ",1997\n", + "1997\n", + "1997\n", + "27\n", + "(%)\n", + "(%)(%)(\n", + "”。\n", + "192.7\n", + "7661\n", + "126.9\n", + "、66.3\n", + "578.6\n", + "1997\n", + "476\n", + "807.5\n", + "10.0%。\n", + "2)。\n", + "17\n", + "96\n", + "97\n", + "1375513827\n", + "67487661\n", + "90\n", + "91\n", + "92\n", + "93\n", + "94\n", + "95\n", + "8851982011264119591233713077\n", + "21212382828384547826014\n", + ",“\n", + "1997\n", + "1.4\n", + "1)。\n", + "57.1%;\n", + "71.7%;\n", + "80.4%;\n", + "21.7%;\n", + "50.0%。\n", + "43\n", + "55\n", + "33\n", + "60\n", + "53.5—65.8\n", + "56.5\n", + ")。\n", + "49\n", + "70\n", + "54.9%。\n", + "67.3—77.8\n", + "71.0\n", + ")。\n", + "1997\n", + ";“\n", + "”(\n", + "、pH\n", + "114\n", + "142\n", + "1997\n", + "15.30\n", + "/(\n", + "),\n", + "21.48\n", + "/(\n", + "),\n", + "9.29\n", + "/(\n", + ")。\n", + "381\n", + "200\n", + "(200\n", + "67\n", + "72.0%。\n", + "32—741\n", + "291\n", + "100\n", + "34\n", + "(50\n", + "),\n", + "36.2%。\n", + "49\n", + "41\n", + "4—140\n", + "45\n", + "72\n", + "60\n", + "52.3%\n", + "37.5%\n", + "(60\n", + ")。\n", + "3—248\n", + "66\n", + "1997\n", + "———\n", + "1997\n", + "1997\n", + "55\n", + "21\n", + "ISO14001\n", + "ISO14000\n", + ")12\n", + "1997\n", + "(ISO14000)\n", + "28\n", + "79\n", + "231\n", + "29\n", + "1997\n", + "361\n", + "29\n", + "1997\n", + "17\n", + "900\n", + "28\n", + "70\n", + "”,\n", + "1997\n", + "1997\n", + "180\n", + "106.6\n", + "523\n", + "177.8\n", + "1997\n", + "502.4\n", + "257.2\n", + "116.4\n", + "128.8\n", + "28\n", + "10.3%。\n", + "1997\n", + "45.2%,\n", + "2.2\n", + "1077\n", + "1.0%。\n", + "6.6\n", + "62.3%。\n", + "1997\n", + "10.6\n", + "2010\n", + "2000\n", + "pH\n", + "4.5\n", + "2000\n", + "2000\n", + "”(\n", + "1998\n", + "》,\n", + "2006\n", + "2010\n", + "1211\n", + "1301\n", + "1997\n", + "232\n", + "2.7\n", + "ODS60121(ODP\n", + 
"1996\n", + "ODS2.3(ODP\n", + "———\n", + "CFC11\n", + "1991\n", + "1997\n", + "1994\n", + "ODS\n", + "(ODS)\n", + ",1997\n", + "42\n", + "112\n", + "90.4%\n", + "79.4%,\n", + "0.4\n", + "4.4\n", + "1997\n", + "88.4%\n", + "76.9%。\n", + "pH\n", + "5.0,\n", + "70%。\n", + "pH\n", + "5.0,\n", + "70%。\n", + "pH\n", + "5.6\n", + "90%\n", + ",71.7%\n", + "pH\n", + "4.5\n", + "pH\n", + "5.6\n", + "44\n", + "47.8%,\n", + ",75%\n", + "pH\n", + "5.6。\n", + "1997\n", + "pH\n", + "3.74—7.79\n", + "1505\n", + "548\n", + "36.4%,\n", + "957\n", + "685\n", + "43.8%,\n", + "880\n", + "1873\n", + "1565\n", + "83.6%;\n", + "308\n", + "1363\n", + "73.6%;\n", + "489\n", + "1997\n", + "2346\n", + "1852\n", + "78.9%;\n", + "494\n", + "1997\n", + "31\n", + "100\n", + "1562\n", + "71.4%,\n", + "14.3%,\n", + "14.3%。\n", + "———《\n", + "》。\n", + "99\n", + "325\n", + "1997\n", + "78.9%,\n", + "84.7%;\n", + "54.4%,\n", + "61.8%,\n", + "11%\n", + "12%;\n", + "16%,\n", + "47%,\n", + "81%,\n", + "38%。\n", + "44%,\n", + "46%,\n", + "94%,\n", + "42%。\n", + "18.7%,\n", + "21.4%,\n", + "6.5%,\n", + "53.4%。\n", + ")、\n", + "70.6%\n", + "50%\n", + "50%\n", + "52%\n", + "71%\n", + "62.5%\n", + ",29.2%\n", + "70\n", + "21\n", + ",1996\n", + "133\n", + ",1997\n", + "226\n", + "66.7%\n", + "67.7%\n", + "COD\n", + "666\n", + "407\n", + "(COD)\n", + "1757\n", + "COD\n", + "1073\n", + "COD\n", + "684\n", + "188\n", + "39\n", + "227\n", + "189\n", + "416\n", + "1997\n", + "1997\n", + "”,\n", + ",《\n", + ",“\n", + "”,\n", + "CD\n", + "CD\n", + "》,\n", + "———\n", + ",“\n", + ",《\n", + "”。\n", + "》,\n", + "———\n", + "》,\n", + "》,\n", + "1997\n", + ",《\n", + "……\n", + ":“\n", + "”。\n", + ",“\n", + "”,\n", + "1991\n", + "1944\n", + "1945\n", + "1946\n", + "》、《\n", + "》,\n", + "……\n", + "———\n", + "———\n", + "》、《\n", + "》、《\n", + "》、《\n", + "130\n", + "(2)\n", + "75\n", + "1987\n", + "32\n", + "80\n", + "201\n", + "———《\n", + "》、《\n", + "》。\n", + "23\n", + "———\n", + "4.2\n", + "23\n", + "”,\n", + "23\n", + ")、\n", + "”,\n", + ":“\n", + "200\n", + ",5\n", + ":“\n", + "”6\n", + "23\n", + ":“\n", + "”90\n", + "1993\n", + "———“\n", + "90\n", + "“030”\n", + "29\n", + "1966\n", + "“030”\n", + "1956\n", + "1937\n", + ":“\n", + "2000\n", + ":“\n", + "……\n", + "……”2\n", + "25\n", + ":“\n", + ":“\n", + "1996\n", + "2600\n", + "250\n", + "500\n", + "1000\n", + "184\n", + "1996\n", + "80\n", + "300\n", + "1996\n", + "1989\n", + ",1992\n", + "1998\n", + "31\n", + "63\n", + "300\n", + "52%,\n", + "”。\n", + "1977\n", + ",1981\n", + "2006\n", + "95\n", + "500\n", + "2001\n", + "90\n", + "23\n", + "2000\n", + "1%;\n", + "30%\n", + "100%;\n", + "98.8%\n", + "84.4%,\n", + "1.5%;\n", + "95%,\n", + "85%。\n", + "268\n", + "25\n", + "2000\n", + "6.56\n", + "CFC\n", + "CFC\n", + "CFC\n", + "CFC\n", + ",1999\n", + "CFC\n", + "22\n", + "CFC(\n", + "CFC\n", + "CFC\n", + "VCD、CD、LD\n", + "102263\n", + "26455\n", + "74\n", + ")529\n", + "169005\n", + "7000\n", + "”。\n", + "100\n", + "22\n", + "100\n", + "40\n", + ":6\n", + "19\n", + "”。\n", + "”:\n", + "160\n", + "”,\n", + "70\n", + "40\n", + "19\n", + "230\n", + "”。\n", + "86\n", + "”,\n", + "……\n", + "”。\n", + "35\n", + "33\n", + "……\n", + "、11\n", + "、86\n", + "———\n", + "———\n", + "1978\n", + "23\n", + "、《\n", + "100\n", + "22\n", + "MPS\n", + "WORD97\n", + ":“\n", + "52\n", + "300\n", + "33\n", + "1000\n", + ",3000\n", + "300\n", + "”,\n", + "———\n", + "19\n", + "———\n", + "”,\n", + "1996\n", + "》,\n", + "”5\n", + "”7\n", + "”5\n", + "”,\n", + "”,\n", + "57\n", + "”、\n", + "”,\n", + "62\n", + "1981\n", + "”。\n", + 
"40\n", + "),\n", + "1848\n", + ")。\n", + "”、“\n", + "21\n", + ":6\n", + "19\n", + ")3\n", + "),\n", + "53\n", + "20%\n", + "1997\n", + "351\n", + "1/10\n", + "1997\n", + "3.1\n", + "1.7\n", + "27569\n", + "4.4%,\n", + "1997\n", + "30566\n", + "》、《863\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1997\n", + "368\n", + "10.4%。\n", + "1996\n", + "122.4\n", + "36.8%。\n", + "60%\n", + "”,\n", + "”。\n", + "1987\n", + "———\n", + ")、\n", + "1.2\n", + "1000\n", + "68\n", + "28\n", + "、IBM\n", + "3—5\n", + "1000\n", + "”。\n", + "60\n", + "1500\n", + "”,\n", + "30%\n", + "100\n", + ":“\n", + "”、“\n", + "”。\n", + "”,“\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”。\n", + "”,“\n", + "”,\n", + "———\n", + "、“\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "”(\n", + ")、\n", + ")、\n", + "”,\n", + "”,\n", + ",“\n", + "”,“\n", + "”。\n", + ":“\n", + "”,\n", + "”,\n", + "》,\n", + "》、\n", + "》、\n", + "1997\n", + "1980\n", + "1988\n", + "(1645\n", + "(1674\n", + "、“\n", + "”、“\n", + "”、\n", + "”。\n", + ")、\n", + "40\n", + "55\n", + "42\n", + ",16\n", + "27\n", + "、MBA(\n", + "1958\n", + ",40\n", + "40\n", + "2133\n", + "》,5\n", + "2.31\n", + "17\n", + "》,\n", + "1997\n", + "300\n", + "150\n", + "100\n", + "”,\n", + ",“\n", + "3663\n", + "”。\n", + "1.2\n", + "100\n", + "25\n", + "1996\n", + "1996\n", + "1993\n", + "1990\n", + "”。\n", + "1990\n", + "0.5\n", + "1100\n", + "300\n", + "1.2\n", + "GMP\n", + "171\n", + "1993\n", + "1693\n", + "38.05%,\n", + ",“\n", + "95%,\n", + "70%\n", + "40\n", + "1995\n", + "110\n", + ":“\n", + "171\n", + "1800\n", + "171\n", + "40\n", + "1982\n", + "1992\n", + "2500\n", + ",5\n", + "90%,\n", + "90%,\n", + "1959\n", + "40\n", + "40\n", + "50%\n", + "70\n", + "1000\n", + "1000\n", + "1997\n", + "(GDP)\n", + "74772\n", + ",“\n", + ",“\n", + ",“\n", + "”。\n", + ":“\n", + "……\n", + "100\n", + ":“\n", + "400\n", + "1∶5\n", + "、1∶1\n", + "80%\n", + "1∶5\n", + ";1∶1\n", + "5—10\n", + "250\n", + "22\n", + "1100\n", + ")“\n", + "—1\n", + "17\n", + "—1\n", + ",16\n", + "87.5\n", + "0.5\n", + "”,\n", + "80\n", + "40\n", + "40\n", + "》、《\n", + "》、《\n", + "26\n", + "1400\n", + "1600\n", + "1595\n", + "17\n", + "),\n", + "45\n", + "),\n", + "》(\n", + "》)\n", + "245\n", + "”。\n", + "”、“\n", + "”,\n", + "100\n", + "17\n", + "”,\n", + "100\n", + ",“\n", + "25\n", + "》,\n", + ",“\n", + "100\n", + "200\n", + "”,\n", + "150\n", + "100\n", + "”、“\n", + ")。\n", + "3.4%,\n", + "50%\n", + "30%。\n", + "200\n", + "200\n", + "”、“\n", + "1994\n", + ":“\n", + "5%,\n", + "52%,\n", + "3.3\n", + "1986\n", + "42\n", + "800\n", + "40\n", + ",1995\n", + "1997\n", + "52\n", + "23\n", + "1997\n", + "》,\n", + "1985\n", + "1995\n", + "、1996\n", + "、1997\n", + "33.87\n", + "、38.65\n", + "、42.57\n", + "9.33、1.83、12.08\n", + "》,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,\n", + "2460\n", + "1992\n", + "21\n", + "》。\n", + "540\n", + "262.2\n", + "27.3%。\n", + "17\n", + "”T\n", + "———\n", + "———\n", + "3∶0\n", + "1∶2\n", + "2∶0\n", + "17\n", + "1∶2\n", + "2∶1\n", + "』。\n", + "MVP\n", + "NBA\n", + ")。\n", + "NBA\n", + "(MVP)\n", + ",“\n", + "45\n", + "NBA\n", + "59\n", + "83\n", + "81∶83\n", + "90\n", + "”。\n", + "NBA\n", + "87∶86\n", + "4∶2\n", + "NBA\n", + "32\n", + "120\n", + "”。\n", + "73\n", + ",“\n", + "”。\n", + "1∶0\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + ",20\n", + "”(\n", + "2∶0,\n", + "42\n", + "2∶0\n", + "27\n", + "3∶1\n", + "1∶0\n", + "2700\n", + "21\n", + "148\n", + "3000\n", + 
"3/4。\n", + ",《\n", + ")》\n", + ":《\n", + ")》\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "20%\n", + "21\n", + "21\n", + "---\n", + "1996\n", + "390\n", + "1000\n", + "343\n", + "53042\n", + ",1996\n", + "20%,\n", + "75%。\n", + "1996\n", + "80\n", + ",1983\n", + "52%,75%\n", + "65\n", + "1996\n", + "100\n", + "1997\n", + "”(\n", + ")、“\n", + "”(\n", + ")、“\n", + "”(\n", + ":1996\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,\n", + "》。\n", + "2∶3\n", + "3∶0\n", + "15∶12\n", + "15∶8\n", + "1∶5\n", + "15∶11\n", + "17∶16\n", + "8∶4、13∶9\n", + "、14\n", + "、15\n", + "、16\n", + "1∶3\n", + "3∶2\n", + ":“\n", + ",“\n", + "”。\n", + "”。\n", + ",“\n", + ",“\n", + ":“\n", + "”“\n", + ":“\n", + "……”\n", + ":“\n", + "350\n", + "200\n", + ":“\n", + ":“300\n", + "“FILA”。\n", + ":“\n", + ",350\n", + "1200\n", + "”。\n", + ":“\n", + "145\n", + "“FILA”\n", + ":“\n", + "”6\n", + "”。\n", + "”。\n", + "”。\n", + ",“\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "1∶0\n", + "28\n", + "”。\n", + "300\n", + "”,\n", + "40\n", + "75\n", + "》6\n", + "19\n", + "》,\n", + "》,\n", + "》,\n", + "》。\n", + "97/98\n", + "0∶1\n", + "0∶0\n", + "1∶0\n", + "28\n", + "”。\n", + "300\n", + "100\n", + "97%\n", + "1995\n", + "1995\n", + "4000\n", + "1991\n", + ":“\n", + "’,‘\n", + "70\n", + "1988\n", + "500\n", + "2000\n", + "62\n", + "———\n", + "500\n", + "21\n", + "———\n", + "43\n", + "26\n", + ",20\n", + "200\n", + "》、《\n", + "》、《\n", + "34\n", + "43\n", + "43\n", + "2400\n", + "1995\n", + "43\n", + "———\n", + "———\n", + "———\n", + "———\n", + "》。\n", + "”、“\n", + "”、“\n", + "》,\n", + ")。\n", + "』。\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "”,\n", + "”,“\n", + "”、“\n", + "”,“\n", + "”,\n", + "、“\n", + ",《\n", + ",《\n", + ":『\n", + ":『\n", + "』。\n", + "』。\n", + ":『\n", + "》。\n", + ",《\n", + "》,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "---\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "1986—1997\n", + ",“\n", + ",1919\n", + "80\n", + "”、“\n", + "1840\n", + "1949\n", + "4000\n", + "”。\n", + "”。\n", + "1995\n", + "”。\n", + ",1\n", + "———\n", + "100\n", + "”、“\n", + "”、“\n", + "”、“\n", + "《21\n", + "100\n", + ":118\n", + "100\n", + "94.5%,\n", + "48.5\n", + "22.6\n", + "61\n", + "1995\n", + ",11\n", + "———\n", + "98%\n", + "5800\n", + "”,\n", + "14000\n", + "304\n", + "1997\n", + "45\n", + "7.36\n", + "19.50%。\n", + "”(\n", + "2%\n", + "3%;\n", + ",1997\n", + "68.65\n", + "19.68%。\n", + "22\n", + "1000\n", + "530\n", + "2400\n", + "3400\n", + "60\n", + "25%,\n", + "51%,1997\n", + "1992\n", + "5000\n", + "4000\n", + "4000\n", + "6000\n", + "》、《\n", + "》、《\n", + "》,\n", + "》、《\n", + "》、《\n", + "》。\n", + "”,\n", + "1991\n", + ",《\n", + "———\n", + "240\n", + "61.9%,\n", + "”。\n", + "42\n", + "90%\n", + "》。\n", + "1997\n", + "……\n", + ";1995\n", + "、1996\n", + "21\n", + "4%\n", + "1997\n", + "27\n", + "1997\n", + "27\n", + "”,\n", + "”,“\n", + "”。\n", + "1994\n", + "1995\n", + "80\n", + ",1997\n", + "600\n", + "11200\n", + "GPS\n", + "GPS\n", + "GPS\n", + "GPS\n", + "818\n", + "GPS\n", + "1396\n", + "533\n", + ",1998\n", + "100—150\n", + "”。\n", + ",“\n", + "”、“\n", + "”。\n", + "40%\n", + ",60%\n", + "17\n", + "”、“\n", + "1993\n", + "80\n", + "”。\n", + "”———\n", + "VCD、DVD,\n", + "VCD\n", + "2838\n", + "、2858\n", + "VCD\n", + "28\n", + ",“\n", + "29\n", + "100\n", + "VGA\n", + "”,\n", + "4000\n", + ",“\n", + ":“\n", + "”,“\n", + "13.08\n", + "0.61\n", + "2.55\n", + "8—14\n", + "4.73\n", + "3.29\n", + "5.73\n", + "1/3\n", + "5000\n", + "”,\n", + 
"3000\n", + "41\n", + ":“\n", + "40\n", + ",70\n", + "”。\n", + "”、“\n", + "”,\n", + "41\n", + "1000\n", + "5500\n", + "6500\n", + "600\n", + "160\n", + "600\n", + "21\n", + "14500\n", + "50%\n", + "”,\n", + "》,\n", + "18—20\n", + "”,\n", + ")6\n", + "105\n", + "25\n", + "》、《\n", + "》、《\n", + "》、《\n", + "JgE\n", + ":“\n", + ",“\n", + "”。\n", + "1995\n", + "1989\n", + ",1994\n", + "8800\n", + "”、“\n", + "3000\n", + "VCD\n", + "17、18\n", + "5000\n", + "”、“\n", + "1997\n", + "”、“\n", + "』,\n", + "』,\n", + "』、『\n", + "”“\n", + ",“\n", + ":“\n", + "55\n", + "75\n", + "2000\n", + "……”“\n", + "800\n", + "”“\n", + "400\n", + "”“\n", + "1000\n", + "、1000\n", + "40\n", + ",100\n", + "140\n", + "100\n", + "310.5\n", + "230\n", + "163\n", + "130\n", + "100\n", + "147.5\n", + ",1\n", + "100\n", + "100\n", + "100\n", + ":163\n", + ",4\n", + "310.5\n", + ":“\n", + ",47!\n", + ",1\n", + ":“\n", + "《1997\n", + "、“\n", + "”、\n", + "DNA\n", + "”,\n", + "”,\n", + "40\n", + "21\n", + "),\n", + "”,\n", + "200\n", + "175\n", + "180\n", + "1000\n", + "”,\n", + "35\n", + ",180\n", + "TIPS\n", + "5000\n", + ",6\n", + "、34\n", + "———\n", + "2000\n", + "15%。\n", + "83%\n", + ":“\n", + "———\n", + "1990\n", + "———\n", + ",80\n", + "200\n", + "100\n", + ",90\n", + "———5\n", + "》、\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1991\n", + "”。\n", + ",1997\n", + "”。\n", + "1997\n", + "1.5\n", + "400\n", + "1997\n", + "2000\n", + ",5\n", + "877\n", + ":5000\n", + ",3600\n", + "……\n", + "、VCD\n", + "、LD\n", + "『ChinaBaby\n", + "』(\n", + "———\n", + "1980\n", + "28\n", + ",13\n", + "”,\n", + "6763\n", + "15053\n", + "21003\n", + "”,\n", + "”,“\n", + "98\n", + "1986\n", + "”,\n", + "“98\n", + ",“\n", + "98\n", + ",“\n", + "5200\n", + "66%,\n", + "”、“\n", + "”。\n", + ")、78\n", + "),\n", + ",1072\n", + "70\n", + "2000\n", + ",80%\n", + ",“\n", + "7.4\n", + ",1.7\n", + "”、“\n", + "”、“\n", + "(50\n", + "27.6\n", + "85%\n", + ":“\n", + ",‘\n", + ",“\n", + "1996\n", + ":“\n", + "”,\n", + "65\n", + "350\n", + ",1997\n", + "16500\n", + "3388\n", + "3109\n", + "53\n", + "”,\n", + "1927\n", + "”。\n", + "———\n", + "》,\n", + "1944\n", + "1948\n", + "———\n", + "”,\n", + ",《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,\n", + ",“\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "』,\n", + ",“\n", + "’,\n", + "240\n", + "”、“\n", + "”、“\n", + "———1997\n", + "DNV\n", + "ISO9001\n", + "20.2\n", + "46.5\n", + "30.5%,\n", + "14.9\n", + ",33.5%\n", + "1000\n", + "”,\n", + "”,\n", + "(GNT)\n", + "1987\n", + "600\n", + "17\n", + "80\n", + "”,\n", + "》(\n", + "”。\n", + "1986\n", + "》,\n", + ";1994\n", + "》,\n", + ";1995\n", + "———“\n", + ";1997\n", + "》,\n", + "1967\n", + "”。\n", + "3000\n", + "、1000\n", + "300\n", + "70\n", + "600\n", + "1995\n", + "1994\n", + "1989\n", + "80\n", + "———\n", + "”,\n", + ":“\n", + ":“\n", + "168\n", + ":“\n", + "1997\n", + "23\n", + "37\n", + "21\n", + "”,\n", + "119\n", + ",15\n", + ",70\n", + "”、“\n", + "”、“\n", + "110\n", + "1984\n", + "29\n", + "1978\n", + "784\n", + ",12\n", + "21\n", + "450\n", + "90%\n", + "73\n", + "》CD\n", + "100\n", + "500\n", + "500\n", + "”,\n", + "”。\n", + "”。\n", + "”,\n", + "》,\n", + "》1—13\n", + "(1949\n", + "—1950\n", + "),\n", + ",18\n", + "107\n", + "(1949\n", + "—1999\n", + ")。\n", + "1000\n", + "243991\n", + "1996\n", + "———\n", + "”,\n", + ",《1997·\n", + "1997\n", + "《1997·\n", + "》,\n", + "1990\n", + "1000\n", + "1984\n", + "35\n", + "130\n", + ":20\n", + "2400\n", + ",60\n", + "(knowhow),\n", + "”。\n", + "1997\n", + "17\n", + "———\n", + 
"1998\n", + "》。\n", + "2346\n", + "1873\n", + "1505\n", + "《1997\n", + "2000\n", + "1998\n", + "EPS\n", + "》,\n", + "400\n", + ",《\n", + "》、《\n", + ",《\n", + "》、《\n", + "”,\n", + "”。\n", + ":5\n", + "31\n", + "》、\n", + "《D\n", + "》、\n", + "》、《\n", + "29\n", + "”,\n", + "25\n", + "”5\n", + "29\n", + ",《\n", + "、108\n", + "600\n", + "21\n", + "28\n", + "2001\n", + "100\n", + "———\n", + "———\n", + "1995\n", + "”,\n", + "550\n", + "1968\n", + "———\n", + "---\n", + "”,\n", + "”。\n", + "1998\n", + "”。\n", + "90\n", + "1980\n", + "”。\n", + ",“\n", + "———\n", + "264\n", + "1000\n", + "270\n", + ",1998\n", + "50%。\n", + "19\n", + "19\n", + "———\n", + "19\n", + ")、\n", + "19\n", + ":“\n", + ",3\n", + ",7\n", + "),\n", + "--\n", + "305\n", + "1994\n", + "300\n", + "31\n", + "62\n", + "29\n", + ")》。\n", + "》,\n", + "1000\n", + "1997\n", + "”,\n", + "100\n", + "1995\n", + "”,\n", + "1991\n", + "100\n", + "90\n", + ",《\n", + "》、《\n", + "1993\n", + ",5\n", + "、3\n", + "、9\n", + "863\n", + "”,\n", + "》,\n", + "1996\n", + "90\n", + "47\n", + "1.2\n", + "1993\n", + "),\n", + "7300\n", + "1988\n", + "500\n", + "》。\n", + "3000\n", + "《“\n", + "———\n", + ",“\n", + "100\n", + "200\n", + "1000\n", + "》VCD\n", + "106\n", + "》,\n", + "31\n", + "———\n", + "90\n", + "3000\n", + "90\n", + "60\n", + "3000\n", + "1997\n", + "90\n", + "3000\n", + "90\n", + "300\n", + ",“\n", + "”。\n", + "”。\n", + "”:\n", + "……\n", + "200\n", + "》。\n", + "226\n", + "28\n", + "49\n", + "》、\n", + "》……\n", + "260\n", + "1995\n", + "”、1996\n", + "”、1997\n", + "31\n", + "100\n", + ":“\n", + "1992\n", + "60\n", + "2000\n", + "”。\n", + "1994\n", + "31\n", + "”。\n", + "100\n", + "31\n", + "300\n", + "1000\n", + "———\n", + "”,\n", + "”。\n", + "124\n", + "60\n", + "———\n", + "———\n", + "2200\n", + "14%\n", + "16%。\n", + "1300\n", + "”,\n", + "180\n", + ",30\n", + "(6\n", + "—7\n", + "80\n", + "29\n", + "100\n", + "》。\n", + "》、《\n", + "》、《\n", + "22\n", + "28\n", + "29\n", + "21\n", + "38\n", + ",23\n", + "”、“\n", + "———\n", + ";2\n", + "99%\n", + ",1997\n", + "650\n", + "29\n", + "》。\n", + "28\n", + ",1998\n", + "29\n", + ":6\n", + "27\n", + "29\n", + "23\n", + "———\n", + ",29\n", + "1000\n", + "26\n", + "1500\n", + "29\n", + "29\n", + "27\n", + "17\n", + "1.617\n", + "200\n", + "500\n", + "3000\n", + "7685\n", + "22\n", + "22\n", + "26\n", + "10.2\n", + ":『\n", + "1989\n", + "25\n", + "45\n", + ",1989\n", + "100\n", + "158\n", + "1995\n", + "2.5\n", + "2500\n", + "120\n", + "17.5\n", + "209\n", + "1989\n", + ",2\n", + "1.5\n", + "1400\n", + "……\n", + ",1997\n", + "3500\n", + "1986\n", + "800\n", + "48\n", + ",29\n", + "(6\n", + "29\n", + "—6\n", + "2000\n", + "312\n", + "15.5%;\n", + "628\n", + "31.2%;35\n", + "293\n", + "14.5%。\n", + "81\n", + "2000\n", + "28\n", + "28\n", + "32.59\n", + "0.59\n", + "21.38\n", + "2.38\n", + "28\n", + "26.32\n", + "0.02\n", + "26.50\n", + "0.59\n", + "28\n", + "22\n", + "30%\n", + "28\n", + "———\n", + "31\n", + "79\n", + "126\n", + "31\n", + "33\n", + ")。\n", + "28\n", + "28\n", + "27\n", + "1997\n", + "1996\n", + "28\n", + "》,\n", + "”,\n", + "”。\n", + "28\n", + "),\n", + "293\n", + "48\n", + ")260\n", + "28\n", + "1995\n", + "1996\n", + "128\n", + "11808\n", + "67714\n", + "28\n", + "”,\n", + "8300\n", + "26\n", + "47\n", + ",19\n", + "984\n", + "101\n", + ",282\n", + ",300\n", + "335\n", + "2500\n", + "》,\n", + "28\n", + ",80%\n", + "”,\n", + "”,\n", + "》,\n", + "28\n", + "』,\n", + "』,\n", + "』,\n", + "』,\n", + "』,\n", + ":“\n", + "”5\n", + ",4\n", + "5345\n", + "70%\n", + "770\n", + "、945\n", + "、976.5\n", + 
"1050\n", + "27\n", + "200\n", + "”。\n", + "27\n", + "500\n", + "1993\n", + ",80\n", + "1987\n", + "1974\n", + "1972\n", + "2.4\n", + "1971\n", + "100\n", + "22\n", + ":“\n", + "21\n", + "1972\n", + "300\n", + ";5\n", + "808\n", + "784\n", + ",1993\n", + "784\n", + "97%,\n", + "27\n", + "808\n", + "”,\n", + "、12\n", + ",10\n", + "570\n", + "3400\n", + "60\n", + "1100\n", + "27\n", + "103\n", + "———\n", + "500\n", + "”。\n", + ",109\n", + "27\n", + "2000\n", + ",69\n", + "1.4\n", + "88.5\n", + "98.71%;\n", + "1104\n", + "77.55%。\n", + "3.25\n", + "27\n", + "73\n", + "1998\n", + "86\n", + "1996\n", + ",1997\n", + "1996\n", + "24.4\n", + "1996\n", + "1.8\n", + "2500\n", + "185\n", + ",30\n", + "8%,\n", + ":“\n", + "1997\n", + "10%\n", + "3%\n", + "4%\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "23\n", + "85%\n", + "35\n", + "80%\n", + "85%\n", + ":“\n", + "1998\n", + "66%\n", + "……\n", + "……\n", + ":“\n", + "48\n", + "……(\n", + "26\n", + "200\n", + ":“Goodluck,China!\n", + "108\n", + "4.85\n", + "56\n", + "40\n", + "1·3\n", + "800\n", + "650\n", + "3.2\n", + "1987\n", + "”。\n", + "2∶2\n", + ",“\n", + "”。\n", + "1/8\n", + "”。\n", + "40\n", + "7∶3\n", + "”,\n", + "”。\n", + "1/8\n", + "”,\n", + "”,\n", + "17\n", + "1∶0\n", + "36\n", + ",“\n", + "”。\n", + "56\n", + "38\n", + "、12\n", + "、6\n", + "---\n", + "100\n", + "200\n", + "、5\n", + "1983\n", + "28\n", + "2002\n", + "25\n", + "1∶0\n", + "2∶0\n", + "2∶1\n", + "28\n", + "———\n", + "35\n", + ",10\n", + ",4\n", + ",32\n", + "1∶0\n", + "26\n", + "22℃/33℃\n", + "21℃/29℃\n", + "24℃/33℃\n", + "19℃/32℃\n", + "16℃/29℃\n", + "18℃/27℃\n", + "18℃/24℃\n", + "18℃/25℃\n", + "21℃/30℃\n", + "23℃/30℃\n", + "21℃/25℃\n", + "22℃/29℃\n", + "23℃/26℃\n", + "26℃/34℃\n", + "24℃/29℃\n", + "23℃/33℃\n", + "19℃/26℃\n", + "24℃/33℃\n", + "23℃/28℃\n", + "22℃/30℃\n", + "26℃/30℃\n", + "25℃/32℃\n", + "26℃/34℃\n", + "22℃/30℃\n", + "22℃/31℃\n", + "20℃/25℃\n", + "18℃/25℃\n", + "15℃/26℃\n", + "25℃/35℃\n", + "18℃/33℃\n", + "10℃/24℃\n", + "18℃/31℃\n", + "22℃/32℃\n", + "26℃/34℃\n", + "26℃/32℃\n", + "26℃/32℃\n", + "21℃/29℃\n", + "27℃/35℃\n", + "8℃/14℃\n", + "28℃/34℃\n", + "19℃/31℃\n", + "9℃/15℃\n", + "16℃/27℃\n", + "18℃/31℃\n", + "14℃/23℃\n", + "20℃/26℃\n", + "26\n", + "27\n", + "27\n", + "(6\n", + "26\n", + "—6\n", + "27\n", + "34—38\n", + "29\n", + ":26\n", + "—27\n", + "25\n", + ":“\n", + "350\n", + "60\n", + "500\n", + ",《\n", + "》、《\n", + "5000\n", + "5300\n", + "80\n", + "19\n", + "25\n", + "98\n", + "98\n", + "1.3\n", + "140\n", + ",6\n", + "25\n", + "25\n", + "”、“\n", + "”、“\n", + "170\n", + "”,\n", + "100\n", + "1984\n", + "”。\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "28\n", + "》、《\n", + "》、《\n", + "》、《\n", + "25\n", + "、79\n", + "47\n", + "25\n", + "70\n", + "———《\n", + "》,\n", + "25\n", + "25\n", + "25\n", + "25\n", + "25\n", + "1980\n", + "17\n", + "1996\n", + "25\n", + ",1990\n", + "22\n", + ",1984\n", + "35\n", + ":“\n", + ",1949\n", + "》(\n", + "),《\n", + "1988\n", + ",《\n", + "》,\n", + "90\n", + "1988\n", + "1982\n", + "1987\n", + ")》\n", + "1981\n", + "”,\n", + "1997\n", + "905804\n", + "3788041\n", + "1983\n", + "25\n", + "20℃/28℃\n", + "21℃/28℃\n", + "24℃/33℃\n", + "19℃/32℃\n", + "18℃/30℃\n", + "19℃/27℃\n", + "17℃/23℃\n", + "20℃/27℃\n", + "19℃/28℃\n", + "23℃/28℃\n", + "22℃/27℃\n", + "22℃/26℃\n", + "23℃/26℃\n", + "23℃/27℃\n", + "22℃/28℃\n", + "24℃/31℃\n", + "19℃/25℃\n", + "22℃/33℃\n", + "23℃/27℃\n", + "22℃/33℃\n", + "25℃/31℃\n", + "25℃/29℃\n", + "26℃/33℃\n", + "22℃/30℃\n", + "22℃/30℃\n", + 
"20℃/24℃\n", + "18℃/24℃\n", + "11℃/24℃\n", + "23℃/33℃\n", + "18℃/30℃\n", + "11℃/26℃\n", + "17℃/30℃\n", + "22℃/32℃\n", + "25℃/28℃\n", + "25℃/32℃\n", + "25℃/32℃\n", + "20℃/28℃\n", + "28℃/35℃\n", + "8℃/17℃\n", + "29℃/37℃\n", + "22℃/33℃\n", + "10℃/17℃\n", + "13℃/23℃\n", + "11℃/19℃\n", + "10℃/18℃\n", + "21℃/28℃\n", + "25\n", + "26\n", + "(6\n", + "25\n", + "—6\n", + "26\n", + "———\n", + "———\n", + "”,\n", + "1997\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "1999\n", + "2000\n", + "100\n", + ",6\n", + "22\n", + "31\n", + ",“\n", + "70\n", + ")、\n", + ")、\n", + ")、\n", + "300\n", + "400\n", + "1998\n", + "1.1\n", + "1992\n", + "100\n", + "———\n", + "23\n", + "23\n", + "23\n", + "16.91\n", + "0.4\n", + "23\n", + "29.84\n", + "0.48\n", + "23\n", + "17\n", + "33.08\n", + "0.37\n", + ",6\n", + "23\n", + "”,\n", + ",3\n", + "476\n", + "337\n", + ",“\n", + "500\n", + "78\n", + "670\n", + ":“\n", + "……”36\n", + "---\n", + "80\n", + "2010\n", + "2000\n", + "21\n", + "23\n", + "1997\n", + "926\n", + "7.64%;\n", + "7000\n", + "700\n", + "”、“\n", + "”、“\n", + "”、“\n", + "44.85\n", + "2600\n", + "5700\n", + "1997\n", + "2.985\n", + "134\n", + ";1996\n", + "1997\n", + "2.1\n", + "6.5\n", + ",“\n", + "1306\n", + ",“\n", + "4500\n", + ",20\n", + "19\n", + "20℃/27℃\n", + "18℃/28℃\n", + "22℃/31℃\n", + "19℃/32℃\n", + "16℃/29℃\n", + "18℃/27℃\n", + "17℃/22℃\n", + "19℃/31℃\n", + "18℃/29℃\n", + "23℃/27℃\n", + "22℃/26℃\n", + "22℃/29℃\n", + "22℃/27℃\n", + "26℃/32℃\n", + "22℃/26℃\n", + "22℃/30℃\n", + "18℃/23℃\n", + "22℃/29℃\n", + "22℃/27℃\n", + "22℃/26℃\n", + "25℃/31℃\n", + "25℃/29℃\n", + "26℃/34℃\n", + "21℃/29℃\n", + "22℃/29℃\n", + "20℃/25℃\n", + "19℃/23℃\n", + "13℃/27℃\n", + "23℃/35℃\n", + "17℃/29℃\n", + "12℃/25℃\n", + "20℃/30℃\n", + "21℃/32℃\n", + "27℃/33℃\n", + "24℃/29℃\n", + "24℃/29℃\n", + "18℃/26℃\n", + "27℃/35℃\n", + "6℃/12℃\n", + "29℃/36℃\n", + "20℃/32℃\n", + "12℃/18℃\n", + "15℃/25℃\n", + "14℃/26℃\n", + "12℃/19℃\n", + "20℃/28℃\n", + ",24\n", + "25\n", + "(6\n", + "—6\n", + "25\n", + "23\n", + "21\n", + "》,\n", + "5.6\n", + "23\n", + "130\n", + "160\n", + "300\n", + "500\n", + "23\n", + "23\n", + "》,\n", + "》;\n", + "1998\n", + "23\n", + "23\n", + "”、“\n", + "”。\n", + "100\n", + "”,6\n", + "23\n", + "23\n", + "、11\n", + "23\n", + "』,\n", + "』『\n", + "』,\n", + "』,\n", + ",『\n", + "』,\n", + "』,\n", + "』?\n", + ":『\n", + "』,\n", + "』。\n", + "』(\n", + "”、“\n", + "》,\n", + "21\n", + "”,\n", + "18℃/28℃\n", + "18℃/28℃\n", + "23℃/29℃\n", + "18℃/28℃\n", + "14℃/27℃\n", + "18℃/26℃\n", + "18℃/23℃\n", + "16℃/26℃\n", + "18℃/28℃\n", + "22℃/26℃\n", + "22℃/29℃\n", + "22℃/28℃\n", + "23℃/28℃\n", + "24℃/27℃\n", + "20℃/26℃\n", + "21℃/28℃\n", + "19℃/22℃\n", + "22℃/29℃\n", + "22℃/27℃\n", + "23℃/27℃\n", + "28℃/33℃\n", + "25℃/29℃\n", + "27℃/36℃\n", + "21℃/28℃\n", + "24℃/30℃\n", + "20℃/25℃\n", + "18℃/22℃\n", + "14℃/26℃\n", + "24℃/34℃\n", + "19℃/28℃\n", + "11℃/24℃\n", + "17℃/28℃\n", + "19℃/30℃\n", + "25℃/32℃\n", + "27℃/31℃\n", + "27℃/30℃\n", + "17℃/26℃\n", + "27℃/34℃\n", + "6℃/11℃\n", + "28℃/37℃\n", + "20℃/32℃\n", + "11℃/18℃\n", + "13℃/25℃\n", + "13℃/24℃\n", + "12℃/22℃\n", + "20℃/29℃\n", + "23\n", + "(6\n", + "23\n", + "—6\n", + "22\n", + "———“\n", + "”。\n", + "1000\n", + "826.7\n", + "1994\n", + "67\n", + "83\n", + "21\n", + ":《\n", + "》(\n", + ")、《“\n", + "》(\n", + ")、《“\n", + "》(\n", + ")、《\n", + "”》(\n", + ")、《“\n", + "》(\n", + ")。\n", + ":《\n", + "”》(\n", + ")、《\n", + "》(\n", + ")、《“\n", + ":《21200》(\n", + ")。\n", + "”、“\n", + "”。\n", + "22\n", + "22\n", + "31\n", + "17\n", + "307\n", + "67\n", + 
",87\n", + ",123\n", + ",30\n", + "22\n", + "22\n", + "22\n", + ",12\n", + "4.\n", + "3.\n", + "2.\n", + ":1.\n", + ",1993\n", + "22\n", + "21\n", + ",“\n", + ",1991\n", + ":“\n", + "1996\n", + ":“\n", + "”。\n", + ":“\n", + "22\n", + ":“\n", + "……\n", + "23\n", + "1979\n", + "25℃/31℃\n", + "20℃/28℃\n", + "23℃/30℃\n", + "18℃/26℃\n", + "13℃/26℃\n", + "18℃/22℃\n", + "16℃/23℃\n", + "18℃/26℃\n", + "16℃/27℃\n", + "22℃/29℃\n", + "24℃/28℃\n", + "22℃/26℃\n", + "23℃/29℃\n", + "26℃/33℃\n", + "21℃/28℃\n", + "26℃/34℃\n", + "19℃/23℃\n", + "25℃/33℃\n", + "26℃/30℃\n", + "24℃/29℃\n", + "28℃/34℃\n", + "26℃/32℃\n", + "27℃/36℃\n", + "21℃/28℃\n", + "22℃/29℃\n", + "21℃/27℃\n", + "17℃/26℃\n", + "13℃/27℃\n", + "26℃/36℃\n", + "17℃/31℃\n", + "7℃/22℃\n", + "17℃/29℃\n", + "19℃/29℃\n", + "26℃/33℃\n", + "28℃/31℃\n", + "29℃/31℃\n", + "18℃/28℃\n", + "27℃/35℃\n", + "7℃/13℃\n", + "29℃/37℃\n", + "17℃/29℃\n", + "8℃/17℃\n", + "13℃/24℃\n", + "15℃/24℃\n", + "12℃/21℃\n", + "21℃/33℃\n", + ",22\n", + "23\n", + "(6\n", + "22\n", + "—6\n", + "23\n", + ",30\n", + "21\n", + "21\n", + "19\n", + "1995\n", + "1996\n", + "8799\n", + "4000\n", + ")、\n", + "1993\n", + "1991\n", + "1995\n", + "”。\n", + "、9\n", + "、9\n", + ":“\n", + "》。\n", + ",《\n", + "”、“\n", + "”。\n", + ":“\n", + "67\n", + "”,\n", + "17\n", + "”。\n", + "》,\n", + "》。\n", + "40\n", + "》,50\n", + "”、“\n", + "200\n", + "———\n", + "300\n", + "500\n", + "200\n", + "200\n", + "2000\n", + "1995\n", + ":“\n", + "”,\n", + "27\n", + ":“\n", + "1000\n", + ":“\n", + "”、\n", + "700\n", + "65\n", + "500\n", + "200\n", + "2.5\n", + "4000\n", + "2000\n", + "60\n", + "2000\n", + ",2.5\n", + "2.5\n", + "1990\n", + ":“\n", + "27\n", + "2.3\n", + "124\n", + "112\n", + "4800\n", + "———\n", + "』。\n", + "』。\n", + "』、『\n", + "』,\n", + "』(\n", + ")。\n", + "8000\n", + "2000\n", + "40\n", + "10082\n", + "1956\n", + "1958\n", + "1000\n", + "14278\n", + "1122\n", + "3572\n", + "40\n", + "2000\n", + "31\n", + "930\n", + "5000\n", + "、10\n", + "3000\n", + "8000\n", + "1998\n", + "1996\n", + "》,\n", + "400\n", + "32\n", + ",80%\n", + "9.6\n", + "61\n", + "2000\n", + "1996\n", + "14500\n", + "100\n", + ")、\n", + ")4\n", + "1993\n", + ":“\n", + "150\n", + ":5\n", + ",335\n", + "300\n", + "22\n", + "”。\n", + "19\n", + ",7\n", + ",“\n", + "”,\n", + "1517\n", + "785\n", + ",“\n", + ",5\n", + ",“\n", + "4986\n", + "12%。\n", + "”)\n", + "200\n", + "100\n", + "”,\n", + "”———\n", + "”。\n", + ":6\n", + "19\n", + ",10\n", + "”,\n", + "700\n", + "1.2\n", + "35\n", + "1700\n", + "100\n", + ",9\n", + "22\n", + "800\n", + "80\n", + ",90\n", + "1.5\n", + "8.6\n", + "860\n", + "1800\n", + "1/3\n", + "1/3。\n", + "1986\n", + "650\n", + "700\n", + "』,\n", + "』。\n", + ":『\n", + "○○○\n", + "1/3”\n", + "1/3,\n", + "1/3,\n", + "1/3。\n", + "60%\n", + "1994\n", + "60\n", + "3000\n", + "184\n", + "7%。\n", + "1997\n", + "41\n", + "、36\n", + "52\n", + "1993\n", + "154\n", + "、“\n", + "、“\n", + "90\n", + ",“\n", + "”,“\n", + "”,\n", + "1997\n", + "88%;\n", + "36\n", + "86%,\n", + "70\n", + "80\n", + "”,\n", + "90\n", + "”。\n", + "”。\n", + ")(\n", + "———\n", + "”、“\n", + "”。\n", + "”。\n", + ",“\n", + "”。\n", + "17\n", + "”。\n", + "”,\n", + "”。\n", + "”。\n", + "”。\n", + "”,\n", + "0.18\n", + "1954\n", + "27\n", + "1/4\n", + "』。\n", + "……(\n", + "19\n", + "》,\n", + "3000\n", + "100\n", + "———\n", + ",“\n", + "、7\n", + ",2\n", + ",4\n", + "1/8\n", + "4∶0\n", + "———\n", + "22\n", + "19\n", + "555\n", + "11.66\n", + "0∶4\n", + "1∶0\n", + "19\n", + "18℃/31℃\n", + "18℃/33℃\n", + "22℃/36℃\n", + "16℃/31℃\n", + "13℃/27℃\n", + "17℃/26℃\n", + 
"15℃/21℃\n", + "14℃/26℃\n", + "16℃/26℃\n", + "22℃/26℃\n", + "21℃/29℃\n", + "21℃/27℃\n", + "24℃/31℃\n", + "23℃/27℃\n", + "20℃/26℃\n", + "24℃/35℃\n", + "18℃/23℃\n", + "22℃/37℃\n", + "25℃/32℃\n", + "20℃/25℃\n", + "28℃/33℃\n", + "25℃/30℃\n", + "26℃/36℃\n", + "22℃/31℃\n", + "21℃/28℃\n", + "22℃/28℃\n", + "17℃/26℃\n", + "12℃/27℃\n", + "25℃/36℃\n", + "17℃/31℃\n", + "14℃/27℃\n", + "14℃/30℃\n", + "20℃/30℃\n", + "26℃/34℃\n", + "28℃/34℃\n", + "28℃/34℃\n", + "22℃/28℃\n", + "26℃/35℃\n", + "7℃/16℃\n", + "28℃/38℃\n", + "22℃/35℃\n", + "17℃/25℃\n", + "12℃/22℃\n", + "12℃/21℃\n", + "13℃/22℃\n", + "17℃/27℃\n", + "(6\n", + "19\n", + "—6\n", + "98\n", + "19\n", + ",1995\n", + ":“\n", + "”。\n", + ",5\n", + "”,648\n", + "1500\n", + "150\n", + ",1993\n", + "19—22\n", + "”,\n", + "》,\n", + "”。\n", + ",1930\n", + "80\n", + "1000\n", + ",“\n", + "1998\n", + "27\n", + "93\n", + "1997\n", + "19\n", + ":“\n", + "”,\n", + "70\n", + "80\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "1978\n", + "1979\n", + "1972\n", + "”,\n", + "1963\n", + "1954\n", + "1949\n", + "96\n", + "46\n", + "1945\n", + "1944\n", + "1942\n", + "1937\n", + "1935\n", + "———\n", + "1930\n", + "1927\n", + "1926\n", + "1925\n", + "》,\n", + "”、“\n", + "”、“\n", + "”、“\n", + "1921\n", + "”,\n", + ",1920\n", + "”,\n", + ",1905\n", + "25\n", + "),\n", + "1998\n", + "27\n", + "93\n", + "……\n", + "“8·22”\n", + "6000\n", + "846\n", + "229\n", + "52\n", + "60\n", + "1400\n", + "1997\n", + "44.84\n", + "14.4\n", + "1.4\n", + ",1997\n", + "2981\n", + "316\n", + "2262\n", + "),\n", + "19℃/32℃\n", + "18℃/31℃\n", + "22℃/36℃\n", + "16℃/31℃\n", + "12℃/23℃\n", + "16℃/24℃\n", + "17℃/20℃\n", + "14℃/23℃\n", + "17℃/26℃\n", + "22℃/27℃\n", + "19℃/29℃\n", + "21℃/25℃\n", + "20℃/28℃\n", + "26℃/31℃\n", + "21℃/28℃\n", + "20℃/30℃\n", + "17℃/22℃\n", + "23℃/38℃\n", + "24℃/28℃\n", + "23℃/25℃\n", + "28℃/34℃\n", + "25℃/30℃\n", + "28℃/36℃\n", + "22℃/30℃\n", + "23℃/31℃\n", + "18℃/26℃\n", + "19℃/28℃\n", + "11℃/28℃\n", + "24℃/35℃\n", + "18℃/30℃\n", + "14℃/26℃\n", + "13℃/27℃\n", + "20℃/32℃\n", + "26℃/35℃\n", + "27℃/33℃\n", + "27℃/33℃\n", + "20℃/28℃\n", + "28℃/35℃\n", + "10℃/19℃\n", + "30℃/44℃\n", + "20℃/37℃\n", + "14℃/28℃\n", + "15℃/25℃\n", + "15℃/29℃\n", + "18℃/28℃\n", + "17℃/30℃\n", + "19\n", + "(6\n", + "—6\n", + "19\n", + ")3、\n", + "7500\n", + "4000\n", + "2、\n", + "1、\n", + "43\n", + "1986\n", + "500\n", + "130\n", + "148\n", + "300\n", + "”。\n", + ",1929\n", + ",1932\n", + "86\n", + "”。\n", + ",3000\n", + "130\n", + "———\n", + "5500\n", + "200\n", + "29\n", + "1.5\n", + "2.6\n", + "“2·13”\n", + ",1997\n", + "17\n", + "1997\n", + "17\n", + "……\n", + "”,\n", + "”———\n", + "”。\n", + "300\n", + "”。\n", + "1939\n", + ":“\n", + "1997\n", + "4200\n", + "3500\n", + ",“\n", + "”;\n", + "200\n", + "”。\n", + ",“\n", + "”,\n", + "25\n", + "”,\n", + "25\n", + ",1997\n", + "47\n", + "300\n", + "2500\n", + "3000\n", + ",(\n", + ",“\n", + "”、“\n", + "”、“\n", + "85%、98%\n", + "181\n", + "1976\n", + "200\n", + "”“\n", + "40\n", + "”———\n", + "”,\n", + "』---\n", + "”:\n", + "》,\n", + ",《\n", + "343\n", + "80\n", + "17\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》4\n", + "18℃/27℃\n", + "20℃/30℃\n", + "22℃/33℃\n", + "17℃/32℃\n", + "15℃/28℃\n", + "16℃/24℃\n", + "15℃/20℃\n", + "17℃/24℃\n", + "15℃/23℃\n", + "23℃/29℃\n", + "21℃/32℃\n", + "21℃/24℃\n", + "24℃/29℃\n", + "25℃/31℃\n", + "20℃/24℃\n", + "23℃/32℃\n", + "16℃/21℃\n", + "25℃/36℃\n", + "24℃/31℃\n", + "23℃/28℃\n", + "26℃/32℃\n", + "24℃/29℃\n", + "28℃/36℃\n", + "23℃/31℃\n", + "23℃/29℃\n", + "21℃/25℃\n", + "19℃/29℃\n", + "14℃/28℃\n", + "25℃/36℃\n", + 
"17℃/32℃\n", + "12℃/26℃\n", + "14℃/23℃\n", + "18℃/30℃\n", + "25℃/34℃\n", + "27℃/33℃\n", + "27℃/33℃\n", + "20℃/29℃\n", + "27℃/36℃\n", + "11℃/19℃\n", + "29℃/43℃\n", + "22℃/37℃\n", + "18℃/31℃\n", + "14℃/24℃\n", + "14℃/24℃\n", + "15℃/25℃\n", + "15℃/25℃\n", + ",17\n", + "(6\n", + "17\n", + "—6\n", + "》CD\n", + "、《\n", + "》,\n", + "60\n", + "97\n", + ":“\n", + "150\n", + "1982\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "25\n", + ",10\n", + "”,\n", + "1984\n", + "》、《\n", + "37\n", + "1994\n", + "19.319\n", + "73\n", + "1993\n", + "27\n", + ":1993\n", + "28\n", + ":“\n", + "”,\n", + "”。\n", + ",1985\n", + "1994\n", + ",5\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "17\n", + "28\n", + "———\n", + "』『\n", + "』『\n", + "』,\n", + "』。\n", + "』,\n", + "』,『\n", + "』,\n", + "』?\n", + "22℃/28℃\n", + "22℃/33℃\n", + "21℃/34℃\n", + "18℃/32℃\n", + "16℃/28℃\n", + "16℃/26℃\n", + "18℃/24℃\n", + "16℃/26℃\n", + "14℃/27℃\n", + "21℃/29℃\n", + "20℃/29℃\n", + "21℃/27℃\n", + "20℃/27℃\n", + "26℃/33℃\n", + "21℃/24℃\n", + "24℃/34℃\n", + "17℃/25℃\n", + "25℃/30℃\n", + "23℃/29℃\n", + "23℃/28℃\n", + "27℃/34℃\n", + "29℃/37℃\n", + "28℃/37℃\n", + "22℃/30℃\n", + "23℃/29℃\n", + "18℃/26℃\n", + "18℃/28℃\n", + "15℃/28℃\n", + "22℃/28℃\n", + "20℃/32℃\n", + "15℃/23℃\n", + "18℃/27℃\n", + "17℃/28℃\n", + "27℃/34℃\n", + "28℃/34℃\n", + "28℃/34℃\n", + "16℃/21℃\n", + "26℃/34℃\n", + "11℃/20℃\n", + "27℃/36℃\n", + "20℃/39℃\n", + "16℃/26℃\n", + "11℃/20℃\n", + "12℃/22℃\n", + "12℃/21℃\n", + "14℃/20℃\n", + "17\n", + "(6\n", + "—6\n", + "17\n", + ",5\n", + "1%。\n", + "30%、\n", + "30%\n", + "1—5\n", + "26.5\n", + "27.11%,\n", + "14.5\n", + "13.62\n", + ":5\n", + "”8\n", + ":“\n", + "300\n", + "200\n", + "100\n", + "200\n", + "”“\n", + "27\n", + "200\n", + "17\n", + "45\n", + "38\n", + ",3\n", + "11525\n", + ",33178\n", + "400\n", + "”,\n", + "”、“\n", + "———\n", + "》。\n", + "”,\n", + "1994\n", + "DTD\n", + "300\n", + ",50\n", + "1989\n", + "1977\n", + "———\n", + "”(\n", + ")(\n", + "1·5\n", + "1·2\n", + "3000\n", + "35\n", + "2、\n", + "700\n", + "1、\n", + "21\n", + "3045.69\n", + "7.9%,\n", + "37.6%,\n", + "4.1\n", + "1517.83\n", + "1%,\n", + "34.1%,\n", + "9.1\n", + "7.6\n", + "800\n", + "1000\n", + "8、\n", + "7、\n", + "6、\n", + "5、\n", + "4、\n", + "3、\n", + "2、\n", + "32\n", + "1、1996\n", + "26\n", + "、20\n", + "1996\n", + "7000\n", + ":“\n", + "21\n", + "32\n", + "150\n", + "20%\n", + "”、“\n", + "”、“\n", + "”、“\n", + "“21\n", + ",《\n", + "200\n", + "38—39\n", + "100\n", + "》(\n", + "110\n", + "1997\n", + ":6\n", + ",11\n", + "”。\n", + "4.2\n", + "1100\n", + "1000\n", + "”,\n", + "4.6\n", + ":6\n", + ",1996\n", + "7.5\n", + ",52.7\n", + "200\n", + "200\n", + "52.7\n", + "1992\n", + "263\n", + "268\n", + "》,\n", + "”,\n", + "》,\n", + "”,\n", + "”,\n", + "》、《\n", + "(1996)》、《1958\n", + "1993\n", + "99.8%,\n", + "66%\n", + "56.2%。\n", + "85%\n", + ",1997\n", + "99%\n", + "39%,\n", + "54%,\n", + "60%。\n", + "73%\n", + "1978—1996\n", + "70%\n", + "80%\n", + "90%\n", + "GDP\n", + ",1996\n", + "99.7%,1991—1996\n", + "76.6%。\n", + ":“\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + ":“\n", + "”,\n", + "”。\n", + "』(\n", + "”,\n", + "”。\n", + "”。\n", + "53.222\n", + "52.903\n", + "A35.844\n", + "30.405\n", + "22.871\n", + "21.502\n", + "19.823\n", + "A12.644\n", + "A11.195\n", + "11.121\n", + "28.442\n", + "27.663\n", + "15.384\n", + "13.935\n", + "11.911ST\n", + "18.642ST\n", + "16.773\n", + "14.084\n", + "13.075\n", + "12.82\n", + "、25\n", + "1.9\n", + "1000—2000\n", + "),\n", + "”、“\n", + "”,\n", + "90\n", + 
"200%—400%,\n", + "1000\n", + "40%\n", + "500\n", + "”,\n", + "2000\n", + "130\n", + "”,\n", + "1998\n", + "29\n", + ",7\n", + "54700\n", + "1240\n", + "2.37\n", + "3.5\n", + "200\n", + "4000\n", + "5/6\n", + "、9/10\n", + "1.2\n", + "16.9\n", + "1/10,\n", + "1993\n", + "4500\n", + "3900\n", + "80\n", + "90\n", + "1000\n", + "90%;\n", + "4%\n", + "1%;\n", + "400\n", + "9000\n", + "2000\n", + "1200\n", + "500\n", + "125\n", + "1275\n", + ",5.6\n", + "、95\n", + "、22\n", + "、73\n", + "、248\n", + "、1340\n", + "25.6\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "———\n", + "”;\n", + "200\n", + "100\n", + "1000\n", + "(240\n", + "),\n", + "(120\n", + "),\n", + "(80\n", + "),\n", + "(30\n", + "),\n", + "(30\n", + "),\n", + "(60\n", + "),\n", + "(50\n", + "1.5\n", + ",18\n", + ",24\n", + "146.16\n", + "147.70\n", + "2.70\n", + "297\n", + "2381\n", + "1673\n", + "1800\n", + "301\n", + "300\n", + "2000\n", + "1.5\n", + "3000\n", + "360\n", + "90\n", + "4.2\n", + "2000\n", + ",12—13\n", + "159\n", + "”,\n", + "1954\n", + "150\n", + "———\n", + "》、《\n", + ":《\n", + ":(\n", + ":(\n", + ",《\n", + ",《\n", + ":(《\n", + ":(《\n", + ":“\n", + ":(《\n", + ")《\n", + "》,\n", + "』,\n", + ",『\n", + ":『\n", + "》,\n", + "』;\n", + "……\n", + "———“\n", + "”。\n", + "”。\n", + "》,\n", + "、《\n", + "、《\n", + "),\n", + "”,\n", + "———\n", + "1928\n", + "28\n", + "1927\n", + "》,\n", + "1995\n", + "》。\n", + "……\n", + ":“\n", + "”,\n", + "”,\n", + ",《\n", + "”,\n", + "》,\n", + ",“\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "、《\n", + "、“\n", + "———\n", + "”,\n", + "”,\n", + "”,\n", + "12%\n", + ":“\n", + "100%,\n", + "50%,\n", + "1991\n", + "1987\n", + "”。\n", + ",1991\n", + "20%\n", + "1997\n", + "7%,\n", + "1996\n", + "41·4%,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + ":“\n", + "”,\n", + "———\n", + ",《\n", + "》(\n", + "》)\n", + ")《\n", + "”,\n", + "”,\n", + ",20\n", + "”;\n", + "”,\n", + "”,\n", + "”,\n", + "———\n", + "》。\n", + "200\n", + ":25\n", + "250\n", + "150\n", + "”,\n", + "216\n", + "80\n", + ":“\n", + ":“\n", + "……10\n", + "28\n", + "2020S\n", + "),\n", + "300\n", + "1633\n", + "OK”\n", + ":『\n", + "『CCIB』\n", + "(IWTO),\n", + "”。\n", + "”。\n", + "”,“\n", + "”,\n", + "35\n", + "”,\n", + "”、“\n", + "”。\n", + "……\n", + "”,\n", + "”。\n", + ":“\n", + "……”\n", + ":“\n", + "),\n", + "”5\n", + "29\n", + ",“\n", + "23\n", + "”,\n", + "“W”\n", + "———\n", + "1996\n", + "300\n", + "”,\n", + "———\n", + ",“\n", + "26\n", + ",80\n", + "---\n", + "”,\n", + ";“\n", + "”,“\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + ",“\n", + ",“\n", + "”,\n", + "”,\n", + "”。\n", + "”。\n", + ",1996\n", + "1995\n", + "77.6%,1997\n", + "1996\n", + "59·7%。\n", + "1996\n", + "1997\n", + "54.8%\n", + "74%,\n", + "1996\n", + "8%、\n", + "10%\n", + ",30%\n", + "8%—10%\n", + "1998\n", + ")、\n", + ")、\n", + ")、\n", + "”,\n", + "”,\n", + ",“\n", + "(“\n", + "”(\n", + "”(\n", + "———“\n", + "、“\n", + "、“\n", + "”?\n", + "”?\n", + "”,\n", + "”,\n", + ",“\n", + "43.542\n", + "36.293\n", + "19.674\n", + "15.945\n", + "A14.511\n", + "15.242\n", + "A13.443\n", + "12.634\n", + "A10.805\n", + "A9.861\n", + "32.292\n", + "30.093\n", + "22.214\n", + "15.035XR\n", + "14.391ST\n", + "17.202\n", + "15.303\n", + "14.114\n", + "14.115\n", + "12.90\n", + "1995\n", + ";1996\n", + "”;1997\n", + "”,\n", + "40\n", + "100\n", + "300\n", + "36\n", + ":“\n", + ",3\n", + "28\n", + "”。\n", + "1400\n", + ":“\n", + "———\n", + "”。\n", + "1997\n", + ":“\n", + "40\n", + ":“\n", + "1994\n", + "32\n", + "1993\n", + "1975\n", + "1954\n", + "———\n", + 
"20%,\n", + "100%。\n", + "14.8\n", + "1100\n", + "296\n", + "”,\n", + "218\n", + ",98%\n", + "22\n", + ",10\n", + "”,\n", + "”,\n", + ",“\n", + "”。\n", + "15%,\n", + "20%,\n", + "”。\n", + "1998\n", + "1996\n", + "76%,\n", + "40%。\n", + "”。\n", + ",《\n", + "25\n", + ":“\n", + "300\n", + "1988\n", + "60\n", + "42.4%,\n", + "29.3%,\n", + "”,\n", + "80\n", + ",200\n", + "”,\n", + "1987\n", + "》,1988\n", + "1982\n", + "1980\n", + "———\n", + "90\n", + ",18\n", + "1987\n", + ")》(\n", + "》)\n", + "1988\n", + ")》\n", + "50%。\n", + "100\n", + "100\n", + "100\n", + "369\n", + "、921\n", + ":“\n", + "▲『\n", + "32\n", + "28\n", + "”。\n", + "100\n", + "1998\n", + ",11\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "”,“\n", + "”。\n", + "”,\n", + "”。\n", + "2∶2\n", + "100\n", + "13000\n", + ",100\n", + "0∶0,\n", + ",B\n", + "———6\n", + "23\n", + "1990\n", + "1/4\n", + ":“\n", + ",“\n", + "”,\n", + ":“\n", + "”。\n", + ",“\n", + "”。\n", + "———B\n", + "19\n", + ",0∶0\n", + "3.5\n", + "0∶1\n", + "17℃/30℃\n", + "19℃/30℃\n", + "19℃/31℃\n", + "15℃/28℃\n", + "12℃/26℃\n", + "15℃/23℃\n", + "14℃/20℃\n", + "16℃/26℃\n", + "14℃/27℃\n", + "20℃/26℃\n", + "18℃/24℃\n", + "20℃/26℃\n", + "18℃/27℃\n", + "22℃/29℃\n", + "20℃/26℃\n", + "22℃/33℃\n", + "16℃/22℃\n", + "19℃/32℃\n", + "22℃/30℃\n", + "21℃/31℃\n", + "26℃/32℃\n", + "25℃/32℃\n", + "26℃/35℃\n", + "19℃/24℃\n", + "22℃/28℃\n", + "20℃/25℃\n", + "18℃/25℃\n", + "14℃/28℃\n", + "18℃/29℃\n", + "17℃/29℃\n", + "10℃/21℃\n", + "14℃/27℃\n", + "12℃/18℃\n", + "23℃/30℃\n", + "25℃/29℃\n", + "24℃/30℃\n", + "18℃/21℃\n", + "27℃/33℃\n", + "12℃/17℃\n", + "27℃/33℃\n", + "21℃/35℃\n", + "17℃/28℃\n", + "9℃/16℃\n", + "9℃/16℃\n", + "8℃/15℃\n", + "13℃/22℃\n", + "(6\n", + "—6\n", + "“《\n", + "1987\n", + "》,\n", + "86\n", + ",25\n", + "1937\n", + ")《\n", + "95\n", + "1·9\n", + "540\n", + "7000\n", + "80\n", + "96%\n", + "67\n", + "35\n", + "40%。\n", + "”,\n", + "2000\n", + "3000\n", + "25\n", + "2000\n", + "120\n", + "420\n", + "400\n", + ":“\n", + "”。\n", + "120\n", + "17\n", + "100\n", + "1955\n", + ",1928\n", + "19\n", + "89\n", + "1000\n", + "12—15\n", + "12—13\n", + ";14—18\n", + "3—5\n", + ",《\n", + "2100\n", + "740\n", + "1996\n", + "35\n", + "1500\n", + ",『\n", + "』。\n", + "』、『\n", + "』、『\n", + "』。\n", + "』,\n", + "10%\n", + "32\n", + "44.4\n", + "18℃/32℃\n", + "19℃/29℃\n", + "21℃/31℃\n", + "15℃/24℃\n", + "13℃/26℃\n", + "16℃/27℃\n", + "16℃/22℃\n", + "12℃/21℃\n", + "12℃/25℃\n", + "20℃/25℃\n", + "20℃/26℃\n", + "20℃/26℃\n", + "19℃/25℃\n", + "22℃/26℃\n", + "19℃/24℃\n", + "23℃/32℃\n", + "16℃/22℃\n", + "20℃/29℃\n", + "23℃/28℃\n", + "20℃/28℃\n", + "26℃/31℃\n", + "25℃/32℃\n", + "26℃/34℃\n", + "19℃/25℃\n", + "22℃/28℃\n", + "17℃/23℃\n", + "17℃/24℃\n", + "13℃/28℃\n", + "16℃/27℃\n", + "14℃/27℃\n", + "9℃/22℃\n", + "15℃/26℃\n", + "11℃/18℃\n", + "23℃/29℃\n", + "25℃/28℃\n", + "25℃/29℃\n", + "17℃/23℃\n", + "26℃/33℃\n", + "13℃/19℃\n", + "26℃/33℃\n", + "21℃/34℃\n", + "18℃/30℃\n", + "14℃/21℃\n", + "13℃/20℃\n", + "11℃/18℃\n", + "13℃/22℃\n", + ",11\n", + "(6\n", + "—6\n", + "”。\n", + "1988\n", + "”。\n", + "1000\n", + "———\n", + "———\n", + "———\n", + "2800\n", + ",1.5\n", + "800\n", + ",5\n", + "2316\n", + "1952\n", + "800\n", + "1600\n", + "100\n", + "1065\n", + "265\n", + "125\n", + "1.5\n", + "2134\n", + "3、\n", + "2、\n", + "38.5%。\n", + "1、\n", + ",1996\n", + "19℃/30℃\n", + "19℃/30℃\n", + "21℃/32℃\n", + "17℃/24℃\n", + "11℃/25℃\n", + "16℃/27℃\n", + "14℃/20℃\n", + "12℃/22℃\n", + "13℃/24℃\n", + "19℃/27℃\n", + "20℃/30℃\n", + "19℃/27℃\n", + "20℃/27℃\n", + "21℃/29℃\n", + "19℃/24℃\n", + "22℃/32℃\n", + 
"17℃/23℃\n", + "17℃/29℃\n", + "20℃/28℃\n", + "20℃/24℃\n", + "24℃/29℃\n", + "25℃/31℃\n", + "27℃/33℃\n", + "18℃/23℃\n", + "21℃/28℃\n", + "16℃/20℃\n", + "17℃/24℃\n", + "13℃/27℃\n", + "17℃/26℃\n", + "16℃/27℃\n", + "7℃/20℃\n", + "16℃/24℃\n", + "11℃/20℃\n", + "22℃/29℃\n", + "22℃/27℃\n", + "23℃/27℃\n", + "12℃/19℃\n", + "25℃/33℃\n", + "11℃/19℃\n", + "27℃/37℃\n", + "21℃/34℃\n", + "16℃/26℃\n", + "12℃/21℃\n", + "13℃/19℃\n", + "12℃/17℃\n", + "12℃/23℃\n", + "(6\n", + "—6\n", + "40\n", + "》、《\n", + "》、《\n", + ",10\n", + "65\n", + ")“\n", + "150\n", + "(http∶//www.peopledaily.com.cn)\n", + "1700\n", + "1088\n", + "3:\n", + "2:\n", + "1:\n", + "100\n", + "19\n", + "107\n", + "”。\n", + "”,\n", + ",1940\n", + "78\n", + "100\n", + "35\n", + "1963\n", + ",“\n", + "”,\n", + "60\n", + "”1957\n", + "28\n", + ",“\n", + "40\n", + "26\n", + "100\n", + "200\n", + "———《\n", + "———\n", + "》,\n", + ",『\n", + "17℃/30℃\n", + "19℃/31℃\n", + "20℃/34℃\n", + "15℃/30℃\n", + "13℃/26℃\n", + "14℃/22℃\n", + "14℃/20℃\n", + "13℃/22℃\n", + "10℃/23℃\n", + "19℃/25℃\n", + "20℃/27℃\n", + "20℃/27℃\n", + "20℃/30℃\n", + "20℃/25℃\n", + "20℃/26℃\n", + "21℃/30℃\n", + "16℃/22℃\n", + "16℃/28℃\n", + "21℃/28℃\n", + "22℃/32℃\n", + "21℃/27℃\n", + "24℃/29℃\n", + "27℃/34℃\n", + "20℃/26℃\n", + "20℃/25℃\n", + "17℃/25℃\n", + "16℃/24℃\n", + "10℃/25℃\n", + "19℃/30℃\n", + "15℃/26℃\n", + "8℃/20℃\n", + "17℃/24℃\n", + "17℃/26℃\n", + "23℃/28℃\n", + "21℃/28℃\n", + "21℃/27℃\n", + "14℃/20℃\n", + "26℃/35℃\n", + "11℃/19℃\n", + "29℃/34℃\n", + "21℃/34℃\n", + "14℃/25℃\n", + "14℃/21℃\n", + "12℃/18℃\n", + "12℃/19℃\n", + "14℃/20℃\n", + "(6\n", + "—6\n", + "”,\n", + "60\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "36\n", + "23\n", + "1983\n", + "85\n", + "1913\n", + "500\n", + "85\n", + "1997\n", + "85\n", + "1994\n", + "1990\n", + "”。\n", + "1973\n", + "1994\n", + "1973\n", + "1934\n", + ",1954\n", + ",5\n", + "31\n", + "———\n", + "》、\n", + "》、\n", + ",1939\n", + ",1941\n", + "79\n", + "5.6\n", + ":54\n", + "375\n", + "B—24\n", + "1944\n", + "31\n", + ",10\n", + "2000\n", + "0.5\n", + "2.6\n", + "54\n", + "23\n", + ",40\n", + "70\n", + "103\n", + "15℃/27℃\n", + "15℃/25℃\n", + "19℃/30℃\n", + "15℃/29℃\n", + "13℃/23℃\n", + "16℃/21℃\n", + "13℃/20℃\n", + "11℃/25℃\n", + "10℃/24℃\n", + "19℃/24℃\n", + "20℃/27℃\n", + "19℃/24℃\n", + "19℃/28℃\n", + "22℃/29℃\n", + "23℃/28℃\n", + "19℃/32℃\n", + "15℃/20℃\n", + "16℃/30℃\n", + "22℃/28℃\n", + "24℃/34℃\n", + "23℃/30℃\n", + "23℃/27℃\n", + "26℃/33℃\n", + "19℃/26℃\n", + "20℃/28℃\n", + "18℃/26℃\n", + "16℃/26℃\n", + "9℃/25℃\n", + "17℃/27℃\n", + "14℃/28℃\n", + "8℃/23℃\n", + "13℃/27℃\n", + "17℃/27℃\n", + "22℃/30℃\n", + "22℃/28℃\n", + "22℃/29℃\n", + "14℃/18℃\n", + "27℃/34℃\n", + "12℃/19℃\n", + "28℃/35℃\n", + "20℃/33℃\n", + "11℃/19℃\n", + "15℃/24℃\n", + "16℃/25℃\n", + "14℃/23℃\n", + "12℃/23℃\n", + ",7\n", + "—10\n", + "(6\n", + "—6\n", + "4000\n", + "3340\n", + "3000\n", + ",60%\n", + "”,\n", + "、“\n", + "700\n", + "50%\n", + "60%。\n", + ":“\n", + "2000\n", + "1000\n", + "1000\n", + "1000\n", + "4394\n", + "135\n", + "68\n", + "175\n", + "175\n", + "144\n", + "1.81\n", + "75%,\n", + "448\n", + "67\n", + "1.65\n", + "59\n", + "46\n", + "2.3961\n", + "、1\n", + "51/52\n", + ",1996\n", + "55\n", + "、200\n", + "3、\n", + "1995\n", + "28\n", + "2、\n", + "3300\n", + "7—15\n", + "1、\n", + ",6\n", + "27\n", + "”、“\n", + "”、“\n", + "》、\n", + "800\n", + "300\n", + ",《\n", + ",《\n", + "———\n", + "19\n", + "30.5\n", + "89.0\n", + "5.3\n", + "19\n", + "06\n", + "5.1\n", + "30.5\n", + "89.5\n", + 
"1938\n", + ",1929\n", + ",1930\n", + ",1932\n", + "26\n", + "83\n", + "”。\n", + ":“\n", + "51\n", + ",『\n", + "』,\n", + "』,\n", + "』,\n", + "』,\n", + "』。\n", + "』。\n", + "……\n", + "”,“\n", + "”,“\n", + "”,“\n", + "”,“\n", + "”,“\n", + "”,“\n", + "》。\n", + "30%\n", + "70\n", + "》,\n", + "、“\n", + "44941\n", + "19022\n", + "3382\n", + "10270\n", + "5370\n", + "1134.7\n", + "51549\n", + "14066\n", + "5426\n", + "8406\n", + "143\n", + "91\n", + "909\n", + "3·3\n", + "2043\n", + "100\n", + "150\n", + "》、《\n", + "40%—60%\n", + ",1998\n", + "4.2%,\n", + "151.39\n", + "79%\n", + "46.7%。\n", + "206.73\n", + "7.7\n", + "1.3\n", + "8438.89\n", + "7813.11\n", + "10290.36\n", + ",“\n", + "”,\n", + "”,\n", + "2000\n", + ";5\n", + "2.2\n", + ",6\n", + "”,\n", + "”,\n", + "”。\n", + "BP\n", + "”:\n", + "”?\n", + "3.3\n", + "”。\n", + "”1\n", + "2300\n", + "”。\n", + ":6\n", + ":“\n", + ",“\n", + "500\n", + "》,\n", + "1832\n", + ",1841\n", + "25\n", + "20—2\n", + "“3·15”\n", + "”,\n", + "1927\n", + "———\n", + ":“\n", + ":“\n", + "……”\n", + "”,\n", + "1956\n", + ")。\n", + "1948\n", + "》,\n", + "”,\n", + ":“\n", + "42\n", + "120\n", + "7000\n", + "800\n", + "36\n", + ",1982\n", + "1956\n", + "———\n", + "21\n", + ":“\n", + ";1989\n", + ";1994\n", + "305\n", + "3400\n", + ";1996\n", + "65\n", + ":“\n", + "1983\n", + "———\n", + "》,\n", + "29\n", + "6.5\n", + "》、\n", + "1949\n", + "》,\n", + "1945\n", + "),\n", + "1945\n", + "》。\n", + ":1.“\n", + "”:\n", + "1997\n", + "1997\n", + "———\n", + "1995\n", + "52\n", + "---\n", + "1993\n", + "1989\n", + "》,\n", + "1987\n", + "1982\n", + "”,\n", + "33\n", + ":“\n", + "1/3\n", + "1942\n", + "1938\n", + ":“\n", + "”,\n", + "”,\n", + ":“\n", + "1925\n", + "80\n", + "”“\n", + ":“\n", + "”。\n", + ",“\n", + "”。\n", + "》,\n", + "300\n", + "1917\n", + "……\n", + ",10\n", + "”,\n", + "”;\n", + "95\n", + "1988\n", + "———\n", + "2000\n", + "1991\n", + "———\n", + "———\n", + "———\n", + ",80%\n", + "———\n", + "4—5\n", + "———\n", + "———\n", + "”。\n", + "———\n", + "———\n", + "2—3\n", + "、B\n", + "90\n", + "80%\n", + "”,\n", + "5890\n", + "4071\n", + "”,\n", + "164\n", + "105\n", + "2000\n", + ",90%\n", + "1600\n", + ",50\n", + "46\n", + "65\n", + ":“\n", + "WB—1\n", + "WB—1\n", + "……\n", + "”!\n", + "---\n", + "———\n", + "———\n", + "———\n", + "”。\n", + "……\n", + "115\n", + "340\n", + "2729\n", + "1200\n", + "1/4。\n", + "280\n", + "1985\n", + "178\n", + "68\n", + "405\n", + "1/3\n", + "”、\n", + "……\n", + "200\n", + "40\n", + "29\n", + "”!\n", + "300\n", + "———\n", + "”。\n", + "”,\n", + "”;\n", + "”。\n", + "》、《\n", + "》、《\n", + ",“\n", + "”,\n", + "”。\n", + "”,\n", + "100\n", + "2/3\n", + "”。\n", + "”,\n", + "”。\n", + "———\n", + "》,\n", + "》。\n", + "1/3\n", + "1/2,\n", + "19\n", + "”。\n", + "1994\n", + "”。\n", + "”。\n", + "1918\n", + "1982\n", + "”,\n", + "”,\n", + "”(\n", + "》。\n", + "1918\n", + "》,\n", + "1922\n", + "1925\n", + "》、《\n", + "》、《\n", + "》、《\n", + "”(\n", + ")。\n", + "……\n", + "……\n", + "———\n", + "”,\n", + "……\n", + "”,\n", + "“FAX”\n", + "”。\n", + "”,\n", + "),\n", + "》(\n", + "”、“\n", + "”、“\n", + "”,\n", + ":“\n", + ":“\n", + ":“\n", + ":“\n", + "———\n", + "),\n", + ",“\n", + "”,\n", + ",“\n", + "……\n", + "……\n", + ":“\n", + "”,\n", + "……\n", + "……\n", + "》、\n", + "……\n", + "45\n", + "1992\n", + "”,\n", + "”。\n", + "1998\n", + "———\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "”。\n", + "》、《\n", + "》,\n", + "……\n", + "”,\n", + ")。\n", + ":“\n", + "……\n", + "……\n", + "……\n", + "……\n", + "……\n", + "……\n", + ",“\n", + "》)、\n", + "》)、\n", + "》)、\n", + "》)\n", + 
"》,\n", + "……\n", + "》,\n", + "》,\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "1986\n", + "”;\n", + ":“\n", + "2、\n", + "”,\n", + "”,\n", + ":“\n", + ":“\n", + "》。\n", + "Rousseau(\n", + ",“\n", + ":“\n", + "……”\n", + "1993\n", + "”,\n", + "———\n", + "superannuatedCoquette\n", + "superannuated(\n", + "),\n", + "Coquette\n", + "Coquette,\n", + "”,\n", + ":superannuatedCoquette。\n", + "———\n", + "》,\n", + "”,\n", + ":“\n", + "”。\n", + "”,\n", + "∶(010)65912806\n", + "3—5\n", + "80)\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + ":“\n", + "”,\n", + "”。\n", + ":“\n", + "80\n", + "3.25\n", + "3.5\n", + "54\n", + "21\n", + "1993\n", + "23\n", + "48\n", + "、“\n", + ")“\n", + "———\n", + "……(\n", + "25\n", + ":“\n", + "74\n", + "2/3\n", + "29\n", + ":“\n", + "’,\n", + "25\n", + "25\n", + "7000\n", + "、“\n", + "”1.5\n", + ",“\n", + "”。\n", + ")(\n", + "”,\n", + "”。\n", + "……\n", + "”。\n", + "———\n", + "……\n", + "……\n", + "……\n", + "———\n", + "1995\n", + "———\n", + ":“\n", + "———\n", + "1994\n", + "———\n", + "———\n", + "……\n", + "……\n", + "”,\n", + "……\n", + "……\n", + ",“\n", + "”———\n", + ":“\n", + "”“\n", + "”。\n", + "VCD。\n", + "”,\n", + "1998\n", + "”。\n", + "》),\n", + "……\n", + ":“\n", + "……\n", + "”,\n", + "———\n", + "”,\n", + "”,\n", + "……\n", + "……\n", + "”,\n", + "”,\n", + "”……\n", + "……\n", + "……\n", + "……\n", + "———\n", + "”,\n", + ",“\n", + "”。\n", + "”。\n", + "》,\n", + "”,\n", + ")、\n", + ")、\n", + ")。\n", + "1949\n", + "》、《\n", + "》,\n", + "———\n", + "1959\n", + "……”\n", + "……”\n", + "”,\n", + "1957\n", + "1949\n", + ",“\n", + "』《\n", + ",《\n", + "●《\n", + "》,\n", + "》(\n", + "”,\n", + "———\n", + "》(\n", + "),\n", + "---\n", + "”,\n", + "---\n", + "”,\n", + "……“\n", + ",“\n", + "……\n", + "———\n", + ":“\n", + "”,“\n", + "1984\n", + "”。\n", + "”,“\n", + "”;“\n", + "”。\n", + ":“\n", + "”,“\n", + "”。\n", + "”。\n", + "》,\n", + "———\n", + "』,\n", + ":『\n", + "』,\n", + "6·\n", + ",“\n", + "5·\n", + "”,“\n", + "”,\n", + "4·\n", + "3·\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "2·\n", + "”。\n", + "1·\n", + ",“\n", + ",“\n", + ")。\n", + "1997\n", + "”。\n", + "、“\n", + "”,\n", + "”,\n", + ")、\n", + "1996\n", + "1991\n", + "”。\n", + "1991\n", + "1965\n", + "90%\n", + "1962\n", + "1962\n", + "1986\n", + "21\n", + "1985\n", + "”。\n", + "1983\n", + "1984\n", + "31\n", + "》,\n", + "1984\n", + "83\n", + "1954\n", + ",《\n", + "1954\n", + "1978\n", + "1975\n", + "1983\n", + "》、《\n", + "》,\n", + "1978\n", + "1959\n", + "28\n", + "1959\n", + "1954\n", + "21\n", + "1987\n", + "”。\n", + "”,\n", + "81\n", + ",“\n", + "”。\n", + "1986\n", + "1987\n", + "1986\n", + "”,\n", + "1983\n", + "1943\n", + "———\n", + "22\n", + "”。\n", + "1941\n", + "900\n", + "73%。\n", + "1022\n", + "516\n", + "1371\n", + "”,\n", + "2224\n", + "1.112\n", + "”,\n", + ",『\n", + "』。\n", + "』,\n", + "』,\n", + "』,\n", + "』;\n", + "』,\n", + "』。\n", + "』。\n", + "』(\n", + ",1997\n", + "25.3\n", + "110.5%,\n", + "2.4\n", + "2800\n", + "65\n", + "600\n", + "”,\n", + "”,\n", + "”、“\n", + "”、“\n", + "97\n", + "445\n", + "1997\n", + "———\n", + "40\n", + "”、“\n", + "628\n", + "40%\n", + "332\n", + "91\n", + "40\n", + "40\n", + "261\n", + "95\n", + "117\n", + "53\n", + ",“\n", + "99%,\n", + ",88%\n", + "』(\n", + "……\n", + "……”“\n", + ",(\n", + "”“\n", + "……”\n", + "”“\n", + "31\n", + "……”\n", + "1990\n", + "25\n", + "”,\n", + "”。\n", + "2.5\n", + "6000\n", + "”,\n", + "”,\n", + "1994\n", + "”。\n", + "1958\n", + "”。\n", + ")(\n", + "300\n", + "420\n", + "154\n", + 
"292\n", + ",90%\n", + "”,\n", + "62\n", + "70\n", + "589\n", + "54\n", + "22307\n", + "55\n", + "7817\n", + "31%,\n", + "25\n", + "699\n", + "3%。\n", + "”、“\n", + "”、“\n", + "”、“\n", + "17\n", + "”、“\n", + "240\n", + "300\n", + "1000\n", + ":“\n", + "330\n", + ":“\n", + ":“\n", + "36\n", + "”、“\n", + "”,\n", + ":“\n", + "21\n", + "6000\n", + "500\n", + "1000\n", + ",40\n", + "1995\n", + "40\n", + ":“\n", + "4.4\n", + "6000\n", + "1992\n", + ":“\n", + "19\n", + ":“\n", + "”1995\n", + "2.6\n", + "”。\n", + "40\n", + ":1.\n", + ";2.\n", + ";3.\n", + ":1.\n", + ";2.\n", + ";3.\n", + ";4.\n", + "9980\n", + "99800\n", + ")47\n", + "7.2%\n", + "589\n", + "1958\n", + ",1951\n", + "1951\n", + "5000\n", + "0.5\n", + ")、\n", + "113\n", + "113\n", + "86\n", + "589\n", + "47\n", + "27\n", + "834\n", + "203\n", + "521\n", + "507\n", + "1997\n", + "30421\n", + "73861\n", + "190128\n", + "19\n", + "142\n", + ":3\n", + "26\n", + "10.63\n", + "26\n", + "),\n", + "1997\n", + "29\n", + "1996\n", + "、8\n", + ",“\n", + "49\n", + "2495\n", + "1996\n", + "”、\n", + "IIR、\n", + "1990\n", + "1995\n", + "80\n", + "2.35\n", + ",“\n", + "1998\n", + ",“\n", + "1080\n", + "74772\n", + "1·4%,\n", + "7%—8%,\n", + "10%\n", + "3%—4%\n", + "5000\n", + "100\n", + "1997\n", + "1080\n", + "90\n", + ")。\n", + "276\n", + "176\n", + "480\n", + "600\n", + "1997\n", + "1080\n", + "39.3%。\n", + "200\n", + "22\n", + "▲1997\n", + "1080\n", + "▲1997\n", + "90\n", + "5000\n", + "100\n", + "7%—8%,\n", + "3%—4%,\n", + "1.4%\n", + "”。\n", + ",1997\n", + "226\n", + "1100\n", + "34\n", + "140\n", + "1997\n", + "1997\n", + "120\n", + "1997\n", + "200\n", + "100\n", + "1995\n", + "1997\n", + "160\n", + "166\n", + "》,\n", + "3、\n", + ")(\n", + ")2、\n", + ")(\n", + ")1、\n", + ")(\n", + "……\n", + "”、“\n", + "”,\n", + ",《\n", + "1997\n", + "》、《\n", + "》、《\n", + ",《\n", + ",1943\n", + ",1965\n", + "……\n", + "200\n", + "1986\n", + "2.5\n", + "1994\n", + "700\n", + ",“\n", + "”。\n", + "2000\n", + "1982\n", + ",“\n", + "”。\n", + "1935\n", + ",1950\n", + "86\n", + "(1+2)\n", + "(1+2)\n", + "22\n", + "65\n", + "(1+2)\n", + ",《\n", + "(1+2)\n", + "1966\n", + "1973\n", + "“1+2”\n", + "90\n", + "”,\n", + "”,\n", + ",65\n", + "4.9%,\n", + "90\n", + ",65\n", + "11%,\n", + "1131\n", + "351\n", + "226\n", + "179\n", + "55\n", + "30%,\n", + "80%\n", + "21%,\n", + "21%\n", + "1993\n", + "500\n", + "3—10\n", + "60\n", + "1996\n", + "2000\n", + "7.5\n", + "1/4。\n", + "Cisco、\n", + "IDT\n", + "2/3\n", + "1998\n", + ",“4000\n", + ",10\n", + ",“\n", + "”。\n", + "4000\n", + "、7\n", + "、9\n", + "、12\n", + "———\n", + "”,\n", + "27\n", + "900\n", + "1997\n", + "1994\n", + "———\n", + "1995\n", + "“CE”\n", + "1996\n", + "”。\n", + "”,\n", + "90\n", + ",90\n", + "26\n", + "”:“\n", + "240\n", + "1200\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "400\n", + "1000\n", + "……\n", + "……\n", + "1986\n", + "1990\n", + "37.5\n", + "1991\n", + "1995\n", + "257.8\n", + "70%\n", + ",1997\n", + "2000\n", + "———(\n", + "40\n", + "———\n", + "”。\n", + ",500\n", + "140\n", + "120\n", + "200\n", + ":“\n", + "1988\n", + "———\n", + "1994\n", + "21\n", + "5000\n", + "17\n", + "419\n", + "11.27\n", + ",220\n", + "———\n", + "”,\n", + "1971\n", + ",《\n", + "”、“\n", + "80\n", + ",“\n", + "7%,\n", + "15%\n", + "90\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "2011\n", + "810\n", + "22\n", + "36\n", + "2001\n", + ",600\n", + "0.03%,\n", + "(“\n", + "1999\n", + "1998\n", + "”、“\n", + "”、\n", + "400\n", + ",1997\n", + "109\n", + ",631\n", + "1997\n", + "4300\n", + "35\n", + 
"31\n", + "28212\n", + "1997\n", + "850\n", + "1996\n", + "49.12%,\n", + "2.94\n", + "1996\n", + "79.53%,\n", + "2.75\n", + "1996\n", + "9.69%。\n", + "200\n", + "1997\n", + "6366\n", + "100.64\n", + "37.4\n", + ",1997\n", + "266\n", + "1996\n", + "6.6%,\n", + "3.69\n", + "1996\n", + "22.3%,\n", + "4.03\n", + "33.5%。\n", + "31%,\n", + "6%。\n", + ",1997\n", + "7.64\n", + "1996\n", + "10.2%,\n", + "6.4\n", + "1996\n", + "12%;\n", + "1.2\n", + "1996\n", + "1.6%。\n", + "608\n", + "98\n", + "78174\n", + "1876\n", + "2.4%。\n", + "53\n", + "232\n", + "1200\n", + "60%\n", + "44\n", + "60%\n", + "1.8\n", + "》,\n", + "17\n", + "66%\n", + "2720\n", + "4850\n", + "”,\n", + ",1\n", + ",22\n", + "518\n", + "”。\n", + "”。\n", + "111\n", + "』,\n", + "』,\n", + "4100\n", + "3000\n", + "100\n", + ",1100\n", + ":“\n", + "3000\n", + "200\n", + "、100\n", + "100\n", + "1995\n", + "500\n", + "1994\n", + ":“\n", + "19\n", + "……\n", + "986\n", + "、200\n", + "28\n", + ":“\n", + "———“\n", + "”。\n", + "1995\n", + "39\n", + "》635\n", + "》。\n", + "”,\n", + "74\n", + "31\n", + "25\n", + "1.25\n", + "1800\n", + "1994\n", + "200\n", + "1996\n", + "4000\n", + "1550\n", + "701\n", + "31\n", + "80\n", + ",5000\n", + "400\n", + "31\n", + "108\n", + "”。\n", + "500\n", + "30%\n", + ",300\n", + "3000\n", + "4000\n", + "1995\n", + "---\n", + "”,\n", + ":“\n", + "”、“\n", + "”、“\n", + "”;\n", + ":“\n", + "”;\n", + "”……\n", + "……\n", + "”,\n", + ",“\n", + ",“\n", + ",“\n", + "○”\n", + "34\n", + "1100\n", + "1000\n", + "136\n", + "1993\n", + "1100\n", + "○”\n", + "1995\n", + "500\n", + "”,\n", + "”,\n", + "”,\n", + "60\n", + "6000\n", + "1995\n", + "150\n", + ",1992\n", + "○”\n", + "○”\n", + "54\n", + "47\n", + ",“\n", + "”。\n", + "1000\n", + ":4\n", + "1989\n", + ":“\n", + "”20\n", + ",“\n", + "”、“\n", + "”……\n", + "1998\n", + ")“\n", + "3480W(3000kcal/h)\n", + "7.5A,\n", + "1A\n", + "1A\n", + ")。\n", + "20%—40%\n", + "×140W。\n", + "2.8\n", + ",F\n", + ",C\n", + ",R\n", + ",D\n", + "×100W。\n", + "52.9%\n", + ",36.7%\n", + "60.1%,\n", + "10.4%。\n", + "36.5%;\n", + "22.1%;\n", + "20.3%\n", + "15.6%;\n", + "5.5%。\n", + "22%\n", + "”,\n", + "9.8%\n", + "58.7%\n", + "66%\n", + "4.\n", + "3.\n", + "2.\n", + "1.\n", + "114\n", + "30%,\n", + "3%。\n", + "750\n", + "880\n", + ";2000\n", + "1150\n", + "1500\n", + "4.\n", + "2.5—7\n", + "10—30\n", + "W。\n", + "10%。\n", + "20%—30%。\n", + "20%—30%。\n", + "1000\n", + ",15\n", + "1500\n", + ",30\n", + "2500\n", + ",40\n", + "3000\n", + ")、\n", + "13℃,\n", + "(12.8℃),\n", + ";3—5\n", + "5℃—6℃、14℃、19℃—20℃,\n", + "8.7℃、14.5℃\n", + "20℃。\n", + "4℃—6℃,\n", + "7℃—8℃。\n", + ",2\n", + "80\n", + "39℃—41℃。\n", + "1997\n", + "68%\n", + ",2000\n", + "1000\n", + "10.7%—47.9%,\n", + ":“\n", + "1998\n", + "98\n", + ",“\n", + ",24\n", + ":“\n", + "70%\n", + "1992\n", + "100\n", + ":“\n", + "6553\n", + "224\n", + ":“\n", + "1994\n", + "6000—7000\n", + "2500—3500\n", + "”,\n", + "105\n", + "2.51、\n", + "3.03;\n", + "30%\n", + "98\n", + "———“\n", + "40\n", + "30%。\n", + "”,\n", + ",72\n", + "3、4\n", + ",7、8\n", + "1300\n", + "81\n", + "3—10\n", + ":“\n", + "20%\n", + "———(\n", + ",“\n", + "”,\n", + ",“\n", + "”,\n", + "———\n", + "”(\n", + "),\n", + "”,\n", + "”,“\n", + "”。\n", + ",“\n", + ":《\n", + "”。\n", + "”,\n", + "”、“\n", + ":《\n", + ":《\n", + "〕ⅱ\n", + "……\n", + ":『\n", + ":『\n", + "》。\n", + "……\n", + ":『\n", + ":『\n", + ",《\n", + "……\n", + "……\n", + ",《\n", + "……,\n", + "”,\n", + "”、“\n", + "”,“\n", + "……\n", + ":“\n", + "”,\n", + "》,\n", + ",《\n", + "”,\n", + "……\n", + ":“\n", + "”,\n", + ":“\n", + ",《\n", + 
";《\n", + ";《\n", + ";《\n", + "1979\n", + "233\n", + "95\n", + "328\n", + "”。\n", + "2、\n", + "1、\n", + "1949\n", + "1996\n", + "48\n", + "31\n", + "60\n", + "1993\n", + "85%,\n", + ",10\n", + "80\n", + "42\n", + "17\n", + "17\n", + "19\n", + ",42\n", + ",40\n", + ":“\n", + "90\n", + "40\n", + "---\n", + ",1998\n", + "3000\n", + "1996\n", + "7000\n", + "1993\n", + "1997\n", + "125\n", + "200\n", + "”,\n", + "———\n", + "』,\n", + "』,\n", + "』。\n", + "』。\n", + "』。\n", + "1997\n", + "》,\n", + "97\n", + "127\n", + "40\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + ",“\n", + "21\n", + "……\n", + ":“\n", + "”,\n", + ":(010)65013467\n", + "B12\n", + "18%—25%。\n", + "56%,\n", + "39%,\n", + "16%,\n", + "15%,\n", + "81)\n", + "”、“\n", + "”、“\n", + "”,\n", + "2300\n", + "400\n", + "”,\n", + "1500\n", + "1996\n", + "1000\n", + "):\n", + "”,\n", + "):\n", + "100\n", + "):\n", + "”,\n", + "):\n", + "21\n", + "):\n", + "):\n", + "21\n", + "):\n", + "”,\n", + "5000\n", + "):\n", + ",60%\n", + "70%\n", + "90%。\n", + "200\n", + "300\n", + ",1\n", + "60%—70%。\n", + "):\n", + "):\n", + "):\n", + "8000\n", + "):\n", + "7000\n", + "”1.5\n", + "):\n", + "》。\n", + "”,\n", + "》,\n", + "》,\n", + "》,\n", + "》,\n", + ",《\n", + "》、《\n", + "”,\n", + "”,\n", + ",“\n", + "1997\n", + "———\n", + "———\n", + "———\n", + "”。\n", + "80\n", + ",《\n", + "70\n", + ":“\n", + "》,\n", + "”(\n", + "21\n", + "”)\n", + "”———\n", + "———“\n", + "”,\n", + "001\n", + "1986\n", + "》,\n", + "”,\n", + "”,\n", + ":“\n", + "2500\n", + "400\n", + "1992\n", + "1900\n", + "———\n", + "80\n", + "》,\n", + "5020\n", + "1.6\n", + "1298\n", + "1445\n", + "400\n", + "(1041—1048),\n", + "”,\n", + "600\n", + "》、《\n", + "636\n", + "),\n", + "……\n", + "1996\n", + "———\n", + "CD—ROM、DVD\n", + "E—MAIL,\n", + "”,\n", + "”、\n", + "100\n", + "):\n", + "”,\n", + "2000\n", + "”,\n", + "”。\n", + "3,\n", + "”?\n", + ",《\n", + "):\n", + "……”\n", + "”、“\n", + "》,\n", + "”。\n", + "”、“\n", + "”、“\n", + "”。\n", + "”,\n", + "(52\n", + "):\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”“\n", + "---\n", + "③)\n", + "1/10;\n", + "———\n", + "40\n", + "600—800\n", + "300\n", + "”(\n", + "”(\n", + ",1998\n", + "”。\n", + "———\n", + "”,\n", + "———\n", + "———\n", + "”;\n", + "”,\n", + "150\n", + "52\n", + "1.5\n", + "330\n", + "130\n", + "”、\n", + "1000\n", + "80%\n", + "———\n", + "6500\n", + "),1997\n", + "4.6\n", + "5135\n", + "1996\n", + "80%\n", + "90%,\n", + "50.0%。\n", + "1998\n", + "130\n", + "82%。\n", + "1997\n", + "300\n", + "25\n", + "11.4\n", + "———116\n", + "60\n", + ",1992\n", + "1982\n", + "5000\n", + "4000\n", + "300\n", + ",4000\n", + "13.33\n", + "1996\n", + "———\n", + "2000\n", + "2000\n", + ",300\n", + "4000\n", + "6000\n", + "1.7\n", + ",“\n", + "70\n", + "40\n", + "119\n", + "6000\n", + "---\n", + "》,\n", + "1997\n", + ",1976\n", + "……\n", + "”,\n", + ",《\n", + "(1469)\n", + "》、《\n", + "》、《\n", + "1977\n", + ",1947\n", + ")(\n", + ":“\n", + "》,\n", + "》,\n", + "”,\n", + "1997\n", + "1991\n", + "1975\n", + "”。\n", + "”。\n", + "”、“\n", + "”。\n", + "———\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + "”,\n", + "”:\n", + ",“\n", + "”、\n", + "”,\n", + "》、\n", + "”,\n", + "”,\n", + "”,\n", + ":“\n", + "”,\n", + ",“\n", + "”,\n", + "”,\n", + "”,\n", + ":“\n", + "”3、\n", + ")(\n", + "〉)2、\n", + ")(\n", + "〉)\n", + "”,\n", + "”,\n", + "28\n", + "27\n", + "1986\n", + "213\n", + ",27\n", + ":“\n", + "”,\n", + "76.5\n", + "5.1\n", + "59.5\n", + ",115\n", + "40\n", + "960\n", + "220\n", + "81\n", + "53.3\n", + "36.6\n", + "2200\n", + "200\n", + 
"28\n", + ",1994\n", + ",(\n", + "175\n", + "———\n", + "———\n", + "”,\n", + "……\n", + "……\n", + "”,\n", + "”,\n", + "———\n", + "……\n", + "96%。\n", + "2、\n", + "6474.8\n", + "794.7\n", + "86.9\n", + "32\n", + "1、\n", + "28\n", + "”。\n", + "》,\n", + ":《\n", + "1988\n", + "”,1994\n", + ",43\n", + "1400\n", + "600\n", + "4300\n", + ":“\n", + "78\n", + "43\n", + "1955\n", + ":“40\n", + ")’,‘\n", + ",12\n", + ",500\n", + "480\n", + "2/3\n", + "80%\n", + "592\n", + "43.4%。\n", + "5000\n", + "……\n", + "90\n", + "150\n", + "……\n", + "———\n", + "”,\n", + ":“\n", + "1000\n", + "”;\n", + "”。\n", + "……\n", + "60\n", + ":“\n", + "99\n", + "》、《\n", + "》、《\n", + "200\n", + ",64%\n", + ",1991\n", + ":“\n", + "”,\n", + "……1987\n", + "”。\n", + "36%\n", + "97%。\n", + "155\n", + "”,\n", + "26\n", + ",“\n", + "”。\n", + ",“\n", + "”。\n", + "……\n", + "1978\n", + "”。\n", + "”。\n", + ",“\n", + "1978\n", + ",“\n", + "”。\n", + "1992\n", + "”,\n", + "”,\n", + "1978\n", + "”,\n", + "”,\n", + "“‘\n", + "”。\n", + "”,\n", + "”,\n", + "1977\n", + ",《\n", + "1978\n", + "》,\n", + "(1)\n", + "1500\n", + "284\n", + "40%\n", + "”,\n", + "80\n", + ":“\n", + "400\n", + "1000\n", + "”(\n", + ",16\n", + ",12\n", + ",100\n", + "1991\n", + "23\n", + "1995\n", + "3000\n", + "1998\n", + "100\n", + ",1996\n", + "30%\n", + "40%\n", + "1988\n", + "1.5\n", + "1985\n", + "———\n", + ",300\n", + ",10\n", + "435\n", + ",“\n", + "1000\n", + "—35\n", + "—2\n", + "1998\n", + "200\n", + "1996\n", + "1996\n", + "———\n", + "1996\n", + "70\n", + "1995\n", + "1991\n", + "”、“\n", + ",1973\n", + "1997\n", + "———\n", + "1994\n", + "1991\n", + "”,\n", + ",1964\n", + "、“\n", + "○』\n", + "○』\n", + "○』\n", + "○』\n", + "(『\n", + "○』\n", + ")。\n", + "○』\n", + "』、『\n", + "140\n", + "”。\n", + "36.61\n", + "”,\n", + "80%,\n", + "50%。\n", + "372\n", + "23862\n", + "41851\n", + "600\n", + "———\n", + "”,\n", + "600\n", + ":“\n", + ":“\n", + ":“\n", + "”20\n", + ":“\n", + "209\n", + "28\n", + "12·3%,“\n", + "180\n", + "31\n", + "8%,\n", + "9.5%。\n", + "800\n", + "1000\n", + "》。\n", + "”,\n", + ":“\n", + "”,\n", + "———\n", + "———\n", + "”———\n", + "”,\n", + "”,\n", + "”、“\n", + "”,\n", + "———“\n", + "》、\n", + "》、\n", + "》、《\n", + "1998\n", + "》,\n", + "1997\n", + "”、\n", + ",“\n", + "”,\n", + "”。\n", + "”。\n", + "”。\n", + "1995、1996\n", + ",1997\n", + "———\n", + "……\n", + "……\n", + ")12\n", + "223\n", + "”,\n", + "223\n", + "214\n", + "”,\n", + "223\n", + "214\n", + "”。\n", + "1998\n", + ",“\n", + "1755\n", + "),\n", + "”。\n", + "”。\n", + "1998\n", + "27\n", + "”,\n", + "”;\n", + "”(\n", + "———\n", + "———\n", + "”,\n", + "”,\n", + "》、《\n", + "》、《\n", + "》,\n", + "》、《\n", + "》,\n", + "》,\n", + "》,\n", + ":“\n", + "1992\n", + "”,\n", + "———\n", + "”。\n", + "》,\n", + "”、“\n", + ")。\n", + "』,\n", + ":『\n", + "』。\n", + "”,\n", + "”。\n", + "1997\n", + "”。\n", + "———\n", + "1.2\n", + "……\n", + "280\n", + "》,\n", + "23\n", + "160\n", + "34\n", + "14.7%。\n", + "1998\n", + ",《\n", + "”,\n", + "”,\n", + "”、“\n", + "”。\n", + "90%\n", + "101\n", + "80%\n", + "72.3%。\n", + "300\n", + "112\n", + "19\n", + "35\n", + "58.5%。\n", + "79\n", + "101\n", + "21.8%。\n", + "”。\n", + "』,\n", + "”,“\n", + "100\n", + "120\n", + "”。\n", + "1995\n", + "32.65%,\n", + "7.7%,\n", + "59.7%。\n", + "”,\n", + "100%\n", + "”:\n", + "”。\n", + "1997\n", + "510\n", + "853.64\n", + "25%\n", + "37.2%。\n", + "68\n", + ",41\n", + ")”\n", + "781\n", + "204.39\n", + "92.11\n", + "3398.18\n", + "189.2\n", + "18.21\n", + "35.44\n", + "1513\n", + "145.16\n", + "46\n", + "2837\n", + "252\n", + "1409\n", + "”。\n", + 
":“\n", + "1994\n", + "”,\n", + "”,\n", + "”,\n", + "》,\n", + "”。\n", + ",“\n", + "”、“\n", + "”、\n", + "”。\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "30%\n", + "1988\n", + "1983\n", + ",『\n", + "』,\n", + "』,\n", + "』,\n", + ",『\n", + "』、『\n", + "』,\n", + "』,\n", + "』,『\n", + "』。\n", + ":“\n", + ",‘\n", + ":“\n", + ",‘\n", + "———\n", + "2050\n", + "),\n", + "1%。\n", + ";“\n", + "”,“\n", + "”,“\n", + "”,\n", + "),\n", + "”、“\n", + "”。\n", + "”、“\n", + "(learningsociety),\n", + ",“\n", + ",“\n", + "”、“\n", + "”、“\n", + "),\n", + ",“\n", + "50%\n", + "48.312\n", + "22.733\n", + "22.394\n", + "22.065\n", + "15.291\n", + "15.412\n", + "13.773\n", + "A12.794\n", + "8.305\n", + "7.781\n", + "47.992XR\n", + "30.593\n", + "19.644\n", + "17.215\n", + "15.08\n", + "30%\n", + "30%\n", + "500\n", + "29\n", + "、30\n", + "”(\n", + "),\n", + "40—50\n", + "1993—1995\n", + "8%\n", + ",“\n", + "”,\n", + ",“\n", + "”,\n", + "4000\n", + "8000\n", + "1.16\n", + "4000\n", + "55%\n", + "1.15\n", + "”,\n", + "”。\n", + "8%\n", + "10—20\n", + "5—10\n", + "9.4\n", + "300\n", + "10.5\n", + "GDP\n", + "5%\n", + "GDP\n", + "80\n", + "43%、41%、24%\n", + "47%,\n", + ",《\n", + "》(\n", + "》。\n", + "”,\n", + ",《\n", + "———\n", + "》、《\n", + "”、“\n", + "”、“\n", + "”、“\n", + "———\n", + "……\n", + "》,\n", + ":“\n", + "———\n", + "……\n", + "1996\n", + "……\n", + "1995\n", + "———\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "……\n", + ":“\n", + ":“\n", + "———\n", + "1948\n", + "6—7\n", + "》,\n", + "”,\n", + "”,“\n", + "”、“\n", + "”、“\n", + "》,\n", + ",《\n", + ",“\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "”,\n", + "》、《\n", + "6、7\n", + "》、《\n", + "》、《\n", + "》、《97\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "———\n", + "……\n", + ":“\n", + "”,\n", + ":“\n", + "……”\n", + "”。\n", + "》(\n", + "),\n", + "21\n", + "21\n", + "1997\n", + "1000\n", + "31\n", + "abb、att、ibm\n", + "90\n", + "21\n", + "19\n", + "1981\n", + "1996\n", + "39%,\n", + "211\n", + "26\n", + "),\n", + "600\n", + ")。\n", + "21\n", + "》,\n", + "21\n", + "26\n", + "』,\n", + "1992\n", + "96\n", + "”。\n", + ":“\n", + "21\n", + ":11\n", + "60\n", + "21\n", + "21\n", + "———\n", + "》。\n", + "1984\n", + ",《\n", + "21\n", + "1997\n", + "--\n", + "○○○\n", + ",“\n", + "》。\n", + "》,\n", + "25\n", + ")12\n", + "21\n", + "60\n", + "”,\n", + "90\n", + "36\n", + ":“\n", + "”。\n", + ":“\n", + "--\n", + "---\n", + ":———\n", + "———\n", + "———\n", + "———\n", + "1998\n", + "1998\n", + ":1997\n", + "1998\n", + "1998\n", + "1997\n", + "》(\n", + "》)\n", + "》,\n", + "1997\n", + ":1.\n", + "2.\n", + "3.\n", + "4.\n", + "5.\n", + "1997\n", + ":1.《\n", + "》(\n", + "77\n", + ");2.《\n", + "》(\n", + "88\n", + ");3.《\n", + "》(\n", + "180\n", + ");4.《\n", + "》(\n", + "235\n", + ");5.《\n", + "》(\n", + "315\n", + ");6.《\n", + "》(\n", + "193\n", + ");7.《\n", + "》(\n", + "199\n", + ");8.《\n", + "》(\n", + "246\n", + ");9.《\n", + "》(\n", + "286\n", + ");10.《\n", + "》(\n", + "186\n", + ");11.《1981\n", + "》(\n", + "373\n", + ");12.《\n", + "》(\n", + "367\n", + ");13.《\n", + "》(\n", + "381\n", + ");14.《\n", + "》(\n", + "432\n", + ")。\n", + ":1.《\n", + "》(\n", + "115\n", + ";2.《\n", + "》(\n", + "135\n", + ";3.\n", + ";4.《\n", + "》(\n", + "101\n", + ";5.《\n", + "》(\n", + "385\n", + ";6.《\n", + "》(\n", + "366\n", + ";7.《\n", + "》(\n", + "288\n", + "a《\n", + "》、\n", + "c《\n", + "》;8.《\n", + "》(\n", + "383\n", + "(3)\n", + ";9.《\n", + "》(\n", + "486\n", + "(2)\n", + ";10.1992\n", + "17\n", + "》(\n", + "151\n", + ";11.1995\n", + "27\n", + "》(\n", + "245\n", + ":1.\n", + 
"”、“\n", + "”、“\n", + "2.\n", + "3.\n", + "4.\n", + "5.\n", + "6.\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "7.\n", + "8.\n", + "9.\n", + "”;\n", + "10.\n", + "”。\n", + "19\n", + "(157\n", + "28\n", + "(1997\n", + "》,\n", + "1996\n", + "1997\n", + "》、《\n", + "1996\n", + "1997\n", + "》,\n", + "2010\n", + "1997\n", + ")(\n", + "”、“\n", + "”、\n", + ")(\n", + ":1997\n", + "”,\n", + ":“\n", + "650\n", + "”,\n", + ":10\n", + "1996\n", + "”,\n", + "600\n", + ")》\n", + ")》\n", + ":“\n", + ":“\n", + "”“\n", + ":“\n", + "435\n", + "17\n", + "435\n", + "427\n", + ",424\n", + "424\n", + ":“\n", + ":“\n", + "1992\n", + "”、“\n", + "”、“\n", + "”、“\n", + "21\n", + "21\n", + "1997\n", + "”、“\n", + "”、\n", + "28\n", + "”、“\n", + "”、\n", + ")《\n", + ",“\n", + "”:\n", + "”,\n", + "”。\n", + "90\n", + "1979\n", + "”,\n", + "”,\n", + "”:\n", + ",1995\n", + ",“\n", + "”。\n", + ",“\n", + "1995\n", + "”、“\n", + "”。\n", + "”。\n", + ",“\n", + "”。\n", + ":“\n", + "》、《\n", + "》、《\n", + "21\n", + "52\n", + "21\n", + "、“\n", + ",21\n", + "1997\n", + "26\n", + "26\n", + ":1997\n", + "19\n", + "100\n", + ",1925\n", + "1927\n", + "1931\n", + ";1932\n", + "1933\n", + "1934\n", + "1935\n", + "21\n", + "38\n", + ":“\n", + "1932\n", + "1932\n", + "1934\n", + "”,\n", + "”,\n", + "”,\n", + "25\n", + "25\n", + "1972\n", + "1/4\n", + "1996\n", + "200\n", + "1500\n", + "160\n", + "1997\n", + "34\n", + "),\n", + "25\n", + "25\n", + "21\n", + "”)\n", + "1997\n", + "1997\n", + "21\n", + ":1.《\n", + "》;2.《\n", + "1998—2000\n", + "》。\n", + "1997\n", + "17\n", + "17\n", + "』,\n", + "』,\n", + "1997\n", + ",10\n", + "),\n", + ":“\n", + ":“\n", + ":“\n", + "”2001\n", + ":“\n", + ":“\n", + ":“\n", + ")1.5\n", + "17\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "2.5\n", + "19\n", + "17\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "3.5\n", + "23\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "4.5\n", + "19\n", + "(1)62\n", + "(2)59\n", + "(3)64\n", + "5.5\n", + "19\n", + "60.8%\n", + ":(1)\n", + "(2)\n", + "(3)\n", + ":1.\n", + "26889108,\n", + ")。\n", + "2.\n", + "1000\n", + "500\n", + "130\n", + "1000\n", + "3.\n", + "4.\n", + "26889108\n", + "229\n", + "》。\n", + "5.\n", + ":1.(1)2.(1)3.(1)4.(2)5.(3)。\n", + ")1.5\n", + "27\n", + "》。\n", + "(1)16\n", + "(2)19\n", + "(3)15\n", + "2.5\n", + "31\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "3.5\n", + "23\n", + "70%\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "4.5\n", + "26\n", + "30%\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "5.6\n", + "(1)\n", + "(2)\n", + "(3)\n", + ":1.\n", + "26889108,\n", + ")。\n", + "2.\n", + "1000\n", + "500\n", + "130\n", + "1000\n", + "4.\n", + "5.\n", + "26889108\n", + "230\n", + "》。\n", + "6.\n", + ":1.(2)2.(1)3.(3)4.(1)5.(1)。\n", + ")7\n", + "27\n", + ":———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "21\n", + "”,\n", + "21\n", + "40\n", + ":———\n", + "———\n", + "———\n", + "———\n", + "1999\n", + "》,\n", + "31\n", + "47\n", + "131\n", + "1996\n", + "41\n", + "28%,\n", + "31%。\n", + "41\n", + "17\n", + "———“\n", + "110”\n", + "”、“\n", + ")11\n", + "25\n", + "25\n", + "25\n", + "21\n", + ":———\n", + "———\n", + "———\n", + "———\n", + "———\n", + "—20\n", + "40\n", + "21\n", + ")12\n", + "———\n", + ",“\n", + "”。\n", + "”,\n", + ",《\n", + "》、《\n", + "》、\n", + "21\n", + "1997\n", + "1982\n", + "》,\n", + "2020\n", + "21\n", + "(1997\n", + ":“\n", + "26\n", + "1997\n", + "26\n", + "1997\n", + "———\n", + "34\n", + "1997\n", + ":“\n", + "”“\n", + ")、\n", + ")1.\n", + "25\n", + ":①\n", + "2.\n", + "2380\n", + ":①\n", + "3.\n", + ":①\n", + "4.\n", + "28\n", + 
":①\n", + "5.\n", + ")1.4\n", + "2.4\n", + "21\n", + "3.4\n", + "22\n", + "4.\n", + "21\n", + "9.6\n", + "5.\n", + "43\n", + "185\n", + ")1.6\n", + ":“\n", + "”。\n", + "(1)\n", + "(2)\n", + "(3)\n", + "2.6\n", + "318\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "3.6\n", + "150\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "4.6\n", + ":(1)\n", + "(2)\n", + "(3)\n", + "5.6\n", + "(1)\n", + "(2)\n", + "(3)\n", + "--\n", + "』,\n", + "』;\n", + "』,\n", + "』、『\n", + "』、\n", + "』。\n", + "』;\n", + "』、『\n", + "』、\n", + "》。\n", + "』;\n", + "”——\n", + "22\n", + "”,\n", + "”。\n", + "1996\n", + "》,\n", + ",13\n", + "、538\n", + "28\n", + ",15\n", + "26\n", + "》,\n", + "”11\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》。\n", + "1994\n", + "1041\n", + ");\n", + "80\n", + "298\n", + "105\n", + "2000\n", + "1000\n", + "1995\n", + ":“‘\n", + "”,\n", + "”。\n", + ":“\n", + "”,\n", + "”,\n", + "———\n", + "”。\n", + "”。\n", + ":“\n", + "”,“\n", + "”。\n", + ",56\n", + "1955\n", + ":“\n", + "”1974\n", + ":“\n", + "”,\n", + ":“\n", + "1978\n", + "1988\n", + "1978\n", + ":“\n", + "”1979\n", + ":“\n", + "1977\n", + "1981\n", + "1978\n", + "”。\n", + "”,“\n", + "3000\n", + "1975\n", + ":“\n", + "”1978\n", + ":“\n", + "”。\n", + "1982\n", + ":“\n", + "1986\n", + ":“\n", + "1978\n", + ":“\n", + "”1979\n", + "”1981\n", + ":“\n", + ",1978\n", + ":“\n", + ":“\n", + "”,“\n", + "”,\n", + "25%\n", + "7、\n", + "8、\n", + "MD82\n", + "MD90—30\n", + "AE100\n", + "”,1971\n", + "1974\n", + "”,\n", + "19759\n", + "1978\n", + ":“\n", + "》,\n", + "”、“\n", + "”、“\n", + "』,\n", + ",1995\n", + "1996\n", + "219\n", + "60\n", + "21\n", + "1994\n", + ",1996\n", + "1996\n", + "1996\n", + "”、“\n", + "82\n", + "1995\n", + "1996\n", + "”、“\n", + "》,\n", + ")《\n", + ")。\n", + "70\n", + "1985\n", + "”:\n", + "1/2\n", + "1/2\n", + "90%\n", + ":『\n", + ":『\n", + "』『\n", + ":『\n", + "』;\n", + ":『\n", + "』;\n", + ":『\n", + "』。\n", + "》、《\n", + "》,《\n", + ":《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》、《\n", + "》,《\n", + "》,《\n", + "》。\n", + "21\n", + "45\n", + "51\n", + "28\n", + "21\n", + "29\n", + "1958\n", + "139\n", + ",1997\n", + "》,\n", + "”。\n", + "2.2%,\n", + "34%,\n", + "1/4\n", + "3000\n", + "26\n", + "300\n", + "abt\n", + "80%\n", + "1.5\n", + "58\n", + "”,\n", + "500\n", + "64\n", + "23\n", + "1.5\n", + "3000\n", + "6610\n", + "513\n", + "42\n", + "194\n", + "202\n", + "80\n", + "1978\n", + "703\n", + "63\n", + "47%\n", + "35%。\n", + "17\n", + "1996\n", + "390\n", + ",1996\n", + "5000\n", + "》、《\n", + "》、《\n", + "”、“\n", + "”、“\n", + "1996\n", + "》、《\n", + "》、《\n", + "》、《\n", + "22\n", + "1987\n", + "491\n", + "100\n", + "97\n", + "33.57\n", + "27\n", + "37\n", + ",37\n", + "、11\n", + "、3\n", + "》,\n", + "37\n", + ")1.\n", + "”2.\n", + "3.\n", + "”4.\n", + "”5.\n", + "”6.\n", + "”7.\n", + "8.\n", + "9.\n", + "10.\n", + "11.\n", + "12.\n", + "13.\n", + "14.\n", + "15.\n", + "16.\n", + "17.\n", + "”18.\n", + "19.\n", + "20.\n", + "21.\n", + "22.\n", + "23.\n", + "24.\n", + "25.\n", + "26.\n", + "27.\n", + "28.\n", + "29.\n", + "30.\n", + "31.\n", + "32.\n", + "33.\n", + "34.\n", + "35.\n", + "36.\n", + "37.\n", + ")1.\n", + "2.\n", + "3.\n", + "4.\n", + "5.\n", + "6.\n", + "7.\n", + "8.\n", + "9.\n", + "10.\n", + "11.\n", + "(3\n", + ")1.“\n", + "2.\n", + "”3.\n", + ")1.\n", + ")2.\n", + "”3.\n", + "”(\n", + ")、“\n", + "”(\n", + ")4.\n", + "”(\n", + ")、“\n", + "”(\n", + "1994\n", + "1995\n", + "313\n", + "1206\n", + "69\n", + "1.5\n", + "4900\n", + "17\n", + "1993\n", + "1994\n", + 
"1109\n", + "508\n", + "2642\n", + "143\n", + "”:———\n", + "———\n", + "”;\n", + "———\n", + "———\n", + "”。\n", + "900\n", + "3.3\n", + "29\n", + "21\n", + "21\n", + ",33\n", + ",12\n", + "pos\n", + "bims\n", + ",“\n", + "”。\n", + "70\n", + "”,\n", + "……\n", + ":1、\n", + ")2、\n", + ":『\n", + "》。\n", + "『ce』\n", + "『pl』\n", + "』,\n", + "》,\n", + "、500\n", + "35\n", + "130\n", + "150\n", + "),\n", + "”。\n", + "1894\n", + "》,\n", + "1904\n", + "1942\n", + "1992\n", + "》,\n", + "70\n", + "1993\n", + "”,\n", + "60\n", + "2000\n", + "”。\n", + "》,\n", + "”,\n", + "”!\n", + "”。\n", + "1998\n", + "),\n", + "———’97\n", + "1996\n", + "1996\n", + "1996\n", + "96\n", + "935\n", + "191\n", + "53\n", + "21\n", + "40\n", + "———’97\n", + "———1997\n", + "———\n", + "———\n", + "———\n", + "1996\n", + "———\n", + "———\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”、“\n", + "”。\n", + ",1997\n", + "——\n", + "”、“\n", + "99\n", + "28\n", + "23\n", + "1000\n", + "103\n", + "1500\n", + "144\n", + "277\n", + "25\n", + "162\n", + "58\n", + "21\n", + "”。\n", + "”:\n", + "”;\n", + "”,\n", + ":“\n", + ":“\n", + "”。\n", + ":1.\n", + "2.\n", + "3.\n", + "”,\n", + ",“\n", + "”,\n", + "”。\n", + "1994\n", + ":“\n", + "”———\n", + "”,\n", + ":“\n", + ":“\n", + "、“\n", + ",“\n", + ":“\n", + ":“\n", + "--《\n", + "),\n", + "25\n", + ",1921\n", + ",1922\n", + "1942\n", + "55\n", + "》(\n", + "),\n", + ":“\n", + "”!\n", + "”,\n", + "”,\n", + "”,\n", + "”。\n", + "”,\n", + ":“\n", + "”,\n", + "”。\n", + "”,\n", + ":“\n", + "、“\n", + "“××\n", + "”,\n", + "———\n", + "”,\n", + "———\n", + "———\n", + "…………\n", + "”“\n", + "17\n", + "1981\n", + "1993\n", + "52\n", + "483\n", + "63\n", + "56\n", + ",“\n", + "”。\n", + "》。\n", + "”。\n", + "”,\n", + "”。\n", + ":“\n", + "”。\n", + "——《\n", + "》。\n", + ",《\n", + "500\n", + "……\n", + "———\n", + "”,\n", + "”,\n", + "”……\n", + "1860\n", + "》,\n", + "”,\n", + "》、《\n", + "”,\n", + "”。\n", + "——\n", + "2000\n", + "……\n", + "5380\n", + "4500\n", + "———\n", + "42000\n", + "……\n", + "》(\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "——\n", + "》(\n", + "———\n", + ":“\n", + "、“\n", + "”、“\n", + "”,\n", + "”,\n", + "』。\n", + "』,\n", + "』,\n", + "……\n", + "”,\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《“\n", + "》(\n", + ")、《\n", + "》(\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + ")、《\n", + "》(\n", + "———\n", + "》,\n", + "》、《\n", + "》、《\n", + "》,\n", + "』,\n", + "』(\n", + "》、《\n", + "』,\n", + "(《\n", + "》,\n", + "1∶0\n", + "1∶1,\n", + "”,\n", + "”、“\n", + "”、“\n", + ":“\n", + "……”\n", + ":“\n", + ":“\n", + ":“\n", + "”,7\n", + "”,8\n", + "”,10\n", + "”……\n", + "”。\n", + "》,\n", + "》,\n", + "》,\n", + "》,\n", + "”。\n", + "》,\n", + ":“\n", + ":“\n", + "”(\n", + "55\n", + "《“\n", + ",《\n", + ":“\n", + ":“\n", + ":“\n", + ":“\n", + ",“\n", + "1990\n", + ":———\n", + "———\n", + "4095\n", + "26.74%;\n", + "520\n", + "2415\n", + "683\n", + "———\n", + "1374012\n", + "578\n", + "252538\n", + "”、“\n", + "”、\n", + "150\n", + "1984\n", + "1990\n", + "1997\n", + ",1990\n", + "1991\n", + "1997\n", + "1995\n", + "1995\n", + "》;\n", + "》,\n", + "1997\n", + "49\n", + "1948\n", + ",1970\n", + ",1973\n", + ",1988\n", + "1991\n", + "1992\n", + "100\n", + "110\n", + "”(\n", + "”。\n", + "”、\n", + "SOS\n", + "SOS\n", + "100\n", + "27\n", + "100\n", + "100\n", + "”。\n", + "21\n", + ",523\n", + "300\n", + "102\n", + "88\n", + "102\n", + "88\n", + 
":———\n", + "64\n", + "110\n", + "110\n", + "》,\n", + ";———\n", + ",58\n", + "5098\n", + ";———\n", + "、88\n", + ";———\n", + "570\n", + "379\n", + "5609\n", + ";———\n", + "2573\n", + "6512\n", + "40%\n", + "5400\n", + "3100\n", + "62\n", + "560\n", + "——\n", + "1982\n", + "1990\n", + "”,\n", + "100\n", + "”。\n", + "365\n", + "”,\n", + "”,\n", + ":“\n", + "”,\n", + "”,\n", + "30%,\n", + "48%,\n", + "1.6\n", + "4859\n", + "4065\n", + "3311\n", + "2681\n", + "1959\n", + "1128.4\n", + "707\n", + ")342\n", + "519.4\n", + "470\n", + "33\n", + "13.6\n", + "”、“\n", + "”、“\n", + "”(\n", + ")、\n", + "”。\n", + "○、\n", + "○,\n", + "』,\n", + "○、\n", + "——\n", + "61\n", + "315\n", + "3047\n", + "1752\n", + "1364\n", + "808\n", + "70%,\n", + "60%\n", + "23\n", + "1996\n", + "102\n", + "100\n", + "1978\n", + "2.63\n", + "102\n", + "38.8\n", + "100\n", + "”。\n", + ",“\n", + "2487\n", + "4%\n", + "2010\n", + "》,\n", + "97\n", + ":“\n", + "”。\n", + "21\n", + "PATA)\n", + "5100\n", + "102\n", + "6.4\n", + "1638\n", + "97\n", + "PATA\n", + "、PATA\n", + "1500\n", + "200\n", + "1952\n", + "2000\n", + "1.8\n", + "1993\n", + "PATA,\n", + "1994\n", + "”,\n", + ",PATA\n", + "PATA\n", + "——\n", + "”、“\n", + "100\n", + "”……\n", + "”、\n", + "”,\n", + "25\n", + "……\n", + ",“\n", + "19\n", + "40\n", + "……”\n", + "60\n", + "70\n", + "1975\n", + "100\n", + "80\n", + "36\n", + "1996\n", + "1170\n", + "845\n", + "———\n", + "21\n", + ",29\n", + "K11683”。\n", + ",18\n", + "200\n", + ",17\n", + ":“\n", + "190\n", + "200\n", + "”“\n", + "200\n", + "200\n", + ":“\n", + "190\n", + "200\n", + "17\n", + "17\n", + ",“\n", + "K11683”\n", + ":“\n", + "K11625”,\n", + "K11625”\n", + ":“\n", + "120\n", + "120\n", + "400\n", + "100\n", + "70\n", + "--\n", + "1997\n", + "、“\n", + ",75\n", + ":“\n", + "1994\n", + "》,\n", + "1500\n", + "5800\n", + ",5800\n", + "1500\n", + "1995\n", + "(2000\n", + "),\n", + "”。\n", + "”(\n", + "1996\n", + ":“\n", + "21.6\n", + "56.6\n", + ":“\n", + ":“\n", + "’”\n", + "1000\n", + "2000\n", + "19\n", + "38\n", + "21\n", + ")6\n", + "21\n", + "”,\n", + "5800\n", + "4000\n", + "820\n", + "2000\n", + "’97\n", + "”,“\n", + "--\n", + "600\n", + "40\n", + ",600\n", + "100\n", + ",“\n", + "、5\n", + "1000\n", + ":“\n", + "》,\n", + ":(○\n", + "○)\n", + "——\n", + "3000\n", + "580\n", + "80\n", + "2%,\n", + "500\n", + "———\n", + "”。\n", + "100\n", + "70\n", + "1000,\n", + "200\n", + "25\n", + "2000\n", + "35\n", + "1988\n", + ",1996\n", + "25\n", + "”,\n", + ")(\n", + "”(\n", + "”。\n", + ":77\n", + "97\n", + "97\n", + "82\n", + "82\n", + ",1939\n", + "61\n", + "75\n", + "17\n", + ",8\n", + ",31\n", + ",《\n", + ",《\n", + "”。\n", + "22\n", + "”。\n", + ",《\n", + ",“\n", + "”。\n", + "35\n", + "60\n", + "35\n", + ",6\n", + "”。\n", + "1535\n", + "”,\n", + "”。\n", + "”。\n", + "60\n", + "45\n", + "1976\n", + "175\n", + "45\n", + "”,\n", + "》,\n", + "1500\n", + "86\n", + "65%\n", + ",18%\n", + "80\n", + ",“\n", + "200\n", + "1000\n", + "”———\n", + ":“\n", + "60\n", + "1992\n", + "1/4\n", + "2000\n", + "700\n", + "100\n", + "600\n", + "1.5\n", + ":“\n", + "”、“\n", + "”。\n", + "”。\n", + "”。\n", + "———\n", + ",“\n", + "),\n", + ")“\n", + "17\n", + "400\n", + "40\n", + ":“\n", + ",“\n", + ":“\n", + "600\n", + "———\n", + "”。\n", + ",5\n", + "4.8\n", + "),\n", + "7.5\n", + "12.5\n", + "1/4。\n", + "”。\n", + "40\n", + ",1973\n", + "92\n", + "1881\n", + "”,\n", + "》、《\n", + "》、《\n", + "1907\n", + "》,\n", + ":“\n", + "70\n", + ",10\n", + ",“\n", + ",7000\n", + "1990\n", + "”。\n", + "”,\n", + "300\n", + ",300\n", + ",4\n", + ":“4\n", + 
"……\n", + "……\n", + ":“\n", + "22\n", + "1840\n", + "”,\n", + "90\n", + ",“\n", + "……\n", + "1996\n", + ",60\n", + "23%,\n", + "18.4%。\n", + "1987\n", + "65\n", + "13.3%,\n", + "16%。\n", + "60\n", + ",2/3\n", + ",3/4\n", + "80\n", + "40%\n", + "1/4\n", + "80\n", + "”,\n", + "——\n", + "1995\n", + "”。\n", + "……\n", + "”。\n", + "”1935\n", + "1974\n" + ] + } + ], "source": [ "train_token_ids, train_input_mask, train_trailing_token_mask, train_label_ids = \\\n", - " tokenizer.tokenize_preprocess_ner_text(text=train_df[TEXT_COL],\n", - " label_map=label_map,\n", - " max_len=MAX_SEQ_LENGTH,\n", - " labels=train_df[LABEL_COL])\n", + " tokenizer.tokenize_ner(text=train_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=train_df[LABEL_COL])\n", "test_token_ids, test_input_mask, test_trailing_token_mask, test_label_ids = \\\n", - " tokenizer.tokenize_preprocess_ner_text(text=test_df[TEXT_COL],\n", - " label_map=label_map,\n", - " max_len=MAX_SEQ_LENGTH,\n", - " labels=test_df[LABEL_COL])" + " tokenizer.tokenize_ner(text=test_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=test_df[LABEL_COL])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "`Tokenizer.preprocess_ner_tokens` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", + "`Tokenizer.tokenize_ner` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", "1. token ids: list of numerical values each corresponds to a token.\n", "2. attention mask: list of 1s and 0s, 1 for input tokens and 0 for padded tokens, so that padded tokens are not attended to. \n", - "3. trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ing. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", + "3. trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ize. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", "4. label ids: list of numerical values each corresponds to an entity label, if `labels` is provided." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 8, "metadata": { "scrolled": false }, @@ -342,7 +26074,7 @@ } ], "source": [ - "print(\"Sample token ids:\\n{}\\n\".format(train_token_ids[0]))\n", + "print((\"Sample token ids:\\n{}\\n\".format(train_token_ids[0])))\n", "print(\"Sample attention mask:\\n{}\\n\".format(train_input_mask[0]))\n", "print(\"Sample trailing token mask:\\n{}\\n\".format(train_trailing_token_mask[0]))\n", "print(\"Sample label ids:\\n{}\\n\".format(train_label_ids[0]))" @@ -361,19 +26093,19 @@ "* Language.CHINESE: \"bert-base-chinese\"\n", "* Language.MULTILINGUAL: \"bert-base-multilingual-cased\"\n", "\n", - "Here we use the base, uncased pretrained model." + "Here we use the base model pre-trained only on Chinese data." 
] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 9, "metadata": { "scrolled": false }, "outputs": [], "source": [ "token_classifier = BERTTokenClassifier(language=LANGUAGE,\n", - " num_labels=len(label_list),\n", + " num_labels=len(label_map),\n", " cache_dir=CACHE_DIR)" ] }, @@ -386,7 +26118,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": { "scrolled": true }, @@ -412,251 +26144,47 @@ "output_type": "stream", "text": [ "\n", - "Iteration: 1%| | 26/2813 [00:31<55:41, 1.20s/it]\u001b[A\n", - "Iteration: 1%| | 26/2813 [00:49<55:41, 1.20s/it]\u001b[A\n", - "Iteration: 2%|▏ | 51/2813 [01:01<55:19, 1.20s/it]\u001b[A\n", - "Iteration: 2%|▏ | 51/2813 [01:20<55:19, 1.20s/it]\u001b[A\n", - "Iteration: 3%|▎ | 76/2813 [01:31<54:57, 1.20s/it]\u001b[A\n", - "Iteration: 3%|▎ | 76/2813 [01:50<54:57, 1.20s/it]\u001b[A\n", - "Iteration: 4%|▎ | 101/2813 [02:02<54:39, 1.21s/it]\u001b[A\n", - "Iteration: 4%|▎ | 101/2813 [02:20<54:39, 1.21s/it]\u001b[A\n", - "Iteration: 4%|▍ | 126/2813 [02:32<54:20, 1.21s/it]\u001b[A\n", - "Iteration: 4%|▍ | 126/2813 [02:50<54:20, 1.21s/it]\u001b[A\n", - "Iteration: 5%|▌ | 151/2813 [03:03<53:57, 1.22s/it]\u001b[A\n", - "Iteration: 5%|▌ | 151/2813 [03:20<53:57, 1.22s/it]\u001b[A\n", - "Iteration: 6%|▋ | 176/2813 [03:33<53:34, 1.22s/it]\u001b[A\n", - "Iteration: 6%|▋ | 176/2813 [03:50<53:34, 1.22s/it]\u001b[A\n", - "Iteration: 7%|▋ | 201/2813 [04:04<53:12, 1.22s/it]\u001b[A\n", - "Iteration: 7%|▋ | 201/2813 [04:20<53:12, 1.22s/it]\u001b[A\n", - "Iteration: 8%|▊ | 226/2813 [04:35<52:53, 1.23s/it]\u001b[A\n", - "Iteration: 8%|▊ | 226/2813 [04:50<52:53, 1.23s/it]\u001b[A\n", - "Iteration: 9%|▉ | 251/2813 [05:06<52:24, 1.23s/it]\u001b[A\n", - "Iteration: 9%|▉ | 251/2813 [05:20<52:24, 1.23s/it]\u001b[A\n", - "Iteration: 10%|▉ | 276/2813 [05:37<51:57, 1.23s/it]\u001b[A\n", - "Iteration: 10%|▉ | 276/2813 [05:50<51:57, 1.23s/it]\u001b[A\n", - "Iteration: 11%|█ | 301/2813 [06:08<51:32, 1.23s/it]\u001b[A\n", - "Iteration: 11%|█ | 301/2813 [06:20<51:32, 1.23s/it]\u001b[A\n", - "Iteration: 12%|█▏ | 326/2813 [06:38<50:59, 1.23s/it]\u001b[A\n", - "Iteration: 12%|█▏ | 326/2813 [06:50<50:59, 1.23s/it]\u001b[A\n", - "Iteration: 12%|█▏ | 351/2813 [07:09<50:31, 1.23s/it]\u001b[A\n", - "Iteration: 12%|█▏ | 351/2813 [07:20<50:31, 1.23s/it]\u001b[A\n", - "Iteration: 13%|█▎ | 376/2813 [07:40<50:05, 1.23s/it]\u001b[A\n", - "Iteration: 13%|█▎ | 376/2813 [08:00<50:05, 1.23s/it]\u001b[A\n", - "Iteration: 14%|█▍ | 401/2813 [08:11<49:30, 1.23s/it]\u001b[A\n", - "Iteration: 14%|█▍ | 401/2813 [08:30<49:30, 1.23s/it]\u001b[A\n", - "Iteration: 15%|█▌ | 426/2813 [08:41<48:57, 1.23s/it]\u001b[A\n", - "Iteration: 15%|█▌ | 426/2813 [09:00<48:57, 1.23s/it]\u001b[A\n", - "Iteration: 16%|█▌ | 451/2813 [09:12<48:24, 1.23s/it]\u001b[A\n", - "Iteration: 16%|█▌ | 451/2813 [09:30<48:24, 1.23s/it]\u001b[A\n", - "Iteration: 17%|█▋ | 476/2813 [09:43<47:51, 1.23s/it]\u001b[A\n", - "Iteration: 17%|█▋ | 476/2813 [10:00<47:51, 1.23s/it]\u001b[A\n", - "Iteration: 18%|█▊ | 501/2813 [10:14<47:21, 1.23s/it]\u001b[A\n", - "Iteration: 18%|█▊ | 501/2813 [10:30<47:21, 1.23s/it]\u001b[A\n", - "Iteration: 19%|█▊ | 526/2813 [10:44<46:48, 1.23s/it]\u001b[A\n", - "Iteration: 19%|█▊ | 526/2813 [11:00<46:48, 1.23s/it]\u001b[A\n", - "Iteration: 20%|█▉ | 551/2813 [11:15<46:16, 1.23s/it]\u001b[A\n", - "Iteration: 20%|█▉ | 551/2813 [11:30<46:16, 1.23s/it]\u001b[A\n", - "Iteration: 20%|██ | 576/2813 [11:46<45:47, 1.23s/it]\u001b[A\n", - "Iteration: 20%|██ | 576/2813 
[... several hundred tqdm "Iteration: ..." progress lines elided from the training cell's output diff: the removed run covers a single epoch finishing in about 57 minutes and ends with "Train loss: 0.07014742273803971"; the re-added run resumes with the progress lines below ...]
168/2813 [03:34<56:38, 1.29s/it]\u001b[A\n", + "Iteration: 6%|▌ | 168/2813 [03:50<56:38, 1.29s/it]\u001b[A\n", + "Iteration: 7%|▋ | 192/2813 [04:06<56:16, 1.29s/it]\u001b[A\n", + "Iteration: 7%|▋ | 192/2813 [04:20<56:16, 1.29s/it]\u001b[A\n", + "Iteration: 8%|▊ | 216/2813 [04:37<55:55, 1.29s/it]\u001b[A\n", + "Iteration: 8%|▊ | 216/2813 [04:50<55:55, 1.29s/it]\u001b[A\n", + "Iteration: 9%|▊ | 240/2813 [05:08<55:28, 1.29s/it]\u001b[A\n", + "Iteration: 9%|▊ | 240/2813 [05:20<55:28, 1.29s/it]\u001b[A\n", + "Iteration: 9%|▉ | 264/2813 [05:39<55:02, 1.30s/it]\u001b[A\n", + "Iteration: 9%|▉ | 264/2813 [05:50<55:02, 1.30s/it]\u001b[A\n", + "Iteration: 10%|█ | 288/2813 [06:10<54:36, 1.30s/it]\u001b[A\n", + "Iteration: 10%|█ | 288/2813 [06:30<54:36, 1.30s/it]\u001b[A\n", + "Iteration: 11%|█ | 312/2813 [06:42<54:05, 1.30s/it]\u001b[A\n", + "Iteration: 11%|█ | 312/2813 [07:00<54:05, 1.30s/it]\u001b[A\n", + "Iteration: 12%|█▏ | 336/2813 [07:13<53:35, 1.30s/it]\u001b[A\n", + "Iteration: 12%|█▏ | 336/2813 [07:30<53:35, 1.30s/it]\u001b[A\n", + "Iteration: 13%|█▎ | 360/2813 [07:44<53:02, 1.30s/it]\u001b[A\n", + "Iteration: 13%|█▎ | 360/2813 [08:00<53:02, 1.30s/it]\u001b[A\n", + "Iteration: 14%|█▎ | 384/2813 [08:15<52:33, 1.30s/it]\u001b[A\n", + "Iteration: 14%|█▎ | 384/2813 [08:30<52:33, 1.30s/it]\u001b[A\n", + "Iteration: 15%|█▍ | 408/2813 [08:46<52:03, 1.30s/it]\u001b[A\n", + "Iteration: 15%|█▍ | 408/2813 [09:00<52:03, 1.30s/it]\u001b[A\n", + "Iteration: 15%|█▌ | 432/2813 [09:17<51:34, 1.30s/it]\u001b[A\n", + "Iteration: 15%|█▌ | 432/2813 [09:30<51:34, 1.30s/it]\u001b[A\n", + "Iteration: 16%|█▌ | 456/2813 [09:49<51:06, 1.30s/it]\u001b[A\n", + "Iteration: 16%|█▌ | 456/2813 [10:00<51:06, 1.30s/it]\u001b[A\n", + "Iteration: 17%|█▋ | 480/2813 [10:20<50:32, 1.30s/it]\u001b[A\n", + "Iteration: 17%|█▋ | 480/2813 [10:40<50:32, 1.30s/it]\u001b[A\n", + "Iteration: 18%|█▊ | 504/2813 [10:51<49:59, 1.30s/it]\u001b[A" ] } ], @@ -673,69 +26201,27 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Predict on Test Data" + "## Predict on Test Data\n", + "The `predict` method of the token classifier optionally returns the softmax probability of the predicted class, which is a NxM array, where N is size of the testing data and M is the number of tokens in the testing sequence" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\r", - "Iteration: 0%| | 0/247 [00:00 Date: Tue, 25 Jun 2019 20:03:05 +0000 Subject: [PATCH 056/108] Fixed bug with join_character --- utils_nlp/dataset/msra_ner.py | 10 ++-------- utils_nlp/dataset/ner_utils.py | 6 +++++- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/utils_nlp/dataset/msra_ner.py b/utils_nlp/dataset/msra_ner.py index 4dcec93c2..12728de17 100644 --- a/utils_nlp/dataset/msra_ner.py +++ b/utils_nlp/dataset/msra_ner.py @@ -5,8 +5,7 @@ FILES = { "train": "MSRA/msra-bakeoff3-training-utf8.2col", - # "test": "MSRA/bakeoff3_goldstandard.txt", - "test": "MSRA/msra-bakeoff3-training-gb.2col", + "test": "MSRA/bakeoff3_goldstandard.txt", } ENCODINGS = {"train": "utf8", "test": "gbk"} @@ -23,13 +22,8 @@ def load_pandas_df(local_cache_path="./", file_split="test"): text = text.replace("? 0", "? 0\n") text = text.replace("! 0", "! 
0\n") - # text_list = text.split("\n\n") - - # # Remove empty line at EOF - # text_list = text_list[:-1] - sentence_list, labels_list = get_sentence_and_labels( - text, file_split, join_characeter="" + text, file_split ) labels_list = [ diff --git a/utils_nlp/dataset/ner_utils.py b/utils_nlp/dataset/ner_utils.py index ef55c0d90..46844be12 100644 --- a/utils_nlp/dataset/ner_utils.py +++ b/utils_nlp/dataset/ner_utils.py @@ -16,6 +16,10 @@ def get_sentence_and_labels(text, data_type="", join_characeter=" "): . O" data_type (str, optional): String that briefly describes the data, e.g. "train" + join_characeter (str, optional): String used to join input words. + Defaults to " ". For Chinese text, "" should be used because + Chinese characters/words don't have spaces between them as + English does. Returns: tuple: (list of sentences, list of token label lists) @@ -33,7 +37,7 @@ def get_sentence_and_labels(text, data_type="", join_characeter=" "): # split "word label" pairs s_split_split = [t.split() for t in s_split] sentence_list.append( - " ".join([t[0] for t in s_split_split if len(t) > 1]) + join_characeter.join([t[0] for t in s_split_split if len(t) > 1]) ) labels_list.append([t[1] for t in s_split_split if len(t) > 1]) if len(s_split_split) > max_seq_len: From 51c5f1f1e025cb0610b4806d23c6733787012b51 Mon Sep 17 00:00:00 2001 From: hlums Date: Tue, 25 Jun 2019 20:37:40 +0000 Subject: [PATCH 057/108] Fixed _truncate_seq_pairs bug. --- utils_nlp/bert/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/utils_nlp/bert/common.py b/utils_nlp/bert/common.py index df18d3bee..6f7939aff 100644 --- a/utils_nlp/bert/common.py +++ b/utils_nlp/bert/common.py @@ -74,7 +74,7 @@ def tokenize(self, text): for sentences in tqdm(text) ] - def _truncate_seq_pair(tokens_a, tokens_b, max_length): + def _truncate_seq_pair(self, tokens_a, tokens_b, max_length): """Truncates a sequence pair in place to the maximum length.""" # This is a simple heuristic which will always truncate the longer # sequence one token at a time. This makes more sense than @@ -127,7 +127,7 @@ def preprocess_classification_tokens(self, tokens, max_len=BERT_MAX_LEN): else: # get tokens for each sentence [[t00, t01, ...] [t10, t11,... ]] tokens = [ - _truncate_seq_pair(sentence[0], sentence[1], max_len - 3) + self._truncate_seq_pair(sentence[0], sentence[1], max_len - 3) for sentence in tokens ] From fd0839b5b0f1e2ee42c5c7d834dfaaf1c9f6f20a Mon Sep 17 00:00:00 2001 From: hlums Date: Tue, 25 Jun 2019 21:46:48 +0000 Subject: [PATCH 058/108] Updated notebook descriptions.' 
--- .../ner_msra_bert_chinese.ipynb | 26190 +--------------- 1 file changed, 365 insertions(+), 25825 deletions(-) diff --git a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb index 136b2f6db..0f0a125e0 100644 --- a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb +++ b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb @@ -178,27 +178,27 @@ " \n", " \n", " 0\n", - " 当希望工程救助的百万儿童成长起来,科教兴国蔚然成风时,今天有收藏价值的书你没买,明日就叫你悔...\n", + " 当 希 望 工 程 救 助 的 百 万 儿 童 成 长 起 来 , 科 教 兴 国 蔚 然 ...\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", " 1\n", - " 藏书本来就是所有传统收藏门类中的第一大户,只是我们结束温饱的时间太短而已。\n", + " 藏 书 本 来 就 是 所 有 传 统 收 藏 门 类 中 的 第 一 大 户 , 只 是 ...\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", " 2\n", - " 因有关日寇在京掠夺文物详情,藏界较为重视,也是我们收藏北京史料中的要件之一。\n", + " 因 有 关 日 寇 在 京 掠 夺 文 物 详 情 , 藏 界 较 为 重 视 , 也 是 ...\n", " [O, O, O, B-LOC, O, O, B-LOC, O, O, O, O, O, O...\n", " \n", " \n", " 3\n", - " 我们藏有一册1945年6月油印的《北京文物保存保管状态之调查报告》,调查范围涉及故宫、历博、...\n", + " 我 们 藏 有 一 册 1 9 4 5 年 6 月 油 印 的 《 北 京 文 物 保 存 ...\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", " 4\n", - " 以家乡的历史文献、特定历史时期书刊、某一名家或名著的多种出版物为专题,注意精品、非卖品、纪念...\n", + " 以 家 乡 的 历 史 文 献 、 特 定 历 史 时 期 书 刊 、 某 一 名 家 或 ...\n", " [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ...\n", " \n", " \n", @@ -207,11 +207,11 @@ ], "text/plain": [ " sentence \\\n", - "0 当希望工程救助的百万儿童成长起来,科教兴国蔚然成风时,今天有收藏价值的书你没买,明日就叫你悔... \n", - "1 藏书本来就是所有传统收藏门类中的第一大户,只是我们结束温饱的时间太短而已。 \n", - "2 因有关日寇在京掠夺文物详情,藏界较为重视,也是我们收藏北京史料中的要件之一。 \n", - "3 我们藏有一册1945年6月油印的《北京文物保存保管状态之调查报告》,调查范围涉及故宫、历博、... \n", - "4 以家乡的历史文献、特定历史时期书刊、某一名家或名著的多种出版物为专题,注意精品、非卖品、纪念... \n", + "0 当 希 望 工 程 救 助 的 百 万 儿 童 成 长 起 来 , 科 教 兴 国 蔚 然 ... \n", + "1 藏 书 本 来 就 是 所 有 传 统 收 藏 门 类 中 的 第 一 大 户 , 只 是 ... \n", + "2 因 有 关 日 寇 在 京 掠 夺 文 物 详 情 , 藏 界 较 为 重 视 , 也 是 ... \n", + "3 我 们 藏 有 一 册 1 9 4 5 年 6 月 油 印 的 《 北 京 文 物 保 存 ... \n", + "4 以 家 乡 的 历 史 文 献 、 特 定 历 史 时 期 书 刊 、 某 一 名 家 或 ... \n", "\n", " labels \n", "0 [O, O, O, O, O, O, O, O, O, O, O, O, O, O, O, ... 
\n", @@ -250,7 +250,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 5, "metadata": { "scrolled": false }, @@ -268,7 +268,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 6, "metadata": { "scrolled": false }, @@ -288,25741 +288,11 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 7, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1945\n", - "》,\n", - "”。\n", - "1997\n", - "(1937\n", - "—1945\n", - "”。\n", - "》(1919\n", - ")、\n", - "》(1923\n", - ")、\n", - "(1927\n", - ")、\n", - "》(1926\n", - ")、\n", - "》(1930\n", - "1908\n", - ")。\n", - "”,\n", - "”。\n", - "1974\n", - "”,\n", - "”。\n", - "”,“\n", - "”,“\n", - "”,“\n", - "……\n", - "”,“\n", - "”,\n", - "”,\n", - ",“\n", - "”。\n", - "”;\n", - "”。\n", - "1967\n", - "”、“\n", - ":“\n", - "”,\n", - "”,\n", - "”。\n", - ":“\n", - ":“\n", - "……”\n", - "……\n", - "”,\n", - "……\n", - "———\n", - ",“\n", - ":“\n", - ",“\n", - ",“\n", - ":“\n", - ":“\n", - ":“\n", - ":“\n", - "……\n", - ":“\n", - "”。\n", - "”。\n", - "、“\n", - ",“\n", - "”!\n", - ":“\n", - "”。\n", - ":“\n", - "———《\n", - "》,\n", - "……\n", - "”。\n", - "》,\n", - "》,\n", - "》,\n", - "……\n", - "”。\n", - "……\n", - "”。\n", - ",“\n", - "”。\n", - "———\n", - "》(\n", - "———《\n", - "———\n", - "》(\n", - "”,\n", - ":“\n", - "”,\n", - "”,\n", - "”;\n", - "”,\n", - "”。\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - "”,\n", - "”,\n", - "———\n", - ",“\n", - "”,“\n", - "”,\n", - "”,\n", - ",《\n", - "———\n", - "……\n", - "1938\n", - "———\n", - "———“\n", - "”,\n", - "……\n", - "》,\n", - ":“\n", - ":“\n", - "1998\n", - "”。\n", - ",1983\n", - "1977\n", - ",1979\n", - ":(010)64014411\n", - "2908\n", - "”、“\n", - "”,\n", - "10%—20%\n", - "”(\n", - "82)\n", - "”,\n", - "”,\n", - ":“\n", - "1997\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - ":“\n", - ",“\n", - "”。\n", - ",“\n", - "”,\n", - "),\n", - ",“\n", - ",“\n", - "———\n", - "”。\n", - "200\n", - "1997\n", - "1400\n", - "1995\n", - "”。\n", - "1993\n", - ",1978\n", - "1985\n", - "1988\n", - ")(\n", - "』,\n", - "》。\n", - "……\n", - ":“\n", - "1991\n", - "”,\n", - ":“\n", - ":“\n", - ":“\n", - "……\n", - "……”\n", - ":“\n", - ":“\n", - "1940\n", - "———\n", - "———“\n", - "”,\n", - "”(\n", - ")。\n", - "”。\n", - "”,\n", - "”,\n", - "”。\n", - ",“\n", - "”,\n", - "”。\n", - "”,\n", - "”,\n", - "”,\n", - "———\n", - "———\n", - ":“\n", - ":“\n", - "……\n", - "……\n", - "1996\n", - "……\n", - "———\n", - "26\n", - "”,\n", - ":“\n", - ";“\n", - "”。\n", - ",“16”\n", - "27,\n", - "17\n", - "10%\n", - ",90%\n", - "1997\n", - "1.33\n", - "2000\n", - "0.07\n", - "1997\n", - "303.9%。\n", - "100\n", - "”。\n", - "1997\n", - "4000\n", - "2000—3000\n", - "200\n", - "0.75\n", - "4.2\n", - "0.75\n", - "12.7\n", - "200\n", - "90\n", - "1997\n", - "———\n", - "———\n", - ";———\n", - ";———\n", - "20%—30%;———\n", - "40%;———\n", - "1.5\n", - ",1991\n", - "37\n", - ";———\n", - "300\n", - "500\n", - "———\n", - "1959\n", - "4000\n", - "200\n", - "200\n", - ")、\n", - ")、\n", - "1978\n", - ":“\n", - "300\n", - "160—200\n", - "350\n", - "33%。\n", - "1300\n", - "26\n", - "500\n", - "1463\n", - "5.2\n", - "3.6\n", - "1/4,\n", - "1/3\n", - "1998\n", - "2000\n", - "”。\n", - "1997\n", - "》。\n", - "1994\n", - "1992\n", - "1991\n", - "1990\n", - "147\n", - "160\n", - "”。\n", - "80\n", - "28\n", - "2000\n", - "200\n", - "5.\n", - "4.\n", - "3.\n", - ",“\n", - "”(DETBLAPAKHUS),\n", - "1/4\n", - "CASH\n", - "1/4\n", - "1/4\n", - "3.\n", - "2.\n", - "1.\n", - "4000\n", - 
"1991\n", - "7200\n", - "CASH\n", - "1995\n", - "60\n", - "CASH\n", - "400\n", - "”,\n", - "……\n", - ":“\n", - "”。\n", - "”。\n", - "”,\n", - ":“\n", - "”。\n", - ",“\n", - ":“\n", - "”。\n", - "”,\n", - ":“\n", - ",‘\n", - "’。\n", - "”。\n", - ":“\n", - "”。\n", - "1994\n", - "”。\n", - "……\n", - ",“\n", - "OK,\n", - "1993\n", - "……\n", - "———\n", - "”,\n", - "”,\n", - "”。\n", - "”,\n", - ":“\n", - "1977\n", - ":“\n", - ":“\n", - "”,\n", - ":“\n", - "”、“\n", - "”。\n", - "”,\n", - "22\n", - "”,\n", - "---\n", - "』、『\n", - "』、『\n", - "』,\n", - ",『\n", - "》,\n", - ":“\n", - ",1949\n", - "1065”\n", - ":1949\n", - "1986\n", - "1942\n", - ",1943\n", - "1959\n", - ",“\n", - "……\n", - "1941\n", - "25\n", - ";“\n", - "……\n", - "———\n", - "———《\n", - "》。\n", - "1997\n", - "———\n", - "》。\n", - "”、“\n", - "1996\n", - ",1997\n", - "1997\n", - "1996\n", - "1997\n", - ",《\n", - "》、《\n", - "》、《\n", - ",《\n", - ";《\n", - "》、《\n", - ",《\n", - ";《\n", - ",《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1997\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - ":《\n", - "》、《\n", - "》、《\n", - "》;\n", - ":《\n", - "》、《\n", - "———\n", - "》、《\n", - "》、《\n", - "》。\n", - ":《\n", - "》、《\n", - "———\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》。\n", - "1997\n", - "1997\n", - "1997\n", - "1996\n", - "1997\n", - "1997\n", - "1996\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1996\n", - ":“\n", - "”———\n", - "”,\n", - "”,\n", - "1000\n", - "2600\n", - "120\n", - "”。\n", - "”。\n", - "52\n", - "185\n", - "100\n", - "”2\n", - "”、“\n", - "800\n", - "”。\n", - "9800\n", - "2.13\n", - "300\n", - "100\n", - "150\n", - "20000\n", - "300\n", - "”,\n", - "400\n", - "200\n", - "250\n", - "500\n", - "19\n", - "175\n", - "100\n", - "300\n", - "1600\n", - "66\n", - "90\n", - "1979\n", - "5.7\n", - "、2\n", - ":“\n", - "5000\n", - "1.5\n", - "60\n", - ":“\n", - "’”。\n", - "806\n", - "1996\n", - "”,\n", - "3.4\n", - "45\n", - "225\n", - "250\n", - "”。\n", - ",“\n", - "”(\n", - "400\n", - "2000\n", - "5.95\n", - "5.95\n", - "”。\n", - "5000\n", - "980\n", - "1000\n", - "196\n", - ":100733。\n", - ":“\n", - "”。\n", - "”。\n", - ":“\n", - "”,\n", - ":“\n", - ":“\n", - "1965\n", - "3000\n", - "”、“\n", - "”,\n", - "),\n", - "),\n", - ")。\n", - "1993\n", - "25\n", - "1989\n", - "』,\n", - "21\n", - "”,\n", - ",“\n", - "35\n", - "1986\n", - "”,\n", - "”。\n", - "21\n", - ":“\n", - "”20\n", - "8000\n", - "36\n", - "110\n", - ":“\n", - ":“\n", - "”60\n", - "1954\n", - "1942\n", - ",1943\n", - ",1922\n", - "100\n", - "38\n", - "300\n", - "1997\n", - "60\n", - "1996\n", - "2000\n", - "……\n", - "1994\n", - "”,\n", - "”,\n", - "1993\n", - "”,\n", - ",5\n", - "33\n", - "1995\n", - "1994\n", - "17\n", - "200\n", - "1994\n", - "1992\n", - ",10\n", - ",“\n", - "2800\n", - "4000\n", - "3000\n", - "1.4\n", - "40\n", - "300\n", - "110\n", - "1996\n", - "”。\n", - "26\n", - ")(\n", - "”,\n", - "19\n", - "1500\n", - "———\n", - "”、“\n", - "”,\n", - ":“\n", - "1995\n", - "1968\n", - ",1995\n", - ",1950\n", - ")(\n", - "1996\n", - "400\n", - ":“\n", - "”。\n", - "1928\n", - "”(\n", - "”)。\n", - "1993\n", - "70\n", - "198\n", - "》(\n", - "1998\n", - ":“\n", - ",70\n", - "198\n", - "2.6\n", - "”,\n", - ":“\n", - ",690\n", - "》,\n", - "180\n", - "》、《\n", - "600—800\n", - "”、“\n", - "1280\n", - "400\n", - "780\n", - "》,230\n", - "660\n", - "180\n", - "”、“\n", - "”,\n", - ",“\n", - "59\n", - ",4\n", - "70\n", - ",10\n", - "80\n", - ",“\n", - "”,\n", - "25\n", - "3000\n", - "”。\n", - "200\n", 
- "218\n", - "40\n", - "47\n", - "2、\n", - "1、\n", - "———\n", - "4500\n", - "10000\n", - "4500\n", - "200\n", - "17\n", - "1994\n", - ",50\n", - "90\n", - "15%—20%\n", - "1050\n", - "———\n", - "150\n", - "3500\n", - "1997\n", - "2000\n", - "4000\n", - "90\n", - "4000\n", - "2000\n", - "---(\n", - "”,\n", - "”。\n", - "”,\n", - ",“\n", - "20%,\n", - "20%,\n", - "50%,\n", - "”、“\n", - "1990\n", - "1600\n", - "1/3)。\n", - ",200\n", - "1%\n", - "3‰;\n", - "———\n", - "”,\n", - "”,\n", - "”。\n", - "”,“\n", - "”,\n", - "———\n", - "”(\n", - "17\n", - "2000\n", - "300\n", - "8000\n", - "3799\n", - "82285\n", - "9200\n", - "1997\n", - "26.8%,\n", - "70%\n", - "40%\n", - "32.5%,\n", - "50%—60%\n", - "35%\n", - ",1990\n", - "1997\n", - "9.4%,\n", - "15.7%\n", - "11.2%\n", - "6800\n", - "5700\n", - "90\n", - "1990\n", - "61.1%,\n", - "1997\n", - "49.3%,\n", - "10.8\n", - "4100\n", - "21.4%\n", - "23.9%,\n", - "2.5\n", - "18.5%\n", - "26.8%,\n", - "8.3\n", - "8%\n", - "700\n", - "125\n", - "1991\n", - "1998\n", - "11.2%,\n", - "1400\n", - ",7\n", - "9805\n", - ":『\n", - ":『\n", - "SPEOS\n", - "———\n", - "》,\n", - "》、《\n", - "》、《\n", - "》、《\n", - "19\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》,\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1983\n", - ",《\n", - ",1988\n", - "》,\n", - ",6\n", - ",6\n", - "31\n", - "28\n", - ",5\n", - "、6\n", - "》,\n", - "……\n", - "》,\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1983\n", - "———\n", - "1985\n", - ",《\n", - "”,\n", - "……\n", - ",14\n", - "———\n", - "1960\n", - "”。\n", - "2、\n", - "1、\n", - "———\n", - "———\n", - "”,\n", - "”,\n", - "———\n", - "”,\n", - "”。\n", - "———\n", - "”。\n", - "———\n", - "1994\n", - "1994\n", - "1985\n", - "1981\n", - ",“\n", - "---\n", - "---\n", - "”,\n", - "”。\n", - "1997\n", - "6000\n", - "1996\n", - "3000\n", - "1000\n", - "57\n", - "”,\n", - ",2000\n", - "1500\n", - "”。\n", - ",“\n", - "”,\n", - "600\n", - "50%\n", - "1996\n", - "37%,\n", - "10%。\n", - "1995\n", - "5000\n", - "”,\n", - "2.45\n", - "3000\n", - "1996\n", - "1657.2\n", - "),\n", - "50%,\n", - "48\n", - "540\n", - "”(\n", - "17\n", - "150\n", - "400\n", - "1000\n", - "350\n", - "70\n", - "”,\n", - ",“\n", - ",“\n", - "……\n", - ",“\n", - "……\n", - ",“\n", - "”,\n", - "200\n", - "……\n", - "1.2\n", - ";“\n", - "……\n", - "500\n", - ",4\n", - "200\n", - "”。\n", - ",4\n", - "……\n", - "38\n", - "1200\n", - "4.96\n", - "”(\n", - "66\n", - ",58\n", - "”,\n", - "”。\n", - "73\n", - "25\n", - "1000\n", - "”,\n", - "40\n", - "200\n", - ",70\n", - "2000\n", - "”。\n", - "”。\n", - "……\n", - ",『\n", - "』,\n", - "85\n", - "”,\n", - "……\n", - "1994\n", - "2000\n", - "1989\n", - ":“\n", - "85\n", - "———\n", - "1992\n", - "1996\n", - "……\n", - "1995\n", - "8000\n", - "7000\n", - "5000\n", - "1995\n", - "……\n", - "200\n", - "4000\n", - "60\n", - "1995\n", - "3·1\n", - ":“\n", - "”1996\n", - ":“\n", - "”1996\n", - "1995\n", - "2000\n", - "1995\n", - "1996\n", - "18.72\n", - "21.40\n", - "114.3%。\n", - "1990\n", - "1997\n", - "23\n", - "1997\n", - "8.9\n", - "1.6\n", - ":“\n", - "150\n", - "1995\n", - "129\n", - "”!\n", - "1996\n", - ":“\n", - "1∶5。\n", - "40\n", - "”,\n", - "1995\n", - "”,\n", - ":“\n", - "1997\n", - "1992\n", - "300\n", - "”1991\n", - "1983\n", - "1979\n", - ",20\n", - "”。\n", - "”。\n", - ":“\n", - "60\n", - "40%,\n", - "3∶1\n", - "6000\n", - "100\n", - ",300\n", - ",800\n", - "”,\n", - "”。\n", - "2.1\n", - "”。\n", - "100\n", - "1.2\n", - "3800\n", - "267\n", - "1000\n", - "、10\n", - "1·2\n", - "、40\n", - ")(\n", - "140\n", - "1997\n", - "2700\n", - "1996\n", - 
"1997\n", - "1998\n", - "1995\n", - "23\n", - ")(\n", - "21\n", - "0·6\n", - ",1995\n", - "5000\n", - ",1996\n", - "3000\n", - ",1997\n", - "60%\n", - "40%\n", - "20%。\n", - "2000\n", - "1/3\n", - "1/2\n", - "700\n", - "1.6\n", - "、50%—60%\n", - "27\n", - "164\n", - "14.7\n", - "63\n", - "81\n", - ":“\n", - "……\n", - "47\n", - "”,\n", - "1993\n", - ":“\n", - "54\n", - ",1992\n", - "40\n", - "”,\n", - "1991\n", - "47\n", - "1945\n", - "53\n", - "300\n", - "”。\n", - "---\n", - "90\n", - "—40\n", - "”,\n", - "”,\n", - "”。\n", - ")。\n", - "1/3\n", - "1/5。\n", - "21\n", - "1205\n", - "29\n", - "4274\n", - "1996\n", - ":5\n", - "60%\n", - ":1997\n", - "34.4%,\n", - "1993\n", - "61.8%,\n", - "62.1%,\n", - "50%\n", - "60%,\n", - "50%。\n", - "1994\n", - "……\n", - "”,\n", - "500\n", - "100—200\n", - ");\n", - "7000\n", - "》。\n", - ",BP\n", - "》。\n", - "》(\n", - "》,\n", - "》。\n", - "》。\n", - ":“\n", - "……”\n", - "1993\n", - "”;\n", - "》,\n", - "》,\n", - "”。\n", - "1966\n", - "1978\n", - "1942\n", - "》。\n", - "”,\n", - "》。\n", - "》、《\n", - "》,\n", - "》(\n", - "),\n", - "1940\n", - "1937\n", - "》,\n", - "》。\n", - "1936\n", - "》,\n", - "》,\n", - "》,\n", - ",《\n", - ":“\n", - "”1956\n", - ",‘\n", - "”;\n", - ",“\n", - ":“\n", - ",“\n", - "———\n", - "……\n", - ",“\n", - "”,\n", - "……\n", - "”。\n", - "》、《\n", - "》、《\n", - "》,\n", - "》、《\n", - "》、《\n", - "”。\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "”、“\n", - "”,\n", - "”。\n", - "1996\n", - ",“\n", - "》,\n", - "”,\n", - "”。\n", - "———\n", - "1997\n", - "》,\n", - "”《\n", - "》。\n", - "”,\n", - "”。\n", - "365\n", - "0.61\n", - "1996\n", - ";6\n", - "87\n", - "1996\n", - "25\n", - "28\n", - "”,\n", - "』,\n", - "600\n", - "”,\n", - "”,\n", - "35%\n", - "150\n", - "110\n", - "60\n", - "38\n", - "87\n", - "700\n", - "800\n", - ",20\n", - "100\n", - "”,“\n", - "”,\n", - "34\n", - "3.6\n", - "28\n", - "、40\n", - "25\n", - "1.5\n", - "1.2\n", - "150\n", - "1986\n", - "1500\n", - "1997\n", - "2000\n", - "2000\n", - "7000\n", - "500\n", - "1987\n", - "1269\n", - "80\n", - "1996\n", - "5000\n", - "”,\n", - "350\n", - "”,\n", - "———\n", - "6.14\n", - "13.7%,\n", - "2800\n", - "8.7%。\n", - ":“\n", - ":“\n", - "”,\n", - ",150\n", - "43\n", - "150\n", - "”。\n", - "1100\n", - "400\n", - ":“\n", - "”,\n", - "”,\n", - "”。\n", - "1997\n", - ";“\n", - "500\n", - "1/8。\n", - "1996\n", - "300\n", - "3290\n", - "90\n", - "1978\n", - "———\n", - "———\n", - "———(\n", - ":“\n", - "0.2\n", - "1998\n", - "27\n", - "1994\n", - "1990\n", - "1987\n", - "1.5\n", - "23\n", - "26·7\n", - "……\n", - ",5\n", - ",10\n", - "100\n", - ",1995\n", - "150\n", - ",1994\n", - ",“\n", - "”、“\n", - "1993\n", - "———\n", - "”,\n", - "60%\n", - "200\n", - "1996\n", - "34\n", - ":“\n", - "80\n", - "”。\n", - "1000\n", - "10%。\n", - "52.8\n", - "1998\n", - "—2000\n", - ",“\n", - "1700\n", - "1400\n", - "300\n", - "17\n", - "1700\n", - "1/3,\n", - ":“\n", - "1986\n", - ",1997\n", - "1994\n", - "46\n", - "40\n", - ":“\n", - "———\n", - "”。\n", - "”,\n", - ":“\n", - ",78\n", - "”,\n", - "———\n", - "1200\n", - "2·8\n", - "6900\n", - ":“\n", - "……\n", - ":“\n", - "……\n", - "”,\n", - "———\n", - "”;\n", - "……\n", - ",《\n", - "”(1997\n", - "),\n", - "”。\n", - "”(1996\n", - "),\n", - "”。\n", - "”(1995\n", - "),\n", - "”,\n", - "”(1994\n", - "),\n", - "”。\n", - "”(1993\n", - "),\n", - "”,\n", - "”(1992\n", - "),\n", - "”。\n", - "”。\n", - "1991\n", - "”,\n", - "”,\n", - "”。\n", - "23790\n", - "27\n", - "6000\n", - "1997\n", - "1996\n", - "66\n", - "51\n", - "1989\n", - "1.8\n", - "600\n", - "17\n", - "1997\n", - 
"28\n", - "”,\n", - "600\n", - "70\n", - "2339\n", - ",24\n", - "27\n", - "123\n", - "1993\n", - ",1995\n", - "1000\n", - "500\n", - "———\n", - "1995\n", - "1997\n", - "”,\n", - "”。\n", - "600\n", - "3000\n", - "”,\n", - "”。\n", - "———\n", - ",1971\n", - "1995\n", - ",1957\n", - "”,\n", - "”。\n", - "100\n", - "200\n", - "4000\n", - "400\n", - "100\n", - "26\n", - "21\n", - ",1994\n", - ":“\n", - ",1995\n", - "”。\n", - "”。\n", - "”,\n", - "200\n", - "》,\n", - "100\n", - "1.5\n", - "1994\n", - "2280\n", - ":“\n", - "1994\n", - "1994\n", - "———\n", - "3500\n", - ",“\n", - "”。\n", - "———\n", - "———1997\n", - "”;\n", - ":“\n", - "---(\n", - ")(\n", - ")2:\n", - "1:\n", - "13.26\n", - "1992\n", - "”、“\n", - "”。\n", - "70%\n", - "”。\n", - ",4\n", - "27\n", - "”,\n", - "60\n", - "”。\n", - "500\n", - "22\n", - "2600\n", - "”,\n", - ",1997\n", - "2:\n", - "8000\n", - "1:\n", - "1985\n", - "—30\n", - ")4\n", - "28\n", - "124\n", - "8000\n", - "226\n", - "2000\n", - "”,\n", - "1200\n", - "”。\n", - "1996\n", - "26\n", - "”,38\n", - "1996\n", - "27\n", - "121\n", - "”。\n", - ")(\n", - "”,\n", - "”。\n", - "”。\n", - "”,\n", - "”、“\n", - ",“\n", - ",“\n", - "6000\n", - "TICO\n", - "……\n", - "500\n", - "”。\n", - "1996\n", - "48\n", - "1997\n", - "36\n", - "80%\n", - "60%\n", - "”,\n", - "1100\n", - "400\n", - "8000—10000\n", - "5000—7000\n", - "3000\n", - "90\n", - "……(\n", - "》,\n", - "1991\n", - "》,\n", - "》、\n", - "》、\n", - "》、《\n", - "》、\n", - "》、\n", - "》、\n", - "》、\n", - "》,\n", - "1998\n", - "31\n", - ",“\n", - "”。\n", - "……”\n", - "109\n", - "”,\n", - "”,\n", - "《B\n", - "”,\n", - ":“\n", - "”,\n", - "1996\n", - "2000\n", - "”,\n", - "”,\n", - "1996\n", - "……\n", - "《B\n", - "》,\n", - "1·29\n", - "1996\n", - "90\n", - ",1997\n", - "1.5\n", - "200\n", - "4000\n", - ",80%\n", - "850\n", - "2000\n", - "、10\n", - "、6\n", - "55\n", - "5000\n", - "260\n", - "”,\n", - "1996\n", - "2500\n", - ",1997\n", - "70\n", - "100\n", - "4500\n", - "”。\n", - "1.5\n", - "80\n", - "8000\n", - "———\n", - "2000\n", - "……\n", - "2500\n", - ",1994\n", - "8000\n", - "1.2\n", - "90\n", - "100\n", - "、100\n", - "、1000\n", - "”,\n", - "8%。\n", - "”。\n", - "———\n", - "5—10\n", - ",“\n", - "4500\n", - "3%,\n", - "480\n", - "200\n", - "2000\n", - "———\n", - "2071\n", - "1200\n", - "100\n", - "125\n", - "”。\n", - ")“\n", - "60\n", - "300\n", - "140\n", - "160\n", - ",28\n", - "7·5\n", - "600\n", - "40\n", - "6.4\n", - "5.8\n", - ",1997\n", - "2.4\n", - "”,\n", - "”:\n", - "1994\n", - "1000\n", - "1997\n", - "8100\n", - "”,\n", - "2000\n", - "135\n", - "500\n", - "1995\n", - "370\n", - ",1996\n", - "2050\n", - ",1994\n", - "317\n", - "3500\n", - "1000\n", - "1200\n", - "———\n", - "600\n", - "1000\n", - "400\n", - "17\n", - "”。\n", - "--\n", - "628\n", - ",15\n", - "17\n", - "”,\n", - "1996\n", - ",1997\n", - "》,\n", - "279.7\n", - "”,\n", - ":“\n", - ":“\n", - ",1000\n", - "1912\n", - "”。\n", - ")2:\n", - "70\n", - "1:\n", - "———\n", - "70\n", - "23\n", - "100%,\n", - "80%\n", - "1997\n", - ":“\n", - "”。\n", - "1.5\n", - "”。\n", - "”、\n", - "27300\n", - "95%,\n", - "47%,\n", - "43%。\n", - ",1992\n", - "90\n", - "1954\n", - "37\n", - "---\n", - "17\n", - "、18\n", - "60\n", - "×210(\n", - "430\n", - "60\n", - "19\n", - "”,\n", - "……\n", - "1994\n", - ":“\n", - ",“\n", - "”。\n", - ",1\n", - "2000\n", - "70\n", - "”。\n", - "1996\n", - ",73\n", - ":“\n", - "340\n", - "”,\n", - "1996\n", - "、1997\n", - "400\n", - "100\n", - ":“\n", - "”30\n", - "17\n", - "1996\n", - "39\n", - "”,\n", - "”,\n", - "”,\n", - "5%。\n", - "1500\n", - "80\n", - 
"1984\n", - "85%\n", - "1500\n", - "40\n", - "600\n", - "1954\n", - "1952\n", - "73\n", - ",1946\n", - "———\n", - "———\n", - "300\n", - "300\n", - "”“\n", - "1938\n", - ",10\n", - "40\n", - "1993\n", - ")(\n", - "40\n", - ",“\n", - ":“\n", - ":“\n", - ":“\n", - ":“\n", - "1997\n", - "“10·9”\n", - "———\n", - "2340\n", - "),\n", - "190\n", - "1991\n", - "11600\n", - "),\n", - "1984\n", - ",1991\n", - ",1951\n", - ",1970\n", - "、67\n", - "58\n", - "”,\n", - "40\n", - ",95%\n", - ":“\n", - "500\n", - "619\n", - "3200\n", - "86\n", - "40\n", - "3000\n", - "140\n", - "74\n", - "346\n", - "6400\n", - "80%。\n", - "48\n", - "”———\n", - "———20\n", - "———20\n", - "》,\n", - "19\n", - "1968\n", - "19\n", - "———\n", - "1994\n", - "200\n", - "”。\n", - ",“\n", - ":“\n", - "”,\n", - "1985\n", - "”———\n", - "Email\n", - "1992\n", - "”。\n", - "19\n", - "36\n", - "500\n", - "”,\n", - "29\n", - "”31\n", - "---(\n", - ")(\n", - "),\n", - ",“\n", - "”。\n", - "———\n", - "”(\n", - "15·8%,\n", - "18·1%。\n", - "233\n", - "17%,\n", - "1997\n", - "20·4%。\n", - "149\n", - "19·8%,\n", - "210\n", - "359\n", - "19·2%,\n", - "5·5%。\n", - "9·3%、10·4%、13·7%。\n", - "88·6\n", - "6·4%。\n", - "328\n", - "1·3%。\n", - "”,\n", - "58·45%,\n", - "45·51%,\n", - "41·89%,\n", - "31·8%,\n", - "26·02%,\n", - "23·46%,\n", - "16·19%,\n", - "15·47%。\n", - "50·94%,\n", - "48·14%,\n", - "32·31%,\n", - "30·07%,\n", - "22·75%,\n", - "18·32%,\n", - "15·6%,\n", - "4·97%。\n", - "21\n", - "”。\n", - "80\n", - "90\n", - "(PIP)、\n", - "(POP)、\n", - "(DOULESCAN)、\n", - "(NICAM)\n", - "”,\n", - "1998\n", - "2006\n", - "(HDTV),\n", - "1000\n", - "”。\n", - "1996\n", - "(FCC)\n", - "80\n", - "90\n", - "”,\n", - "”。\n", - "”,\n", - "”,\n", - "”,\n", - "、“\n", - "”。\n", - ":“\n", - ",1997\n", - "88\n", - "1996\n", - "28\n", - ":“\n", - "60\n", - "1.6\n", - ",1996\n", - ",“\n", - "”。\n", - "”,\n", - "”,\n", - "”。\n", - "”,\n", - "1、\n", - "”。\n", - "1996\n", - "1996\n", - ",《\n", - "》、《\n", - "》、《\n", - "51069\n", - "”,\n", - ":“\n", - ":“\n", - ",4\n", - "32\n", - "6000\n", - "5、\n", - "3、\n", - "1、\n", - "”、“\n", - "”。\n", - ",“\n", - "———\n", - "▲『\n", - "1860\n", - "8848\n", - "27\n", - "28\n", - "19\n", - "28\n", - ",“\n", - "”。\n", - "1989\n", - "6000\n", - "7000\n", - "……\n", - "21\n", - "8021\n", - "8000\n", - "———\n", - "1997\n", - "”DS97\n", - ":“\n", - ":“\n", - "1988\n", - ",“\n", - "”。\n", - ",95%\n", - "5%。\n", - "”———\n", - "”,\n", - "”。\n", - "36\n", - "336\n", - "”。\n", - ",4\n", - "、6\n", - ",71∶65;\n", - ",66∶60。\n", - ",88∶69;\n", - ",96∶62。\n", - ",73∶66;\n", - ",87∶61。\n", - "107∶32\n", - "30%\n", - "31∶41\n", - "21\n", - "96∶59\n", - "28\n", - "55∶72\n", - "27\n", - ":26\n", - "70\n", - "25\n", - ",3\n", - "600\n", - "》。\n", - "》。\n", - "---『\n", - ",“\n", - "1991\n", - "104∶99\n", - ",“\n", - "1987\n", - "1986\n", - ",34\n", - "70\n", - "80\n", - "85\n", - ",3\n", - ":“\n", - "1992\n", - "76∶66\n", - "2000\n", - ";2010\n", - "2000\n", - ",2010\n", - "1997\n", - "1100\n", - "100\n", - "1995\n", - "300\n", - "1000\n", - ",80\n", - "300\n", - "OK,\n", - ":“\n", - "”,\n", - "---\n", - "0∶6\n", - "0∶0\n", - "2∶0\n", - "3∶2\n", - "2∶2\n", - "4∶1\n", - "5∶0\n", - "2∶3\n", - "5∶3\n", - "1∶0\n", - "2∶0\n", - "2∶1\n", - "2∶3\n", - "27\n", - "65∶75\n", - "95∶89\n", - "46\n", - "19\n", - "90\n", - "24∶36\n", - "12∶8\n", - "27\n", - "52∶70\n", - "25\n", - "2∶0\n", - "25\n", - ":“\n", - "……”\n", - "”,\n", - "2∶1\n", - "2∶1\n", - "4∶1\n", - "”。\n", - "”。\n", - "---\n", - "”,\n", - "90\n", - "WNBA\n", - "”,\n", - "NBA\n", - ",B\n", - ";C\n", - ";D\n", 
- ",A\n", - "02\n", - "19\n", - "1994\n", - "1992\n", - "750\n", - "0∶2\n", - ",23\n", - ",14\n", - "26\n", - "3∶1\n", - "、5\n", - "、6\n", - "、7\n", - "、8\n", - "、9\n", - "、10\n", - "、11\n", - "、12\n", - "、13\n", - "、14\n", - "、15\n", - ",B\n", - ",C\n", - "89∶37\n", - "64∶45\n", - "26\n", - "27\n", - ":“\n", - "2∶0\n", - ",“\n", - "’,\n", - "”。\n", - ",36\n", - "1996\n", - "26\n", - "260.06\n", - "2000\n", - "33\n", - "、42\n", - "28\n", - "60\n", - "80\n", - "17\n", - "23\n", - "29\n", - "2200\n", - "25\n", - "23\n", - "103\n", - "33\n", - "6∶4、4∶6\n", - "6∶2\n", - "6∶1\n", - "6∶4\n", - "1989\n", - "1994\n", - "25\n", - "3∶0。\n", - "684.0\n", - "3×20\n", - "2000\n", - "23\n", - ",5\n", - "118.5\n", - "75\n", - "580\n", - "556\n", - "431\n", - "585\n", - "416\n", - "376\n", - "、8\n", - "’。\n", - ",“\n", - "”,\n", - "”,\n", - "”。\n", - "04\n", - "……\n", - "———\n", - "26\n", - "25\n", - "2∶0\n", - "0∶2\n", - "23\n", - "26\n", - "、28\n", - "11∶4\n", - "……\n", - "……\n", - "2∶1\n", - "11∶4\n", - "12∶10\n", - "0∶2\n", - "9∶11\n", - "7∶10\n", - "15∶7、15∶11\n", - ",“\n", - "”。\n", - "11∶9、10∶12、11∶5\n", - "”。\n", - "1992\n", - "1997\n", - ":“\n", - "……”\n", - ":“\n", - "……”\n", - "1994\n", - ";1998\n", - "1994—1998\n", - "23\n", - "……\n", - "---\n", - "15∶10、11∶15、15∶2\n", - "2∶3。\n", - "18∶14、15∶7\n", - "3∶2\n", - "8700\n", - "8300\n", - "8300\n", - "———\n", - "1954\n", - "65\n", - "57\n", - "”。\n", - "”。\n", - "2∶1\n", - "0∶0。\n", - "2∶1\n", - "1∶0\n", - "3∶2\n", - "32\n", - "0∶2\n", - "2∶3\n", - "6∶0\n", - "、“\n", - "17\n", - "4∶1\n", - ":“\n", - "”;\n", - ":“\n", - "”,\n", - ":“\n", - "”。\n", - "”。\n", - "”。\n", - ":“\n", - "”。\n", - ":“\n", - ":“\n", - "”,\n", - "”,\n", - "”。\n", - "”,\n", - ":“\n", - "”:\n", - "1996\n", - ":“\n", - ":“\n", - "”。\n", - ")(\n", - "),\n", - "1958\n", - "1954\n", - "》(\n", - "》)\n", - "“54.7.2,\n", - "1992\n", - "3∶1\n", - "0∶2\n", - "50%\n", - "”。\n", - "2∶3\n", - "4000\n", - "63\n", - "6200\n", - "21\n", - "0∶1\n", - "25\n", - "5∶0\n", - "29\n", - "19\n", - "1∶0\n", - "1966\n", - "32\n", - "21\n", - "1∶0\n", - "……\n", - "1000\n", - "25\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1995\n", - "……\n", - "4∶0\n", - "2∶3\n", - "2∶2\n", - "1∶2\n", - "---\n", - "195\n", - "345\n", - "(150\n", - "),\n", - "7.5\n", - "77\n", - "58\n", - "87.5\n", - "122.5\n", - "210\n", - "23\n", - "15∶1、15∶8\n", - "11∶8、11∶8\n", - "11∶4、11∶0\n", - "15∶10、15∶8\n", - "11∶5、11∶7\n", - "21\n", - "5∶0\n", - "4∶1\n", - "1860\n", - "5∶4\n", - ":1998\n", - ",“\n", - "”。\n", - ",26\n", - ",22\n", - "3∶2\n", - "2∶3\n", - "、11\n", - "21\n", - "90\n", - ")21\n", - "、22\n", - "19\n", - "5∶2\n", - ";20\n", - "4∶3\n", - ",5\n", - "19\n", - "7∶15、13∶15\n", - "15∶11、15∶6\n", - "4∶1\n", - "”。\n", - "———\n", - "21\n", - "1∶0\n", - ",4\n", - "17\n", - "19\n", - "1∶1\n", - "3∶2\n", - "4∶1\n", - "22\n", - ":A\n", - ";A\n", - "2∶3\n", - "4∶1\n", - "3∶2\n", - "1∶2\n", - "1∶2\n", - "2∶1\n", - "”。\n", - "2∶0\n", - "1∶1\n", - "16∶17\n", - "0∶2\n", - "4∶8\n", - "14∶14。\n", - "15∶3\n", - ",A\n", - "---\n", - "69\n", - "145\n", - "315\n", - "90.5\n", - "92.5\n", - "112.5\n", - "205\n", - "53\n", - "1986\n", - "21\n", - "25\n", - "17\n", - "”。\n", - "81\n", - "》,5\n", - "”———\n", - "2∶3\n", - "11∶6、11∶3\n", - "2∶3。\n", - "”。\n", - "2∶3\n", - "2∶2\n", - "2∶0\n", - "3∶2\n", - "21\n", - "5∶0\n", - "5∶0\n", - "17\n", - "1∶2\n", - "———\n", - "19\n", - "4∶1\n", - "5∶0\n", - "21\n", - ":A\n", - ";B\n", - "2∶0\n", - "”。\n", - "5∶0\n", - "3∶2\n", - "19\n", - ":21\n", - "12∶35\n", - "7∶00\n", 
- "11∶42\n", - "21\n", - "1∶0\n", - "AC\n", - "19\n", - "8300\n", - "19\n", - "(180\n", - "140\n", - "175\n", - "315\n", - "62\n", - "56\n", - "120\n", - "150\n", - "270\n", - "95\n", - "2.5\n", - "48\n", - ",16\n", - "70\n", - "2.5\n", - "”,\n", - "19\n", - "0∶1\n", - "1∶0\n", - "1∶1\n", - "17\n", - "1∶0\n", - "7790\n", - "8300\n", - ")、\n", - "),\n", - "8300\n", - "1∶3\n", - "4∶1、8∶0、2∶1\n", - "0∶0,\n", - "BTV\n", - "5∶4\n", - "26\n", - "31\n", - "63∶69。\n", - "87∶114\n", - "……(\n", - "6∶15\n", - "15∶9、18∶15\n", - "0∶3\n", - "1∶4\n", - "2∶0\n", - "0∶3\n", - "4∶1\n", - "5∶0\n", - "4∶1\n", - "15∶5、16∶17、8∶15\n", - "2∶0\n", - "5∶0\n", - "5∶0\n", - "2∶3\n", - "”,\n", - "”。\n", - "……\n", - "11∶0、11∶1\n", - "———\n", - "200\n", - "1996\n", - "23\n", - "—1997\n", - "29\n", - "2000\n", - "1∶11\n", - "11∶4\n", - "1∶11、3∶11\n", - "———\n", - "5∶0\n", - "3∶2\n", - "4∶1\n", - "4∶1\n", - "2∶0\n", - "2∶0\n", - "11∶4\n", - "1∶11、3∶11\n", - "1∶2\n", - "11∶5、11∶7\n", - "4∶1\n", - "17\n", - ",62\n", - "2002\n", - "———\n", - "31\n", - "2∶2\n", - "2∶1\n", - "0∶0\n", - "0∶0\n", - "0∶2\n", - "2∶1\n", - "0∶1\n", - "17\n", - "1∶0\n", - "23\n", - "21∶13、21∶11\n", - "12∶8,\n", - "21∶13\n", - "3∶2、2∶0\n", - "1994\n", - "AC\n", - ",16\n", - "26\n", - "6、“\n", - "”———\n", - "5、\n", - "”———\n", - "4、“\n", - "”———\n", - "3、“\n", - "———\n", - "2、“\n", - "”———\n", - "1、“\n", - "”———\n", - ":“\n", - "……\n", - "”,\n", - "”———\n", - "4、\n", - "3、\n", - "2、\n", - "1、\n", - "2、\n", - "1、\n", - "1493\n", - "4480\n", - "62\n", - "586\n", - "180\n", - "64\n", - "100\n", - "2∶0\n", - "2∶4\n", - ":“\n", - "26\n", - "”,\n", - "“15\n", - "、20\n", - "9.8\n", - "1997\n", - "”,\n", - "1000\n", - "4618\n", - "3132\n", - "1990\n", - "1997\n", - "120\n", - "60\n", - "400\n", - "1996\n", - "80\n", - "260\n", - "”。\n", - ",“\n", - "”。\n", - "65\n", - "1995\n", - "15%\n", - "———\n", - ")(\n", - ":“\n", - ":“\n", - "”5\n", - ":“\n", - ":“\n", - "……”\n", - ";5\n", - "、12\n", - ";5\n", - "———\n", - "……\n", - "22\n", - "103\n", - "1∶0\n", - "25\n", - "1∶0\n", - "21\n", - "23\n", - ",8\n", - "28\n", - ",9\n", - "150\n", - "21\n", - "19\n", - ",23\n", - "57\n", - "1090\n", - "32\n", - "”。\n", - "80\n", - "1100\n", - ",1997\n", - ",“\n", - "”。\n", - "”(\n", - "1996\n", - "2000\n", - "80\n", - "”。\n", - "4300\n", - "2/3\n", - ",1994\n", - "106\n", - "110\n", - "———\n", - "、3\n", - "、1\n", - "2.6\n", - ":“\n", - ":4\n", - "”。\n", - "”。\n", - ",15\n", - ",18\n", - ",19\n", - ":5\n", - ";9\n", - ";11\n", - "23\n", - "1998\n", - "22\n", - "17\n", - "3000\n", - "6000\n", - "500\n", - "、1000\n", - "3000\n", - "166\n", - "5、6\n", - "”,\n", - ",“\n", - "79,\n", - ",“\n", - "100\n", - "200\n", - "100\n", - "200\n", - ",12\n", - "37、11\n", - "49\n", - "51\n", - "06、11\n", - "09\n", - "40\n", - "55\n", - "21\n", - "71\n", - ":“\n", - "22\n", - ":“\n", - "100\n", - "、200\n", - "、400\n", - ":“\n", - "79\n", - "0.08\n", - "71,\n", - "”,\n", - "2000\n", - "”,\n", - "”。\n", - "1998\n", - "70%;\n", - "27\n", - "33\n", - "968\n", - ")。\n", - "45\n", - "ATP\n", - "260\n", - "1.5\n", - "36\n", - "52\n", - "、56\n", - "、60\n", - "、65\n", - "、70\n", - "、75\n", - "、80\n", - "、85\n", - "85\n", - "673.98\n", - "480.09\n", - "517.26\n", - "23.31\n", - "76\n", - "79\n", - "94\n", - "03\n", - "25\n", - ":“\n", - "49\n", - "05\n", - "1997\n", - "1997\n", - "76\n", - "100\n", - "100\n", - "1997\n", - "1996\n", - "22.8\n", - "78\n", - "4×100\n", - "1992\n", - "100\n", - "1989\n", - ",15\n", - "100\n", - "、200\n", - "17、22\n", - "26,\n", - "”。\n", - "80\n", - "60。\n", - "100\n", - 
"79\n", - "300\n", - "———\n", - "》(\n", - "》)。\n", - "———\n", - "2500\n", - "1998\n", - "NEC\n", - "NEC\n", - "8000\n", - "8586\n", - "———\n", - "100\n", - ")。\n", - "100\n", - "79\n", - "“Hello”\n", - "CA1407\n", - "2∶1\n", - "0∶0,\n", - "90\n", - "0∶0\n", - "2∶2\n", - "3∶1\n", - "1∶2\n", - "3∶0\n", - "75\n", - "295\n", - "130\n", - "165.5\n", - "295\n", - "165.5\n", - "56\n", - "165\n", - "487\n", - "894\n", - "7×5.35\n", - "42\n", - "43\n", - "345\n", - "21∶8\n", - "21∶12。\n", - ",20\n", - "21∶18\n", - "3∶1\n", - "1996\n", - "22\n", - "667.59\n", - "654.54\n", - "692.58\n", - "509.64\n", - "487.14\n", - "513.63\n", - "252.99\n", - "262.38\n", - "3∶0\n", - "380\n", - "315\n", - "408\n", - "378\n", - "),\n", - "》、\n", - "……\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - ":“\n", - "”。\n", - ",“\n", - "”,\n", - "”、“\n", - "”、“\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - ",“\n", - "”。\n", - ":“\n", - "》。\n", - "……\n", - "1995\n", - "1986\n", - "1991\n", - ")。\n", - "1988\n", - "1986\n", - "),\n", - "》,\n", - "1951\n", - ",《\n", - "》,\n", - ":“\n", - ":“\n", - "”1995\n", - ",《\n", - "5000\n", - ",100\n", - "1992\n", - "1994\n", - "1999\n", - "2000\n", - "3∶0\n", - "3∶0\n", - "17\n", - "23\n", - "5067\n", - "1998\n", - "”,\n", - "3∶1\n", - "3∶6\n", - "0∶6\n", - "”,\n", - "40\n", - ",10\n", - "90\n", - "200\n", - "、400\n", - "22\n", - "01;\n", - "60\n", - "05\n", - "01\n", - "———\n", - "2001\n", - "1/10,\n", - "1/5,\n", - "211\n", - "———\n", - "2000\n", - ",2002\n", - "2002\n", - "”。\n", - "5∶0\n", - "2∶2\n", - "……\n", - "1∶3\n", - "5∶0\n", - "2∶3\n", - "25\n", - "47\n", - "1975\n", - "1985\n", - "1993\n", - "”。\n", - "8300\n", - "8700\n", - "40\n", - "113\n", - "61\n", - "22\n", - ",12\n", - ",6\n", - "19\n", - "2∶0\n", - "5000\n", - ",4\n", - "8000\n", - "5000\n", - "”,\n", - "N2\n", - "3000\n", - "2000\n", - "19\n", - "400\n", - "5000\n", - ",“\n", - "”,\n", - "200\n", - "”。\n", - "40\n", - "”,\n", - "500\n", - "26\n", - "28\n", - "37\n", - "1995\n", - "1987\n", - "、“\n", - "80\n", - "’,\n", - "”。\n", - "---\n", - "A,\n", - "———\n", - ":“\n", - "398\n", - "400\n", - "、8\n", - "1∶2\n", - ":1996\n", - "1986\n", - "103\n", - "21\n", - "2∶0\n", - "2∶0\n", - "3∶1\n", - "3∶0\n", - "40\n", - ":“\n", - "、2\n", - "4—6\n", - "100\n", - "、8\n", - "750\n", - "1500\n", - "240\n", - "1998\n", - "”,\n", - "240\n", - "6000\n", - "—21\n", - "”,\n", - "19\n", - "25\n", - "71∶70\n", - "14∶5\n", - "71∶70\n", - "1/4\n", - "1/4\n", - "3∶1\n", - "2∶0\n", - ",5\n", - "”,\n", - "64\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "1995\n", - "1200\n", - "0∶0\n", - "2∶1\n", - "1∶1\n", - "0∶0\n", - "2∶0\n", - "1∶3\n", - "0∶5\n", - "5∶1\n", - "71122\n", - "53118\n", - "44116\n", - "44016\n", - "34213\n", - "41413\n", - "32411\n", - "31410\n", - "24310\n", - "3069\n", - "2349\n", - "2349\n", - "1356\n", - "1356\n", - "4∶14\n", - "99∶52\n", - "92∶77\n", - ":“\n", - "17\n", - "8201\n", - "———\n", - "66\n", - "88,\n", - "35\n", - "37\n", - "2∶0\n", - "1992\n", - "400\n", - "A1\n", - "』,\n", - "3、\n", - "2、\n", - "1、\n", - "1998\n", - "2000\n", - "———“\n", - ":“\n", - "1954\n", - ",1979\n", - ",1996\n", - "”。\n", - ")、\n", - "2000\n", - "140\n", - "2000\n", - ",“\n", - "———\n", - "---\n", - "……\n", - "60\n", - "1988\n", - "”,\n", - "”。\n", - "”。\n", - "25\n", - "”。\n", - "”。\n", - "”(\n", - "”,\n", - "”:\n", - "100%。\n", - "32\n", - "400\n", - "———\n", - "116\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - ",5\n", - "1996\n", - "32\n", - ",1995\n", - "———\n", - "180\n", - "……\n", - "”,\n", - "”,\n", - "”、\n", - "33\n", - 
"65\n", - "———\n", - ",“\n", - "”,\n", - "”。\n", - ":“\n", - "”、\n", - "”、\n", - "”、\n", - "100\n", - "”(4\n", - "80\n", - "1.1\n", - "3000\n", - "”。\n", - ",“\n", - "”。\n", - "1995\n", - "1990\n", - "1986\n", - "60\n", - "”。\n", - "……\n", - "300\n", - ":“\n", - "1990\n", - "28\n", - "46\n", - ":“\n", - "1993\n", - "2000\n", - "———\n", - "1992\n", - "”。\n", - "3400\n", - "1991\n", - ",40\n", - "300\n", - "4×350MW\n", - "》、《\n", - "》、《\n", - ",1996\n", - "44%\n", - "75%\n", - "13%\n", - "50%。\n", - "25\n", - "1000\n", - "GSM\n", - "S80\n", - "”,1996\n", - "28\n", - "28\n", - "”。\n", - ":“\n", - "8000\n", - "27\n", - "140\n", - "6.7\n", - "28\n", - "140\n", - "27\n", - "150%。\n", - ",29\n", - "28\n", - "27\n", - "60%\n", - "80%,\n", - "1∶6.2。\n", - "28\n", - "、30\n", - "、45\n", - "”,\n", - "”,\n", - "1992\n", - "”。\n", - "”,\n", - "”,\n", - "———\n", - "10%\n", - "12%。\n", - ":1996\n", - "19%,1997\n", - "22%,\n", - "318\n", - "1997\n", - "29%,\n", - "800\n", - "1997\n", - "2000\n", - "1997\n", - "66%,\n", - "5.29\n", - "1994\n", - "1997\n", - "1987\n", - "1994\n", - "1993\n", - "———\n", - "27\n", - "1996\n", - "3240\n", - "3150\n", - ",1997\n", - "3·7%。\n", - "27\n", - "27\n", - "1/4\n", - "17\n", - "1990\n", - "1300\n", - "4380\n", - "40%\n", - "21%\n", - "3060\n", - ",20\n", - "40\n", - "16.4%。\n", - "27095\n", - "19\n", - "9000\n", - "5820\n", - "27\n", - "———\n", - "123\n", - "———\n", - "8000\n", - ",1986\n", - "950\n", - "350\n", - "1986\n", - ",“\n", - "”,\n", - "1994\n", - "750\n", - "2000\n", - "55\n", - "500\n", - "———\n", - "25\n", - "8%\n", - "1998\n", - "26\n", - "25\n", - "———\n", - "70\n", - ")(\n", - "1999\n", - "250\n", - "6·5\n", - ")。\n", - "26\n", - "8250\n", - "217\n", - "1999\n", - "(1998\n", - "1999\n", - "1998\n", - "3%—3.5%\n", - "4%—4.5%;\n", - "6.2%\n", - "1.4%(\n", - ")。\n", - "1998\n", - "26\n", - "1997\n", - "1997\n", - "8·65\n", - "”。\n", - "26\n", - "17·09\n", - "4·89\n", - ",5\n", - "25\n", - "4000\n", - "1.5\n", - "750\n", - ")。\n", - "200\n", - "1.76\n", - "———\n", - "26\n", - "21\n", - "40\n", - "500\n", - "25\n", - "200\n", - "250\n", - "40%\n", - ",35%\n", - "26\n", - "20%\n", - "261\n", - "250\n", - "25\n", - "1989\n", - "12478\n", - "250\n", - "”。\n", - "1400\n", - "2610\n", - "25\n", - "”)\n", - "40\n", - "4%,\n", - "85%\n", - "1948\n", - "16.5\n", - "44\n", - "60%。\n", - "300\n", - "4200\n", - "”、\n", - "580\n", - "”,\n", - "7%。\n", - "25\n", - "24.15\n", - "350\n", - "331.90\n", - "1987\n", - ",5\n", - "22\n", - "40.67\n", - "41.15\n", - "40.94\n", - "25\n", - "1991\n", - "137·73\n", - "1.210%,\n", - "26\n", - "137.67\n", - "0·54\n", - "2001\n", - "1300\n", - "1945\n", - "1943\n", - "1941\n", - "58\n", - "120\n", - "2.57\n", - "7.4\n", - "87%,\n", - "17\n", - "79%。\n", - "200\n", - "”。\n", - "26\n", - "1996\n", - "100\n", - ",20\n", - "880\n", - "80%\n", - "2/3\n", - "100\n", - "1/3\n", - "1996\n", - "1996\n", - "100\n", - ",1997\n", - "、5\n", - "、2\n", - "1990\n", - "1996\n", - "1997\n", - "13620\n", - "800\n", - "75%\n", - "3600\n", - "4500\n", - "1500\n", - "3000\n", - "23\n", - ",1998\n", - "84\n", - "4.5\n", - "6.8\n", - "23\n", - "),\n", - "600\n", - "5000\n", - "》65×81\n", - "1906\n", - ",50×60\n", - "1890\n", - ",61×50\n", - "1889\n", - "……\n", - "“112”,\n", - "23\n", - "150\n", - "100\n", - ":“\n", - "》、\n", - "》。\n", - ",1500\n", - "19\n", - "100\n", - "22%,\n", - "19\n", - ",“\n", - "”。\n", - "70\n", - "1994\n", - "”:\n", - ":“\n", - "”,\n", - "21\n", - ",1996\n", - "21\n", - "22\n", - "”。\n", - "IBM\n", - "1969\n", - "1982\n", - "ATT\n", - 
"1974\n", - "1982\n", - "ATT\n", - "”。\n", - "Win98,\n", - "Win98;\n", - "”。\n", - "1990\n", - "”。\n", - "office\n", - "NT\n", - "Java\n", - "Win98。\n", - "PC\n", - "PC\n", - "Windows\n", - ",Win98\n", - "PC\n", - "25\n", - ":“\n", - "”。\n", - "21\n", - ",“\n", - "23\n", - "115·6\n", - "1193\n", - "80%\n", - "152\n", - "67\n", - "555\n", - "681\n", - "3%,\n", - "22\n", - ",1997\n", - "1455\n", - "40%\n", - "50%\n", - "4%\n", - "5%\n", - "1997\n", - ",1997\n", - "3·7%,\n", - "———\n", - "1998\n", - "25\n", - "98”\n", - "1972\n", - ",1982\n", - "1969\n", - "IBM\n", - "1982\n", - "1937\n", - "1890\n", - ":1906\n", - ",“\n", - "98’,\n", - "”。\n", - ":“\n", - "”,“\n", - "IBM、\n", - "28\n", - "98”\n", - "98”\n", - ",“\n", - "98”\n", - "95”,\n", - "98”\n", - "98”\n", - "———\n", - "95”\n", - "1995\n", - "100\n", - "95”\n", - "JAVA\n", - "1974\n", - "85%\n", - "98”\n", - "25\n", - "———\n", - "”、“\n", - "”、“\n", - "”、“\n", - "21\n", - "21\n", - "MBA(\n", - ",MBA\n", - "”。\n", - "21\n", - "1000\n", - "1997\n", - "700\n", - "46.5%。\n", - "100\n", - "200\n", - "100\n", - "2000\n", - "100\n", - "200\n", - "1994\n", - "0.02%,\n", - "0.06\n", - "80%\n", - "90%\n", - "28\n", - "……\n", - "110\n", - "50%\n", - "30%。\n", - ")(\n", - ")(\n", - "80%。\n", - "29\n", - "43.3%。\n", - "》(\n", - "》1998\n", - "、“\n", - "”,\n", - "、《\n", - "……\n", - "……\n", - "』,\n", - "》、《\n", - "———\n", - "———\n", - ",“\n", - "”,\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "……\n", - "———\n", - "……“\n", - "———\n", - "……“\n", - "———\n", - "……\n", - "》,\n", - "……\n", - ":“\n", - "’,‘\n", - "……”\n", - "———\n", - "———《\n", - "1980\n", - ",《\n", - "1995\n", - "1995\n", - "—1996\n", - "———\n", - "》(\n", - "》、\n", - "》、\n", - "●《\n", - "”,\n", - "”、“\n", - "”、“\n", - "”,\n", - "”。\n", - ")、\n", - "”,\n", - "”,\n", - "……\n", - "”。\n", - ":《\n", - "1998\n", - ",“\n", - ",“\n", - ":《\n", - "1998\n", - ":《\n", - "1998\n", - "……(\n", - "……\n", - ":《\n", - "1998\n", - "———\n", - "……\n", - "1995\n", - "———\n", - "……\n", - "……\n", - "》、《\n", - "》、《\n", - "》,\n", - "》、\n", - "》,\n", - "》、《\n", - "》、《\n", - "”。\n", - "”,\n", - ",1949\n", - ",1976\n", - "、“\n", - "66\n", - "”,\n", - ",1997\n", - "”。\n", - ",3\n", - "”,\n", - "1990\n", - "300\n", - "100%。\n", - "”。\n", - "”,\n", - "”。\n", - "50%,\n", - "4500\n", - "3000\n", - "1987\n", - "”、\n", - "———\n", - ",1997\n", - "”,\n", - "”,\n", - "150\n", - "1.5\n", - ",1995\n", - "300\n", - "70\n", - "300\n", - "———“\n", - "90\n", - "……\n", - "1995\n", - ":“\n", - "……\n", - ":“\n", - ":“\n", - "1995\n", - "”,\n", - "———\n", - "1000\n", - "100\n", - "”,\n", - "”,\n", - "1938\n", - "1943\n", - "23\n", - "9500\n", - "2.1\n", - "2000\n", - ":1995\n", - "……\n", - ",“\n", - "”“\n", - "1995\n", - "1982\n", - "”,23\n", - "33\n", - "33\n", - "……\n", - "……\n", - "100\n", - "”。\n", - "”。\n", - "”,\n", - "1997\n", - "54\n", - "23\n", - ",85.1%\n", - "35\n", - "82%\n", - "……\n", - ":“‘\n", - "”3\n", - ",26\n", - "MTU\n", - "”。\n", - "”,\n", - "25\n", - "300\n", - "475\n", - "23\n", - "60\n", - "”,\n", - ":“\n", - "13939\n", - ":“\n", - "127\n", - ",27\n", - "296\n", - "15%,\n", - "200\n", - "92\n", - "1.44\n", - "45\n", - "2.3\n", - "1200\n", - "6.6\n", - "55\n", - ":(010)68180800\n", - "”,\n", - ":“\n", - "86)\n", - "1984\n", - "73\n", - "43\n", - "1994\n", - "200\n", - "25\n", - ",4000\n", - ",《\n", - "119\n", - "1998\n", - "35.58\n", - "25\n", - "”,\n", - "”,\n", - "”,\n", - "”、“\n", - "”、“\n", - "”。\n", - ":“\n", - "1997\n", - "1997\n", - "100\n", - "1989\n", - "1996\n", - "1996\n", - "”,\n", - 
"ID\n", - "25\n", - "1.3\n", - "……\n", - ",1996\n", - "ISO9002\n", - "),\n", - "1992\n", - "1990\n", - "80\n", - "———\n", - "2、\n", - "Y222/221\n", - "),\n", - "151\n", - "1、\n", - "———\n", - "1900\n", - "1996\n", - "1.7\n", - "9400\n", - "6200\n", - ",3\n", - ",5\n", - "1996\n", - "”———\n", - "800\n", - "7000\n", - ",1/3\n", - ",60\n", - "1000\n", - "160\n", - "160\n", - "1995\n", - "1988\n", - "80\n", - ",1997\n", - "1.8\n", - "1700\n", - "1987\n", - "1200\n", - "120\n", - "1987\n", - "85%\n", - "120\n", - "”———\n", - "2000\n", - "1500\n", - "41.9\n", - "2000\n", - "”,\n", - "”。\n", - "1994\n", - "1996\n", - "15%\n", - "34\n", - ",16\n", - "19\n", - "34\n", - "”,\n", - "2000\n", - "》,\n", - "”:\n", - "80%\n", - "”“\n", - "”“\n", - "62\n", - "》,\n", - "》,\n", - "”,\n", - ",“\n", - ",“\n", - "”,\n", - ",“\n", - "”,\n", - "”(\n", - "”,\n", - "1996\n", - "60\n", - "1996\n", - ":“\n", - "———\n", - "》,\n", - "———\n", - ":“\n", - "100\n", - ":“\n", - ",1992\n", - ",1996\n", - "1995\n", - "149\n", - "1990\n", - ",“\n", - "”。\n", - "3390\n", - "600\n", - "150\n", - "109\n", - "1990\n", - "———\n", - "A;\n", - "D;\n", - "E;\n", - "B1\n", - "A;\n", - "D;\n", - "RELAX(\n", - "RELAX(\n", - "1998\n", - "1200\n", - "1500\n", - "1994\n", - "———\n", - ")。\n", - ")。\n", - "Slot2\n", - "RISC\n", - "Intel\n", - "———NetCenter740/745\n", - ":“\n", - "80\n", - ",“\n", - "”,\n", - ":“\n", - "××—××\n", - "72\n", - "72\n", - "———\n", - "2000\n", - "1996\n", - "1997\n", - "70%。\n", - "”。\n", - "80%。\n", - "”,\n", - "”,\n", - "”,\n", - "1000\n", - "100\n", - "40\n", - "25\n", - "30—40\n", - "),\n", - "”。\n", - "”、“××\n", - "”、“××\n", - ",“\n", - ":(\n", - "(010—64217984),\n", - "50%\n", - "———\n", - "—4\n", - "1/10,\n", - "65%。\n", - "1/3。\n", - ",7\n", - ",4\n", - "95%\n", - "90%\n", - ",60\n", - "”,\n", - "---\n", - "》、《\n", - "》、《\n", - "———\n", - "1991\n", - ";1993\n", - "”;1995\n", - "”。\n", - "”、\n", - "”、“\n", - "”。\n", - "“1988\n", - "1977\n", - ",1957\n", - ")(\n", - "1956\n", - "”,\n", - "》、《\n", - "』,\n", - "TCL\n", - "》。\n", - "”,\n", - "》、《\n", - "》“\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "2、\n", - "1、\n", - "》、《\n", - "』,\n", - ",『\n", - "』,\n", - ",『\n", - "』,\n", - "”,\n", - "———\n", - ",10\n", - "———“\n", - "”。\n", - ":“\n", - ":“\n", - "70%\n", - "……“\n", - ":“\n", - ",36\n", - "1965\n", - "———\n", - "———\n", - "———\n", - ",1991\n", - "……\n", - "”,\n", - "60\n", - "1962\n", - ",“\n", - "108\n", - "1962\n", - ",108\n", - "———\n", - "2000\n", - "”,“\n", - "”,“\n", - "”,\n", - "2.1\n", - "1.5\n", - "”、“\n", - "”。\n", - "56\n", - ",56\n", - ",56\n", - "56\n", - "”、“\n", - "”,\n", - "———\n", - "1300\n", - "45\n", - "1324\n", - ":“\n", - "1983\n", - "1995\n", - "19\n", - ",21\n", - "21\n", - "21\n", - "”。\n", - "1/3\n", - "19\n", - "“007”\n", - "1997\n", - "“007”\n", - "1962\n", - "“007”\n", - "“007”\n", - "25\n", - "“007”\n", - "19\n", - "———\n", - ",3\n", - "19\n", - "》,\n", - "1906\n", - "》,\n", - "1889\n", - "19\n", - "172\n", - "”。\n", - ",7\n", - "40.99\n", - "40.95\n", - "50.05\n", - "50.03\n", - ",6\n", - "14.07\n", - "12.96\n", - "9%。\n", - "19\n", - "1988\n", - "19\n", - "31\n", - "21\n", - ",10\n", - "580\n", - "8%。\n", - "”、“\n", - "95%,\n", - "4%,\n", - "20%\n", - "1997\n", - "1997—2002\n", - "1996\n", - "80\n", - "1996\n", - "600\n", - "1800\n", - "9000\n", - "2010\n", - "1.6\n", - "8000\n", - "2010\n", - "70\n", - "8000\n", - "300\n", - "200\n", - ",21\n", - "50%\n", - "10%,\n", - "40%\n", - "80%\n", - "100\n", - "390\n", - "28\n", - "1997\n", - "1984\n", - "1997\n", - "60\n", - 
",5\n", - "100\n", - "1997\n", - "78\n", - "1996\n", - "1996\n", - "1997\n", - "205\n", - "300\n", - "),\n", - "25%,\n", - "7.8%,\n", - "13.4%\n", - "23.5%。\n", - "21\n", - "3.2%。\n", - "7%\n", - "25\n", - "10%\n", - "8%。\n", - "13.8%,\n", - "31196\n", - ",4\n", - "1.7%,\n", - "43516\n", - "33\n", - ",4\n", - "12320\n", - "52.6%,\n", - "1998\n", - "21\n", - ",1998\n", - ",1998\n", - "1997\n", - "7.7%,\n", - "3.8%。\n", - "19\n", - "12·2\n", - "57.8%。\n", - "16192\n", - "9023\n", - "1811\n", - "1741\n", - "1000\n", - "30.1%\n", - "21%。\n", - "19\n", - "1997\n", - "1991\n", - "136.65\n", - "2.93%。\n", - "100\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "3500\n", - "112\n", - "22\n", - "”。\n", - "1995\n", - "44\n", - ",45\n", - "48\n", - "25\n", - "60\n", - "49\n", - "44\n", - "34\n", - "17\n", - "21\n", - "24%。\n", - "19\n", - "753\n", - "80\n", - "1996\n", - "1948\n", - "、130\n", - "19\n", - "21\n", - "1000\n", - "1997\n", - "”,\n", - "1000\n", - "IBM\n", - "RS/6000\n", - "1000\n", - "DNA\n", - "DNA\n", - "10〈17\n", - "DNA\n", - "Adleman\n", - "Adleman\n", - "1994\n", - "Science\n", - "DNA\n", - "DNA\n", - "DNA\n", - "0,\n", - "(qubits)\n", - "2(L\n", - "2(L\n", - "Shor\n", - "1000\n", - "AT&T\n", - "PeterShor\n", - ",1994\n", - "1986\n", - "1985\n", - "Likharev\n", - "(RSFQ)\n", - "RSFQ\n", - "0.3\n", - "60\n", - ",IBM\n", - ",80\n", - "0,\n", - "2010\n", - "1000\n", - "DNA\n", - "10—20\n", - "1994\n", - ",1995\n", - ",1998\n", - "60\n", - "1000\n", - "1990\n", - "1987\n", - "1985—1986\n", - "CSL\n", - "1943\n", - "19\n", - "1975\n", - "110\n", - "63\n", - "1859\n", - "139\n", - "1998\n", - "1998\n", - "100\n", - "”。\n", - "1·22\n", - ",200\n", - "112\n", - "3500\n", - "、2\n", - "1995\n", - "”。\n", - "”。\n", - "85%\n", - "20%\n", - "30%\n", - "1941\n", - "2000—2400\n", - "75\n", - "90\n", - "8∶2\n", - "”。\n", - "85%。\n", - "17%。\n", - "82.1\n", - "98”,\n", - "95”\n", - "98”\n", - "60\n", - "850\n", - "17\n", - "”,\n", - "1957\n", - "”。\n", - "1886\n", - "”?\n", - "17\n", - "80\n", - "17\n", - "23·86\n", - "200\n", - "900\n", - "),\n", - "88\n", - "72.54\n", - "1966\n", - "1960\n", - "1915\n", - "400\n", - "8500\n", - "4800\n", - "1476\n", - "1961\n", - "、1966\n", - "15.2\n", - "171、52\n", - "32\n", - "134.5\n", - "368.4\n", - "675\n", - "100\n", - "———\n", - "———\n", - "98”\n", - "95”\n", - "98”\n", - "98”\n", - "17\n", - "98”\n", - "5·5%\n", - "7·2%,\n", - "17\n", - "200\n", - "450\n", - "2、\n", - "1、“\n", - "”16\n", - "56\n", - "230\n", - "250\n", - "295\n", - "132\n", - "31\n", - "19\n", - "10%,\n", - "60\n", - "200\n", - "40\n", - "164.5\n", - "124.5\n", - "4.5%。\n", - "1.02\n", - "2.5%。\n", - "38\n", - "1.2\n", - "22\n", - ")。\n", - "1998—1999\n", - "66\n", - "),\n", - "”。\n", - "375\n", - "240\n", - "22\n", - ",4\n", - "12.20\n", - "0.76\n", - "12.09\n", - "3.39\n", - "2001\n", - "2002\n", - "—M”\n", - "—2”\n", - "85\n", - ",2003\n", - "690\n", - "274\n", - "1/10\n", - "1%。\n", - "50PPM(1PPM\n", - "1/10。\n", - "1997\n", - "37\n", - "300\n", - "(1\n", - "0·16\n", - "1996\n", - "1995\n", - "”,\n", - "33%\n", - "19%,\n", - "”。\n", - "70%\n", - "”。\n", - "500\n", - "229\n", - "115\n", - "50·2%;\n", - "104\n", - "45·4%。\n", - "80\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "”。\n", - "44\n", - "1996\n", - "1996\n", - "1984\n", - "80\n", - "620\n", - "370\n", - ",5\n", - "”,\n", - "39·50\n", - "40·55\n", - "1700·03\n", - "71·29\n", - "172·40\n", - "———\n", - "162·37\n", - "1997\n", - "75\n", - "3%。\n", - "450\n", - "600\n", - "56\n", - "250\n", - "80\n", - 
"DEC\n", - "●DEC\n", - "DIGITALPC\n", - "P6300\n", - "GB13000/GBK\n", - "1995\n", - "———\n", - ":“\n", - "800\n", - "……\n", - "……\n", - "40\n", - "2BQ—140\n", - "7CB—2\n", - "1996\n", - "○○○\n", - "”。\n", - ":“\n", - "X—33\n", - "X—33\n", - "1666\n", - "80\n", - "1992\n", - "X—33\n", - "1998\n", - "X—33\n", - "X—33\n", - ":“\n", - "X—33\n", - "X—33\n", - "7500\n", - "”。\n", - "1975\n", - "1965\n", - "”,\n", - "”,\n", - "”。\n", - "”。\n", - "———\n", - "500\n", - "900\n", - "”,\n", - "———\n", - ",“\n", - "”。\n", - "700\n", - "86\n", - "100\n", - "90\n", - "———\n", - ":“\n", - "80\n", - "……\n", - ":“\n", - "1000\n", - "”,\n", - "”、“\n", - "”,\n", - "》。\n", - "———\n", - "1986\n", - "1997\n", - "90\n", - "”,\n", - "1/5。\n", - "80\n", - "132\n", - "200\n", - "40\n", - "1997\n", - "700\n", - "90%\n", - "31%,\n", - "50—60\n", - "120\n", - "、10\n", - "、39\n", - "2002\n", - "”。\n", - "90\n", - "BBL\n", - ",5\n", - ",“\n", - "420\n", - "(1\n", - "1/3\n", - "3000\n", - "800\n", - "2264\n", - "1996\n", - "150\n", - "2%,\n", - "40\n", - "20%\n", - "10%,\n", - "1.5%。\n", - "3.75%\n", - "3%—3.25%。\n", - "300\n", - "3.5\n", - "1200\n", - "2200\n", - "5.25\n", - "34\n", - "1300\n", - ";2400\n", - "29\n", - "1.75\n", - "2.8\n", - "22\n", - "103\n", - "27\n", - "21\n", - ")2、\n", - "40\n", - "70\n", - "1500\n", - "21\n", - "1、5\n", - ",1996\n", - "2·96%。\n", - "《1997\n", - "1998\n", - "”。\n", - "1998\n", - ",1998\n", - "”。\n", - "100\n", - "348.61\n", - "351.86\n", - "1989\n", - "400\n", - ",4\n", - "350\n", - "1989\n", - "1·7766\n", - "1·7762。\n", - "132·75\n", - "134·07。\n", - "38\n", - "420\n", - "———\n", - "400\n", - "2/3\n", - "1999\n", - "2·8\n", - "“1998\n", - "”,\n", - "”,\n", - "38\n", - "40\n", - ",1999\n", - "2000\n", - ",15\n", - "23\n", - "46\n", - "40%\n", - "500\n", - "1.8\n", - "(6\n", - "(20\n", - ",29\n", - ",1998\n", - ",*\n", - "82500\n", - "4.5\n", - "4400\n", - "6%—25%,\n", - "80\n", - "100\n", - "3.03%\n", - "736\n", - "804\n", - "853\n", - "1997\n", - "48\n", - "3AW\n", - "———“\n", - "”。\n", - "2、\n", - "1、\n", - "7·5\n", - "”。\n", - "1986\n", - "26\n", - "370\n", - "165\n", - "5700\n", - "1/3,\n", - "408·5\n", - "1996\n", - "———\n", - "(SBC)11\n", - "620\n", - "(Ameritech),\n", - "”。\n", - "21\n", - "40%\n", - "2002\n", - ",“\n", - "”,“\n", - "”。\n", - "2·9\n", - "1999—2002\n", - "90\n", - "70\n", - ",1997\n", - "21%,\n", - "18%,\n", - "1997\n", - "59%\n", - "83%\n", - "50%\n", - "1973\n", - ",1944\n", - "40%。\n", - "10%\n", - "20%\n", - "4000\n", - "65%\n", - "68%。\n", - "95%\n", - "85%\n", - "500\n", - "42\n", - "57\n", - "5%。\n", - "200\n", - "3000\n", - "200\n", - "1/3,\n", - "23%,\n", - "8%。\n", - "1380\n", - ",65%\n", - "6%。\n", - "1·199\n", - "1·149\n", - "8·96\n", - "9·493\n", - "8920\n", - "4%,\n", - "4790\n", - "10%,\n", - "4130\n", - "4010\n", - "2·65\n", - "1%;\n", - "6·31\n", - "1997—1998\n", - "2000\n", - "”,\n", - "128\n", - "4、5\n", - "9、10\n", - "1300\n", - "19%,\n", - "1997\n", - "1993\n", - "1998\n", - "191\n", - "1989\n", - "”,\n", - ",15\n", - ",“\n", - ":1\n", - "、2\n", - ";10\n", - "、20\n", - ";1\n", - "1200\n", - "2002\n", - "76\n", - "、2\n", - "、5\n", - "、10\n", - "、20\n", - "、50\n", - "、1\n", - "60\n", - "747\n", - "767\n", - "737—100\n", - "200\n", - "737—300、400\n", - "500\n", - "118\n", - "282\n", - "737—100\n", - "200\n", - "152\n", - "196\n", - "26\n", - "737—100\n", - "200\n", - "25\n", - "737\n", - "737—300、400、500\n", - "8%,\n", - "6%\n", - "3%\n", - "”。\n", - "26\n", - ",35%\n", - "33\n", - "23\n", - "100\n", - "150\n", - "4600\n", - "7.46\n", - 
"2287\n", - "1060\n", - "17\n", - "70%,\n", - "1980\n", - "1992\n", - "28%\n", - "3%,\n", - "”。\n", - ":IMF\n", - "96\n", - "),\n", - "22%。\n", - "530\n", - "3000\n", - ")。\n", - "119\n", - "400\n", - ",1000\n", - ")。\n", - ":“\n", - "”。\n", - "”。\n", - "”。\n", - "70%\n", - "”。\n", - "2.7%\n", - "2.9%;\n", - "1.5%;\n", - "1%,\n", - "2003\n", - "107%。\n", - "17\n", - "1999\n", - "2001\n", - "》,\n", - "”,\n", - "”。\n", - "”,\n", - "”,“\n", - "60%\n", - "”,\n", - ",“\n", - "”。\n", - "1997\n", - "4.7%,\n", - "4.2%\n", - "1%。\n", - ":“\n", - "”。\n", - "1994\n", - "1999\n", - "333·8\n", - "1997\n", - "1.5%,\n", - "121.6%,\n", - ":“\n", - "14·7\n", - "1.3%。\n", - "”,\n", - "1998\n", - "3%\n", - "10%\n", - "1/4。\n", - "80\n", - "10%\n", - "11%,1994\n", - "124.9%。\n", - "70\n", - "”,\n", - "”。\n", - "41\n", - "”。\n", - ";“\n", - ";“\n", - "”,\n", - "1929\n", - "”,\n", - "”。\n", - "”。\n", - "、20\n", - "”。\n", - "506%\n", - "199%。\n", - "197%。\n", - "2002\n", - "85\n", - "1998\n", - "”,\n", - ":“\n", - ":“\n", - "”“\n", - "”“\n", - ":“\n", - "1967\n", - "……\n", - ",“\n", - ":“\n", - "”,\n", - "1000\n", - "7000\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "”。\n", - ":“\n", - ":“\n", - ":“\n", - "”,\n", - ":“\n", - "1.5\n", - "49\n", - "7000\n", - "”10\n", - ":“\n", - ":“\n", - "1996\n", - "1995\n", - "1994\n", - "400\n", - "1993\n", - ",200\n", - "200\n", - "17\n", - "———“\n", - "”!\n", - "———\n", - "1985\n", - "36\n", - "229\n", - "104\n", - ",1997\n", - "1900\n", - "450\n", - "3300\n", - "1993\n", - "0.4\n", - "4000\n", - "200\n", - "、40\n", - "”。\n", - "1991\n", - "8.37\n", - "”,1993\n", - "5.73\n", - "……\n", - "40\n", - "1/3\n", - ")、\n", - "”,56\n", - "56\n", - "56\n", - "56\n", - "56\n", - "150\n", - "、40\n", - "31\n", - "”,\n", - "—26\n", - "”,\n", - "7.4\n", - "0.547\n", - "43%\n", - "57%。\n", - ":“\n", - "1660\n", - "920\n", - "),\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - ",“\n", - "”。\n", - "”,“\n", - "”。\n", - ",“\n", - "”,“\n", - "”,\n", - "1989\n", - "”,\n", - ",1989\n", - "1979\n", - "80\n", - "1970\n", - "》、《\n", - "”,\n", - "”。\n", - "”。\n", - "”。\n", - ",1996\n", - "》,\n", - "”。\n", - "1959\n", - "》,\n", - "1995\n", - "82\n", - "1946\n", - "60\n", - "1941\n", - "1944\n", - "”。\n", - "110\n", - "47\n", - "》,\n", - "47\n", - "1.5\n", - "”,\n", - "1983\n", - ":“\n", - ";“\n", - "”(FACTS),\n", - "21\n", - "”,\n", - "(IGCC)、\n", - "(PFBC)\n", - "80%\n", - "5%—10%\n", - "37%\n", - "58%。\n", - "5000—6000\n", - "1KW(\n", - ")。\n", - "21\n", - "1965\n", - "1995\n", - "35\n", - "1995\n", - "25%\n", - "2020\n", - "24%。\n", - "1996\n", - "39%\n", - "2020\n", - "37%。\n", - "1995\n", - "20%,\n", - "2020\n", - "30%\n", - "1998\n", - "2020\n", - "63900\n", - "1970\n", - "575\n", - "21\n", - "”。\n", - ":“\n", - "100\n", - "1100\n", - "8000\n", - ":1998—1999》\n", - "———\n", - "”,\n", - ",“\n", - "”。\n", - "2000—2001\n", - "14%。\n", - ",“\n", - "—3”\n", - "”。\n", - "25\n", - "—3”\n", - "1989\n", - "”。\n", - "1979\n", - "1950\n", - "90\n", - "700\n", - "”,\n", - "5%\n", - "45\n", - "22\n", - "”。\n", - ")。\n", - "60\n", - "”,\n", - "”,\n", - "”,\n", - "1.2\n", - ":“\n", - ",“\n", - "2002\n", - "40%—50%\n", - ",30%—40%\n", - "1971\n", - "”,\n", - "”,\n", - "”。\n", - "”。\n", - "27\n", - "23\n", - "),\n", - ",“\n", - "”,\n", - "1993\n", - "90\n", - "80\n", - "70\n", - "1979\n", - "2.25%\n", - "3.5\n", - "2.9\n", - "1951\n", - "1958\n", - "8%。\n", - "10%\n", - "9%\n", - "25%,\n", - ",1997\n", - "500\n", - "1991\n", - "300%,\n", - "21\n", - "1858\n", - "1859\n", - "400\n", - "1902\n", - "19\n", 
- "650\n", - "800\n", - "”,\n", - "》“\n", - "”。\n", - "34\n", - "49\n", - ",3\n", - "———\n", - "28\n", - "60\n", - ",20\n", - "38\n", - ")。\n", - "”。\n", - "S—\n", - ",“\n", - "A。\n", - "2、\n", - "1、\n", - "5300\n", - "3.9%,\n", - "200\n", - "750\n", - ",800\n", - "”。\n", - "1997\n", - "340\n", - "1995\n", - "5%\n", - "”。\n", - "”,\n", - ",“\n", - "”。\n", - "”。\n", - "9000\n", - "1949\n", - ",1\n", - ",100\n", - "72%,\n", - "5%\n", - "80\n", - "”,\n", - "0—2%。\n", - "1.5%,\n", - "”。\n", - "1997\n", - "1.7%\n", - "1998\n", - "0.2%。\n", - ",2\n", - "0·1%。\n", - "1610\n", - "LEAR—35A\n", - "89\n", - "2·4\n", - "),\n", - "2·4%。\n", - "1992\n", - "1994\n", - "72\n", - "54%\n", - "94\n", - "“9405”,\n", - "125\n", - "28\n", - "109\n", - "4·4%———\n", - "123\n", - "123\n", - ":123\n", - "”。\n", - "”。\n", - ",“\n", - "”。\n", - "”。\n", - "1982\n", - "38\n", - "70\n", - "62\n", - "”。\n", - "60\n", - "1000\n", - "60\n", - "26%,\n", - "30%。\n", - "7%—8%;\n", - "29\n", - "28\n", - "70\n", - "40\n", - "2002\n", - ",“\n", - "”。\n", - "”。\n", - ",“\n", - ",15\n", - ",3\n", - ",5\n", - "》。\n", - "1989\n", - "500\n", - "28\n", - "、7500\n", - "40\n", - ",“\n", - "”。\n", - "———\n", - "26\n", - "2.7\n", - "、19.6%、16.6%。\n", - "2.9\n", - "19.4%,\n", - "18.6%。\n", - "1999\n", - ",2002\n", - ",2002\n", - "62\n", - "2002\n", - "1996\n", - "25\n", - "31\n", - "”,\n", - "86\n", - "1996\n", - "150\n", - "1994\n", - "”,\n", - "3000\n", - "68\n", - "110\n", - "160\n", - "80\n", - "46\n", - "26\n", - "1.8\n", - ",1997\n", - "2.4\n", - "1800\n", - "31\n", - "1/4,\n", - "1/3,\n", - "2/3。\n", - "1995\n", - ",1993\n", - "200\n", - "1992\n", - "40\n", - "1988\n", - "150\n", - "1.4\n", - "150\n", - ";1997\n", - "1978\n", - "40\n", - "40\n", - "1978\n", - "”,\n", - "ISO9002\n", - "———\n", - ",“\n", - ",“\n", - ",“\n", - ",“\n", - ",“\n", - "”,\n", - "”。\n", - "42\n", - "300%\n", - "”。\n", - ")2、\n", - "———\n", - "100\n", - "1、\n", - "、18\n", - "”,\n", - "2000\n", - "1993\n", - "8.8\n", - "300\n", - "80\n", - "6.9\n", - "160\n", - "300\n", - "”。\n", - ",600\n", - ",9\n", - "2000\n", - "”,\n", - "100\n", - "1992\n", - "6.6\n", - "260\n", - ",1993\n", - "2.6\n", - ",1986\n", - "1985\n", - ",1980\n", - "1978\n", - "1993\n", - "1.2\n", - "37.6\n", - "”,\n", - "”。\n", - "90\n", - "110\n", - "……\n", - "1988\n", - "1986\n", - "19\n", - "400\n", - "300\n", - "19\n", - "5000\n", - "),\n", - "25\n", - "100\n", - "19\n", - "7000\n", - ";1997\n", - "90%\n", - "300\n", - "1997\n", - "3500\n", - "19\n", - "85\n", - "40\n", - "”,\n", - "40%\n", - "13%,\n", - "18%;13%\n", - "9%。\n", - "8%\n", - "11%,\n", - "28\n", - "28\n", - "28\n", - "28\n", - ",“\n", - "”。\n", - "28\n", - "26\n", - "”。\n", - "27\n", - "4、\n", - "130\n", - "3、\n", - "60\n", - ",100\n", - "2、\n", - "1、\n", - "170\n", - "27\n", - ",《\n", - "28\n", - "》。\n", - "”,\n", - "1%—2%\n", - "”:“\n", - "”。\n", - "20%\n", - "10%\n", - "600—700\n", - "(1\n", - "),\n", - ",“\n", - ")、\n", - "40\n", - "300\n", - "5000\n", - "”,\n", - "489\n", - ",“\n", - "”。\n", - "———“\n", - "”。\n", - "27\n", - "200\n", - "、2\n", - "3000\n", - ",3\n", - "2000\n", - "28\n", - "30%\n", - "94·4%\n", - "22\n", - "26\n", - "26\n", - "28\n", - "27\n", - "26\n", - "2·5\n", - "”。\n", - "27\n", - "26\n", - "29\n", - ":“\n", - ",29\n", - ",“\n", - "”。\n", - "82\n", - "1/50。\n", - "26\n", - "———\n", - "、1959\n", - "26\n", - "1994\n", - "25\n", - "23\n", - "26\n", - "1996\n", - "25\n", - "26\n", - "120\n", - "26\n", - "26\n", - "26\n", - ":“\n", - "27\n", - ",“\n", - "”。\n", - "26\n", - ",“\n", - "”。\n", - "”。\n", - 
"”。\n", - "27\n", - "1974\n", - "1998\n", - "27\n", - "26\n", - ",“\n", - "”。\n", - "26\n", - "”。\n", - "26\n", - "26\n", - "———\n", - "83\n", - ":“\n", - "1000\n", - ",26\n", - "、24\n", - "26\n", - "25\n", - "25\n", - "”。\n", - ",“\n", - "”,\n", - ":“\n", - "126\n", - "’。\n", - "”,\n", - "”,\n", - "126\n", - ":“\n", - "25\n", - "40\n", - "25\n", - "25\n", - "25\n", - "800\n", - "440\n", - "22\n", - "25\n", - "25\n", - ",“\n", - "”。\n", - "75\n", - ",“\n", - "”,“\n", - "50%\n", - "”。\n", - "”,\n", - "26\n", - "26\n", - "”。\n", - "”。\n", - "”,\n", - "》,\n", - "》。\n", - "26\n", - "25\n", - "2010\n", - ",20\n", - "25\n", - "25\n", - "2:\n", - "1:\n", - "25\n", - "93\n", - "800\n", - "26\n", - "26\n", - "”,\n", - "”,\n", - "》。\n", - "”,\n", - ",“\n", - "”,“\n", - "”。\n", - ",“\n", - ",“\n", - "1951\n", - "”。\n", - ",“\n", - ")”,\n", - "26\n", - "21\n", - "26\n", - "1997\n", - "”。\n", - "”。\n", - ",“\n", - "1997\n", - "3000\n", - "(5000\n", - "60%\n", - ",1997\n", - "700\n", - "280\n", - "”。\n", - ",2000\n", - "8000\n", - "6000\n", - "1997\n", - ",1996\n", - "6800\n", - "5000\n", - "73%\n", - "1996\n", - "(EIU)\n", - ",1997\n", - "90\n", - "80\n", - "70\n", - "”?\n", - ",20\n", - "”。\n", - ";5\n", - "1000\n", - "49\n", - "1.2\n", - "),\n", - "”。\n", - "……\n", - "”,\n", - "”。\n", - "”,\n", - "1971\n", - "25\n", - "19\n", - "22\n", - "23\n", - "22\n", - "”。\n", - ",24\n", - "45.6\n", - "49\n", - "48\n", - "25\n", - "25\n", - "111\n", - "100\n", - ",3\n", - "29\n", - "23\n", - "386\n", - "148\n", - "134\n", - "48\n", - "17\n", - "———\n", - "1997\n", - "”,\n", - "23\n", - "21\n", - "23\n", - "21\n", - "425\n", - "”,\n", - "1996\n", - "”,\n", - "”。\n", - ":“\n", - "1989\n", - "1995\n", - "255\n", - "1700\n", - "281\n", - "”。\n", - "1.8\n", - ",1.4\n", - "300\n", - "”——\n", - "150\n", - "、“\n", - "”“\n", - "”,\n", - "100\n", - "19\n", - ";19\n", - ";20\n", - ";20\n", - "1935\n", - "1936\n", - ",“\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - "”。\n", - ":(1)\n", - ":(1)\n", - "70\n", - "”,\n", - ":(1)\n", - ":(1)\n", - ":(1)\n", - ";(2)\n", - ";(3)\n", - ":(1)\n", - "”,“\n", - ":18\n", - ";19\n", - "(15\n", - ");\n", - "(18\n", - "60\n", - "19\n", - "60\n", - ");\n", - "(19\n", - ");\n", - ":(1)\n", - "”,\n", - "21\n", - "”,\n", - "”、“\n", - "”、“\n", - "”。\n", - "200\n", - "———\n", - "———\n", - ",“\n", - "———\n", - "21\n", - "---\n", - "”,\n", - "”,\n", - ",“\n", - "”,\n", - "———\n", - "”,\n", - "———\n", - "1962\n", - "———\n", - "———\n", - "21\n", - "21\n", - "---\n", - ",“\n", - ":“\n", - "---\n", - ",20\n", - "』。\n", - "』、『\n", - "300\n", - "2002\n", - "7—8\n", - "230\n", - "180\n", - "6—10\n", - "70\n", - "198\n", - "———\n", - "40\n", - ":1998\n", - "1998\n", - "1241\n", - "1003\n", - "700\n", - "》;\n", - "25\n", - "1%。\n", - "1997\n", - "300\n", - "1995\n", - ":“\n", - "”。\n", - ",10\n", - "128\n", - "7500\n", - "21\n", - "1993\n", - "410\n", - "———\n", - "1996\n", - ",“\n", - "”。\n", - "1992\n", - "242\n", - "78\n", - "1992\n", - "1991\n", - "360\n", - "1991\n", - "》。\n", - "80\n", - "1000\n", - "1995\n", - "1988\n", - "1992\n", - "214.6\n", - "1988\n", - "5000\n", - ")。\n", - "69\n", - "69\n", - "1986\n", - "80\n", - "》(\n", - "1987\n", - "》,\n", - "》,\n", - "19\n", - "157\n", - "46\n", - ",28\n", - ",11\n", - "97\n", - "82\n", - "1994\n", - "382\n", - "510\n", - "1993\n", - "》,\n", - "19\n", - "1997\n", - "19\n", - "ISO14000\n", - "、“\n", - "”、“\n", - "”、“\n", - "《CAD\n", - "》。\n", - "CAD\n", - "200\n", - "CAD\n", - "95%。\n", - "CAD\n", - "CAD\n", - "CAD\n", - "17\n", - "(CAD)\n", - "CAD\n", - 
"21\n", - "21\n", - "、B\n", - "CT\n", - ",24\n", - "17\n", - "1997\n", - "3000\n", - "”,\n", - "1.8\n", - "”。\n", - "5000\n", - "”,\n", - "1997\n", - "8000\n", - "1991\n", - "1991\n", - "16.73‰\n", - "1997\n", - "8‰,\n", - "84%\n", - "95%。\n", - "”,\n", - "10%—20%\n", - "10.5\n", - "”,\n", - "90\n", - "”,\n", - "”。\n", - ",“\n", - "300\n", - "1996\n", - "、750\n", - "』。\n", - "』,\n", - "2、3、4\n", - "40\n", - "43.83%;\n", - "127.6\n", - "39.6%。\n", - "100\n", - "1/3\n", - "1.5%\n", - "1000\n", - ")、\n", - ")、\n", - ")、\n", - "(DVD\n", - ")、\n", - "1997\n", - "60\n", - "、4\n", - ",1995\n", - "、1997\n", - ":56\n", - "145\n", - "1030\n", - "300\n", - "800\n", - "0.44\n", - "1996\n", - "300—500\n", - "、12\n", - "1995\n", - "21\n", - "140.4\n", - "5.7\n", - "1992\n", - "6—12\n", - "58\n", - "13.5\n", - "88.3%;\n", - "18.5%,\n", - "21\n", - "……\n", - "---\n", - ",“\n", - "”。\n", - "96\n", - "80\n", - ":“\n", - ",1996\n", - "21\n", - "120\n", - ",3\n", - "4023\n", - "4.2\n", - ",“\n", - "”,\n", - "200\n", - "2300\n", - "》,\n", - "87\n", - "21\n", - "17\n", - "17\n", - "39\n", - "100\n", - "141\n", - "554\n", - "1.5\n", - "1100\n", - "17\n", - "”,\n", - "365\n", - "22\n", - "100\n", - "100\n", - "500\n", - "100\n", - "2、\n", - "1、5\n", - "17\n", - "91·57%\n", - "93·34%。\n", - "20%,6\n", - "180\n", - "19.13\n", - "50%\n", - "8%\n", - "40%\n", - ",60%\n", - "1%\n", - "1997\n", - "》。\n", - "40\n", - "17\n", - "99%。\n", - "70\n", - "3000\n", - "1997\n", - "ISO14000\n", - "”、“\n", - ":“\n", - "”1997\n", - "1994\n", - "1988\n", - "1994\n", - "240\n", - "17\n", - "8000\n", - "2600\n", - "36000\n", - ",0.5\n", - "1984\n", - "35800\n", - "”。\n", - "1986\n", - "』---\n", - "”,\n", - "1997\n", - "1995\n", - "24.9\n", - "23\n", - "1985\n", - "66.9\n", - "60%\n", - "1992\n", - "100\n", - "300\n", - "80\n", - "4400\n", - "90\n", - "300\n", - "1000\n", - "49.2%。\n", - "1960\n", - ",70\n", - "80\n", - "90\n", - "20%,\n", - "70\n", - "80\n", - "3.9%。\n", - "70\n", - "、80\n", - "90\n", - "70\n", - "”,\n", - "”,\n", - "”,\n", - "》,\n", - "1966\n", - "”,\n", - "”,\n", - "60\n", - "1958\n", - "”,\n", - "1956\n", - ",“\n", - "”。\n", - "1955\n", - "》(\n", - "25\n", - "100\n", - "》(\n", - "》(\n", - "》(\n", - "3·\n", - "1994\n", - "100\n", - "77\n", - "1994\n", - "2·\n", - ");\n", - "———\n", - ");\n", - ")、\n", - ":(1)\n", - ",“\n", - "”、\n", - ",“\n", - "”。\n", - "”,\n", - "”,“\n", - "80\n", - "1984\n", - "3000\n", - "80\n", - "40\n", - ",“\n", - "97.3%\n", - "99.1%。\n", - "”,\n", - "”1\n", - "———\n", - "31\n", - ")、\n", - "102\n", - "3835\n", - "1993\n", - "102\n", - "KILL\n", - "CA\n", - "KILL\n", - "KILL\n", - "CA\n", - "KILL98\n", - "1995—1996“\n", - ",1997\n", - "70\n", - "———\n", - "39%,29%\n", - "42%。\n", - "4.91%\n", - "16.2%,\n", - "21.5%。\n", - ",4\n", - "80\n", - "21\n", - "80\n", - ",20\n", - "60\n", - ";“\n", - "1997\n", - "94\n", - "45\n", - "260\n", - "1994\n", - ",4\n", - "108\n", - "60\n", - "1997\n", - "36\n", - "132.15\n", - "2114\n", - "1991\n", - ")、\n", - "》、\n", - "100\n", - "”。\n", - "”,\n", - "》,\n", - "60\n", - ":《\n", - "》(\n", - ")、《\n", - "》(\n", - ")、《\n", - "》(\n", - ")、《\n", - "》(\n", - "”。\n", - ",《\n", - "》(\n", - ")、《\n", - "》(\n", - ")、《\n", - "》(\n", - ")、《\n", - "》(\n", - ",《\n", - "》(\n", - ")、《\n", - "》(\n", - "》(\n", - ")、\n", - "》(\n", - "”,\n", - ",1958\n", - "42\n", - "“‘\n", - ",“\n", - ",“\n", - "60\n", - "》。\n", - "”)\n", - "250\n", - "”、“\n", - "”、“\n", - "E1\n", - "200\n", - "1993\n", - "90\n", - "”,\n", - "1996\n", - "”、“\n", - "”、“\n", - "36\n", - "10%—30%,\n", - "—50\n", - 
"),\n", - "40\n", - "140\n", - "40\n", - "210\n", - "”,\n", - "”,\n", - "3300\n", - "———\n", - "21\n", - "、“\n", - "”。\n", - "1990\n", - "2400\n", - "60\n", - "1998\n", - "1996\n", - ",1997\n", - "”。\n", - "』。\n", - "”;\n", - "80\n", - "80\n", - "21\n", - "21\n", - "1/5\n", - "1997\n", - "62.5%,\n", - "6686\n", - "973\n", - "647\n", - ",1990\n", - "70.5\n", - "6%,\n", - "920\n", - "600\n", - "2500\n", - "50%,\n", - "80%。\n", - "532\n", - "122\n", - "1988\n", - "100\n", - "1995\n", - "27\n", - "1987\n", - ",1991\n", - "》、《\n", - "》、《\n", - ":“\n", - "5·12\n", - ",24\n", - "292\n", - "),\n", - "29\n", - "191\n", - "12.36\n", - "21\n", - "1995\n", - "》、《\n", - "《60\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "40\n", - "、800\n", - "1957\n", - "1990\n", - "1987\n", - "”、“\n", - "1982\n", - "》,\n", - "1957\n", - "”,\n", - ":“\n", - "1954\n", - "1948\n", - "1947\n", - ":“\n", - "1946\n", - "》。\n", - "6741、6742\n", - "———\n", - "6000\n", - "、2000\n", - ";“\n", - "”。\n", - "”;\n", - "”,\n", - "2000\n", - "”、“\n", - "”、“\n", - ");\n", - ":《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1995\n", - ",1995\n", - "1963\n", - ",1987\n", - "1987\n", - "27\n", - "88\n", - "1000\n", - "》、《\n", - "3000\n", - "1996\n", - "60\n", - "”。\n", - "”,\n", - "”,\n", - "———\n", - "”、“\n", - "”、“\n", - "63”\n", - "33\n", - "———\n", - "———\n", - "60”\n", - "80\n", - "60%。\n", - "SPE\n", - "”,\n", - "8000\n", - "300\n", - "”,\n", - "———\n", - "40\n", - "280\n", - "85%\n", - "、X\n", - ",“\n", - "”。\n", - "2000\n", - "1997\n", - "1993\n", - "300\n", - ",“\n", - "1.2\n", - "”。\n", - ":“\n", - "”,“\n", - "”。\n", - "196\n", - "365\n", - "———\n", - "3.4\n", - "3.4\n", - "1.6\n", - "1995\n", - "1993、1994\n", - "8000\n", - ",4\n", - "23\n", - ",4\n", - "26\n", - ":4\n", - "26\n", - "”,\n", - "”,\n", - "”,\n", - "200\n", - ",40\n", - "2100\n", - "7782\n", - "7234\n", - "200\n", - ",75\n", - "———\n", - "———\n", - "313\n", - "26\n", - "1997\n", - "”、“\n", - "2000\n", - "20%。\n", - "100\n", - ")、\n", - "”。\n", - ",“\n", - "7000\n", - ",7\n", - "99.9%,\n", - "93.8\n", - "90%\n", - "1967\n", - "35.2‰,\n", - "1997\n", - "5.25‰。\n", - ":60\n", - "1∶5;70\n", - "1∶16;80\n", - "1∶45;90\n", - "1∶120。\n", - "1996\n", - "《1996\n", - "38\n", - "1994\n", - "98.5%,\n", - "99.5%,\n", - "99.9%,\n", - "98.7%,\n", - "31\n", - "121\n", - "110\n", - "4.22\n", - "1990\n", - "22.6%;\n", - "21.7\n", - "1990\n", - "30%;\n", - "675\n", - ",1992\n", - ",“\n", - "1986\n", - "1990\n", - ",1990\n", - "95\n", - "》,\n", - "》(\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - ")。\n", - "80\n", - "90\n", - ":(1)\n", - "”,\n", - "”。\n", - "”、\n", - "”、“\n", - ":(1)\n", - ",“\n", - ":(1)\n", - "”,\n", - ":(1)\n", - "1996\n", - "”。\n", - "21\n", - "70\n", - "”“\n", - "21\n", - "21\n", - "35\n", - "28\n", - ",300\n", - "484\n", - "』,\n", - "』。\n", - "》、《\n", - "1993\n", - "”。\n", - "》、《\n", - "44\n", - ",1987\n", - "300\n", - "100%,\n", - "600\n", - "1964\n", - "1947\n", - ",1988\n", - "1971\n", - "”,\n", - "1996\n", - "”,\n", - "400\n", - "21\n", - "160\n", - "300\n", - "29\n", - "10%,\n", - "80%\n", - ",95%\n", - "”。\n", - "”,\n", - "———\n", - "1995\n", - "———\n", - "3500\n", - "2000\n", - "”。\n", - "———\n", - "1991\n", - "”,\n", - "---\n", - ",“\n", - "”,\n", - ",“\n", - "”,\n", - "”。\n", - "”、“\n", - "”,\n", - ",1993\n", - "、“\n", - "———\n", - "500\n", - "CE—7479DBS\n", - "73\n", - "200\n", - "150\n", - "”,\n", - "30%\n", - "》,\n", - "9202\n", - "(9202\n", - "21\n", - ":“\n", - ":“\n", - "’,\n", - "96\n", - ":“\n", - ":“\n", - "40\n", - "、800\n", - "”、“\n", 
- "31457\n", - "1994\n", - "31\n", - "6741、6742\n", - "116\n", - "”、“\n", - "”、“\n", - "100\n", - "———\n", - "1998\n", - "10—24\n", - "10%\n", - "19\n", - "1998\n", - ",30\n", - "9970\n", - "290\n", - "173\n", - "1985\n", - "94\n", - "100\n", - "94\n", - "100\n", - "199452\n", - ",2000\n", - "7—14\n", - "1.7\n", - "500\n", - ",4\n", - "100\n", - "1997\n", - "”、“\n", - "”,\n", - "21\n", - "”。\n", - "2000\n", - "180\n", - "1997\n", - "40\n", - "1999\n", - "500\n", - "140\n", - "450\n", - "800\n", - "1995\n", - "1997\n", - "1995\n", - "95\n", - "34\n", - "DEC\n", - "2450\n", - "200\n", - "100%。\n", - "786\n", - "780\n", - "99.24%。\n", - "700\n", - "7000\n", - ":“\n", - "”,\n", - "120\n", - "”,\n", - ":“\n", - "”,\n", - "”;\n", - "”,\n", - "“300\n", - "”,\n", - "1980\n", - ";1985\n", - ",100\n", - "、200\n", - "、1000\n", - "6000\n", - "”。\n", - "”,\n", - "1994\n", - "55\n", - ":“\n", - "、“\n", - "---\n", - "100\n", - "1917—1927\n", - "130\n", - "(VeraSchwarcz)\n", - "(1917—1927)”\n", - "1996\n", - "7072\n", - "”。\n", - "49\n", - "19\n", - ":“\n", - "”“\n", - "》:\n", - "21\n", - "21\n", - "60\n", - ":“\n", - ",1998\n", - ",8000\n", - "1944\n", - "2.7\n", - "69\n", - "3000\n", - "》,\n", - "200\n", - ",52\n", - "41\n", - "79\n", - ",“\n", - ";79\n", - "》。\n", - "100\n", - ",“\n", - "———\n", - "》、\n", - "》、\n", - "100\n", - "100\n", - "10000\n", - "1998\n", - "1898\n", - "100\n", - "〉》。\n", - "〉》,\n", - "》。\n", - "》“\n", - "……\n", - "———“\n", - "———\n", - ",“\n", - "71\n", - "》,\n", - "》,\n", - "”,\n", - "252\n", - ":《\n", - "》,“\n", - "……”\n", - "100\n", - ",《\n", - "28\n", - "———\n", - "”。\n", - "600\n", - ",“\n", - "、“\n", - "100\n", - "26\n", - ",11\n", - "1.3\n", - "》。\n", - "”、“\n", - ":“1997\n", - "1996\n", - "1997\n", - "A。\n", - "———\n", - "1971\n", - ",3000\n", - "”。\n", - "17\n", - ",200\n", - "”。\n", - ":“\n", - "”9\n", - "150\n", - ",“\n", - "1997\n", - "---\n", - ",“\n", - "58\n", - ",86%\n", - "”;98%\n", - ":“\n", - ":“\n", - "3000\n", - "1000\n", - "300\n", - "2000\n", - "”、“\n", - "1996\n", - "1997\n", - "1995\n", - "1994\n", - "1996\n", - "90\n", - "1992\n", - "1993\n", - ":“\n", - ":“\n", - ":“\n", - ":5\n", - "27\n", - ":“\n", - ",80\n", - "28\n", - ",“\n", - "483\n", - "65\n", - "2000\n", - "———\n", - "1997\n", - "1—2\n", - ",4\n", - ",5\n", - ",15\n", - ",15\n", - "———\n", - "”。\n", - "9711\n", - "800\n", - "28\n", - "”。\n", - "1998\n", - "500\n", - "90\n", - "(INTERNET)\n", - "90\n", - ",“\n", - "”,\n", - "21\n", - "INTERNET)、\n", - "1996\n", - "1995\n", - "90\n", - "NII\n", - "1991\n", - ";1994\n", - "NII,\n", - "90\n", - "2020\n", - "2%,\n", - "2%\n", - "90%。\n", - "70\n", - "50%,\n", - "78%,\n", - "50%—75%,\n", - "35%—50%,\n", - "35%。\n", - "21\n", - ",60\n", - "70\n", - ",80\n", - ",90\n", - ",1991\n", - "238\n", - ",1997\n", - "3.42\n", - "”。\n", - "1992\n", - "”(\n", - ",“\n", - "”,\n", - "60%。\n", - "90\n", - "1962\n", - "》,\n", - "1957\n", - ",1909\n", - "1949\n", - "87.5%,\n", - "12.5%。\n", - "1912\n", - "50%\n", - "”《\n", - ":“\n", - "33.54%。\n", - "80\n", - "18000\n", - "28\n", - "25\n", - "51\n", - "6600\n", - "29\n", - "26\n", - "29\n", - "”,\n", - ",5\n", - "25\n", - "28\n", - "』,\n", - "』、『\n", - "』、『\n", - ",《\n", - "580\n", - "686.6\n", - "28\n", - "”———\n", - "、“\n", - "”———\n", - "、“\n", - "”———\n", - "、“\n", - "”———\n", - "”,\n", - "”。\n", - "———“\n", - "”,\n", - "”。\n", - ",12\n", - "66%,\n", - "96∶59\n", - ":“\n", - "42∶30,\n", - "21\n", - "8∶12,\n", - "13∶16、19∶20……\n", - "……\n", - ":“\n", - "),\n", - ":31\n", - ",31\n", - "22\n", - ",6\n", - "22\n", - 
"C、D\n", - ",A、B\n", - ",A\n", - ";B\n", - ";C\n", - ";D\n", - "103∶94\n", - "29\n", - ",12\n", - "13℃/29℃\n", - "16℃/28℃\n", - "17℃/30℃\n", - "10℃/28℃\n", - "8℃/22℃\n", - "10℃/24℃\n", - "14℃/18℃\n", - "10℃/22℃\n", - "8℃/21℃\n", - "18℃/29℃\n", - "21℃/30℃\n", - "19℃/27℃\n", - "18℃/29℃\n", - "21℃/29℃\n", - "21℃/29℃\n", - "19℃/29℃\n", - "15℃/19℃\n", - "14℃/29℃\n", - "22℃/32℃\n", - "20℃/25℃\n", - "25℃/31℃\n", - "23℃/30℃\n", - "26℃/34℃\n", - "20℃/26℃\n", - "20℃/28℃\n", - "17℃/26℃\n", - "13℃/24℃\n", - "21℃/27℃\n", - "15℃/20℃\n", - "12℃/27℃\n", - "10℃/20℃\n", - "8℃/26℃\n", - "16℃/24℃\n", - "23℃/30℃\n", - "24℃/29℃\n", - "24℃/29℃\n", - "15℃/22℃\n", - "28℃/34℃\n", - "9℃/20℃\n", - "27℃/36℃\n", - "17℃/30℃\n", - "10℃/18℃\n", - "9℃/17℃\n", - "8℃/14℃\n", - "8℃/15℃\n", - "15℃/24℃\n", - ":29\n", - "(5\n", - "29\n", - "—5\n", - ")『\n", - "』,\n", - "1980\n", - "29\n", - ",100\n", - "100\n", - "1935\n", - ",1956\n", - "28\n", - "28\n", - "28\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "300\n", - "28\n", - "27\n", - "28\n", - "80\n", - "1998\n", - "8%\n", - ",5\n", - "21\n", - "、22\n", - "8%。\n", - "28\n", - "28\n", - "1998\n", - "27\n", - "28\n", - "》,\n", - "1997\n", - "1993\n", - "”,1996\n", - "1997\n", - "1996\n", - "”。\n", - "”。\n", - "1993\n", - "1998\n", - "1991\n", - "”,\n", - "36\n", - "43\n", - "35\n", - "42\n", - "28\n", - "—4\n", - "4—6\n", - "28\n", - ":22\n", - "—24\n", - "7℃—10℃。\n", - "28\n", - ":『\n", - "4—5\n", - ":28\n", - "29\n", - "(5\n", - "28\n", - "—5\n", - "29\n", - "25\n", - "21\n", - "27\n", - ":『\n", - "33\n", - "BNO\n", - "150\n", - "27\n", - "1997\n", - "1997\n", - "272\n", - ",15\n", - ",32\n", - ",5\n", - "》,\n", - "1997\n", - "”(\n", - ")。\n", - "27\n", - "》1998\n", - "1997\n", - ",5\n", - "26\n", - "29\n", - "27\n", - "26\n", - "27\n", - "”,\n", - ")、\n", - "”,\n", - "”。\n", - "2、\n", - "1996\n", - "1、\n", - "“6·26”\n", - "27\n", - "“6·26”\n", - "14℃/27℃\n", - "18℃/26℃\n", - "18℃/26℃\n", - "9℃/29℃\n", - "9℃/22℃\n", - "12℃/21℃\n", - "16℃/20℃\n", - "8℃/22℃\n", - "8℃/18℃\n", - "19℃/29℃\n", - "20℃/30℃\n", - "18℃/30℃\n", - "17℃/25℃\n", - "22℃/30℃\n", - "18℃/28℃\n", - "15℃/27℃\n", - "13℃/22℃\n", - "16℃/29℃\n", - "20℃/26℃\n", - "20℃/30℃\n", - "23℃/31℃\n", - "22℃/33℃\n", - "26℃/33℃\n", - "20℃/27℃\n", - "22℃/30℃\n", - "15℃/22℃\n", - "16℃/25℃\n", - "6℃/26℃\n", - "14℃/26℃\n", - "12℃/28℃\n", - "9℃/25℃\n", - "13℃/27℃\n", - "17℃/23℃\n", - "22℃/30℃\n", - "22℃/30℃\n", - "22℃/30℃\n", - "14℃/25℃\n", - "26℃/34℃\n", - "11℃/20℃\n", - "27℃/35℃\n", - "19℃/33℃\n", - "9℃/20℃\n", - "10℃/18℃\n", - "11℃/17℃\n", - "8℃/15℃\n", - "13℃/24℃\n", - "27\n", - "28\n", - "5—6\n", - "(5\n", - "27\n", - "—5\n", - "28\n", - "”,\n", - "”,\n", - "26\n", - "”,\n", - "”、“\n", - "”、“\n", - "”,\n", - "1988\n", - "1992\n", - "7%,\n", - "26%。\n", - "957\n", - "》,\n", - "26\n", - "”、\n", - "26\n", - "”(\n", - "26\n", - "26\n", - "26\n", - "』。\n", - ":『\n", - "』。\n", - "……\n", - "』。\n", - ",『\n", - ",『\n", - "』,\n", - ",『\n", - "1996\n", - "40\n", - "15℃/27℃\n", - "16℃/26℃\n", - "18℃/27℃\n", - "12℃/29℃\n", - "7℃/19℃\n", - "15℃/25℃\n", - "14℃/19℃\n", - "8℃/20℃\n", - "10℃/25℃\n", - "20℃/30℃\n", - "17℃/27℃\n", - "17℃/29℃\n", - "18℃/29℃\n", - "21℃/25℃\n", - "20℃/29℃\n", - "18℃/27℃\n", - "15℃/21℃\n", - "17℃/28℃\n", - "20℃/30℃\n", - "20℃/30℃\n", - "24℃/31℃\n", - "23℃/33℃\n", - "24℃/30℃\n", - "20℃/26℃\n", - "20℃/31℃\n", - "18℃/27℃\n", - "16℃/26℃\n", - "9℃/25℃\n", - "14℃/25℃\n", - "13℃/29℃\n", - "8℃/20℃\n", - "9℃/22℃\n", - "15℃/25℃\n", - "20℃/23℃\n", - "24℃/30℃\n", - "24℃/30℃\n", - "17℃/27℃\n", - 
"27℃/34℃\n", - "11℃/21℃\n", - "27℃/36℃\n", - "18℃/30℃\n", - "8℃/15℃\n", - "9℃/14℃\n", - "11℃/16℃\n", - "9℃/14℃\n", - "15℃/22℃\n", - "26\n", - "27\n", - "(5\n", - "26\n", - "—5\n", - "27\n", - "49\n", - "200\n", - "”。\n", - ",《\n", - "25\n", - "》,\n", - ")、\n", - "25\n", - ",60\n", - "):\n", - "1997\n", - "1996\n", - "1/3,\n", - "28%。\n", - "200\n", - ",“\n", - ",“\n", - "21\n", - "25\n", - "———“\n", - "35\n", - "45\n", - "45\n", - "”,\n", - "……\n", - "』、『\n", - "』『\n", - "———\n", - ":“\n", - "1/3\n", - "———\n", - ":“\n", - "1000\n", - "》”,5\n", - "22\n", - ",1996\n", - "8.16\n", - "31\n", - "1997\n", - "1997\n", - "1997\n", - "28\n", - ",1996\n", - "1992\n", - ",1993\n", - ",1997\n", - "1056\n", - "3432\n", - "159\n", - "3300\n", - "6732\n", - ",63\n", - "293\n", - "198\n", - "784.52\n", - "1000\n", - "3300\n", - "4000\n", - "63\n", - "316\n", - "118\n", - "80\n", - "25\n", - "25\n", - "26\n", - "(5\n", - "25\n", - "—5\n", - "26\n", - "166\n", - "60\n", - "800\n", - "22\n", - "22\n", - "1489705\n", - "53.29%,\n", - "1995\n", - "57\n", - "17\n", - "77813\n", - "63.5%;\n", - "790\n", - "98.75%。\n", - "1568308\n", - "496\n", - "777—300\n", - "73.8\n", - "747—400\n", - "3.1\n", - "777—200\n", - "10.1\n", - "367\n", - "777—300\n", - "23\n", - "100\n", - "200\n", - "1000\n", - "100\n", - "1000\n", - "1000\n", - "700\n", - "2200\n", - "2200\n", - "22\n", - "》,\n", - "———\n", - ",“\n", - "85%\n", - "”。\n", - ":“\n", - "7000\n", - "3.5\n", - "1/2\n", - "1995\n", - "》,\n", - "23\n", - "500\n", - "”8000\n", - "3000\n", - "100\n", - "1000\n", - "300\n", - ":“\n", - "’,\n", - ":“《\n", - "”8\n", - ",《\n", - "……”\n", - "300\n", - "40\n", - "23\n", - "26%。\n", - "2370\n", - ",2\n", - "4000\n", - "1997\n", - "1996\n", - "”。\n", - "90%,\n", - "”。\n", - "』,\n", - "』,\n", - "』,\n", - "』,\n", - "』,\n", - "』。\n", - "200\n", - "865\n", - ",5\n", - "23\n", - "56\n", - "1.17\n", - "5333\n", - "507\n", - "40\n", - "23\n", - "40\n", - "2000\n", - "100\n", - ",5\n", - "2000\n", - ",C—1—83\n", - "C—1—83\n", - ",15\n", - "……\n", - "”“\n", - "”,\n", - "400\n", - "C—1—83\n", - "”。\n", - "90%\n", - "》。\n", - "22\n", - "23\n", - ",23\n", - "23\n", - "”,\n", - "23\n", - "23\n", - "”,\n", - ",“\n", - "”,\n", - "1998\n", - "2000\n", - "●“\n", - "22\n", - "34\n", - "1997\n", - "22\n", - "1997\n", - "19\n", - "1996\n", - "”。\n", - "1—2\n", - ")3、\n", - "2、\n", - "1、\n", - ":“\n", - "”;\n", - "2000\n", - "……\n", - "800\n", - "1200\n", - "1548\n", - "452\n", - "2000\n", - ":“\n", - ":“\n", - "”,\n", - "40%,\n", - "”。\n", - ":“\n", - "”,\n", - "3000\n", - "”,\n", - "55\n", - ",10\n", - "”。\n", - "19\n", - "”。\n", - "3000\n", - "———\n", - ",5\n", - "0∶2\n", - "21\n", - "2000\n", - "90\n", - "1996\n", - "),\n", - "2∶3\n", - "———\n", - "100%。\n", - "150\n", - "1998\n", - ",4\n", - "28\n", - "———\n", - ",5\n", - "2∶0\n", - "5∶0\n", - "1∶2\n", - ")。\n", - "……\n", - "):\n", - "):\n", - "5∶0\n", - "”。\n", - "23\n", - "2000\n", - "2∶3\n", - "2∶3\n", - "15∶10、17∶14\n", - "1∶3\n", - "17∶18、15∶4、15∶1\n", - "6∶15、2∶15\n", - "9∶15、4∶15\n", - "1∶2\n", - "3∶15、6∶15\n", - "3∶2\n", - "2∶3\n", - "22\n", - "13℃/27℃\n", - "17℃/25℃\n", - "17℃/28℃\n", - "10℃/26℃\n", - "6℃/20℃\n", - "18℃/24℃\n", - "10℃/18℃\n", - "12℃/19℃\n", - "11℃/24℃\n", - "20℃/28℃\n", - "19℃/26℃\n", - "19℃/24℃\n", - "20℃/25℃\n", - "22℃/29℃\n", - "18℃/25℃\n", - "10℃/18℃\n", - "13℃/19℃\n", - "15℃/25℃\n", - "24℃/30℃\n", - "21℃/30℃\n", - "24℃/29℃\n", - "23℃/30℃\n", - "26℃/32℃\n", - "20℃/28℃\n", - "21℃/31℃\n", - "17℃/27℃\n", - "17℃/26℃\n", - "12℃/29℃\n", - "15℃/26℃\n", - "12℃/27℃\n", - 
"7℃/23℃\n", - "8℃/24℃\n", - "7℃/18℃\n", - "24℃/29℃\n", - "26℃/29℃\n", - "25℃/29℃\n", - "19℃/30℃\n", - "28℃/33℃\n", - "11℃/22℃\n", - "29℃/36℃\n", - "13℃/25℃\n", - "11℃/18℃\n", - "13℃/23℃\n", - "12℃/23℃\n", - "12℃/25℃\n", - "12℃/24℃\n", - ",22\n", - "23\n", - "5—7\n", - "25\n", - "27\n", - "23\n", - "22\n", - "21\n", - "45\n", - "300\n", - "211\n", - "6000\n", - "21\n", - ",20\n", - ",20\n", - "”,\n", - "100\n", - "”。\n", - "”,\n", - "100\n", - "2、\n", - "1、\n", - "、35\n", - "1·8\n", - "1·26\n", - "328\n", - "6.24\n", - "4.98\n", - "79.7%。\n", - "21\n", - "500\n", - "10%—20%\n", - "19\n", - "、CT、MRI\n", - "20%\n", - "21\n", - "”,\n", - "』,\n", - "』,\n", - "』、『\n", - "』、『\n", - "』、『\n", - "』、『\n", - "』、『\n", - "』,\n", - "』、『\n", - "』、『\n", - "』,\n", - "』,\n", - "』,\n", - "21\n", - "”,\n", - ",5\n", - "21\n", - "21\n", - "25\n", - "28\n", - "……\n", - "1.9\n", - ",1800\n", - ":“\n", - ":“\n", - ":“\n", - ":“\n", - ",4\n", - "17\n", - "14℃/27℃\n", - "17℃/26℃\n", - "15℃/19℃\n", - "10℃/19℃\n", - "8℃/20℃\n", - "18℃/24℃\n", - "13℃/19℃\n", - "15℃/24℃\n", - "16℃/26℃\n", - "21℃/28℃\n", - "18℃/25℃\n", - "23℃/33℃\n", - "24℃/31℃\n", - "24℃/32℃\n", - "25℃/33℃\n", - "16℃/22℃\n", - "15℃/20℃\n", - "16℃/18℃\n", - "25℃/30℃\n", - "22℃/33℃\n", - "25℃/31℃\n", - "23℃/28℃\n", - "26℃/30℃\n", - "18℃/25℃\n", - "8℃/23℃\n", - "19℃/25℃\n", - "15℃/25℃\n", - "10℃/25℃\n", - "13℃/22℃\n", - "12℃/23℃\n", - "8℃/23℃\n", - "11℃/19℃\n", - "4℃/14℃\n", - "24℃/32℃\n", - "24℃/29℃\n", - "24℃/29℃\n", - "18℃/25℃\n", - "27℃/34℃\n", - "14℃/24℃\n", - "27℃/41℃\n", - "22℃/31℃\n", - "11℃/20℃\n", - "14℃/25℃\n", - "15℃/26℃\n", - "12℃/25℃\n", - "18℃/26℃\n", - ",21\n", - "22\n", - "(5\n", - "21\n", - "—5\n", - "22\n", - ")1986\n", - "———\n", - "900\n", - "127\n", - "”。\n", - "———\n", - "、2\n", - "3000\n", - "21.7\n", - "”,\n", - "”。\n", - "1954\n", - "1998\n", - "2002\n", - "WestChinaInfor.),\n", - "3-4\n", - "19\n", - "6-7\n", - "19\n", - ":“\n", - "》。\n", - "”。\n", - ":“\n", - "3000\n", - "17\n", - "”(\n", - "20%—30%\n", - "97%,\n", - "—13\n", - ":“\n", - ":“\n", - "1993\n", - ":“\n", - "1993\n", - "2、5\n", - "250\n", - "300\n", - "1、5\n", - ":“\n", - ",“\n", - "”。\n", - ",73\n", - ":“\n", - "”。\n", - ",5\n", - "200\n", - "600\n", - "1985\n", - "”,\n", - "”,\n", - "”。\n", - ":8\n", - "”?\n", - "1507\n", - "44\n", - "9000\n", - "44\n", - "4—6\n", - "(5\n", - "—5\n", - "21\n", - ",3\n", - "70\n", - "1.5\n", - ",3000\n", - "130\n", - "60\n", - ",“\n", - "1938\n", - "19\n", - "56\n", - "60\n", - "80\n", - "2000\n", - "……3\n", - "23\n", - "”,\n", - "300\n", - "43\n", - "25\n", - "”,5\n", - "21\n", - "140\n", - "》,\n", - ",15\n", - "8.6\n", - "”,\n", - "”。\n", - "19\n", - "200\n", - "100\n", - "1000\n", - "17000\n", - ",14\n", - "、12\n", - "7.1\n", - "4.5\n", - "5180\n", - "5.3\n", - "19\n", - ":“\n", - "IC\n", - "21\n", - "37\n", - "、72\n", - "19\n", - "1998\n", - "22\n", - "28\n", - "19\n", - "1998\n", - "23\n", - "27\n", - "19\n", - "19\n", - "19\n", - "1997\n", - "124.7\n", - "8.4%。\n", - "C·\n", - ",1938\n", - "28\n", - "81\n", - "500\n", - ":“\n", - "15℃/27℃\n", - "16℃/28℃\n", - "16℃/27℃\n", - "13℃/29℃\n", - "11℃/26℃\n", - "13℃/27℃\n", - "16℃/23℃\n", - "14℃/24℃\n", - "9℃/25℃\n", - "20℃/30℃\n", - "17℃/29℃\n", - "20℃/31℃\n", - "19℃/29℃\n", - "22℃/30℃\n", - "20℃/28℃\n", - "23℃/30℃\n", - "16℃/26℃\n", - "18℃/27℃\n", - "20℃/32℃\n", - "20℃/31℃\n", - "25℃/32℃\n", - "25℃/32℃\n", - "26℃/35℃\n", - "22℃/30℃\n", - "22℃/32℃\n", - "18℃/29℃\n", - "17℃/26℃\n", - "6℃/23℃\n", - "16℃/25℃\n", - "13℃/29℃\n", - "10℃/22℃\n", - "14℃/23℃\n", - "3℃/8℃\n", - "23℃/29℃\n", - 
"24℃/30℃\n", - "24℃/30℃\n", - "16℃/24℃\n", - "27℃/31℃\n", - "15℃/22℃\n", - "26℃/34℃\n", - "19℃/30℃\n", - "9℃/18℃\n", - "9℃/24℃\n", - "14℃/24℃\n", - "13℃/23℃\n", - "12℃/20℃\n", - ",19\n", - "4—6\n", - "(5\n", - "19\n", - "—5\n", - "1996\n", - ",24\n", - ",17\n", - "』、\n", - "』(\n", - "3200\n", - "150\n", - "100\n", - "21\n", - "1995\n", - ":“\n", - "”“\n", - "150\n", - ":“\n", - ",“\n", - "1994\n", - "70\n", - "1994\n", - ":“\n", - "”1997\n", - "1992\n", - "”5\n", - ")、\n", - ")、\n", - ")、\n", - "1999\n", - "2000\n", - "1929\n", - "150\n", - "”,\n", - "”,\n", - "》、《\n", - "1991\n", - "3000\n", - "70\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》(\n", - "42%\n", - ",1995\n", - ":“\n", - "、3000\n", - "800\n", - "1500\n", - ",1500\n", - ",674\n", - "500\n", - "22\n", - "”,\n", - "50%\n", - "350\n", - "1.3\n", - "1.5\n", - "”。\n", - ":“\n", - "”。\n", - "1995\n", - "6.7%,\n", - "8%,\n", - "100\n", - "51%,100\n", - "300\n", - "9.5%,300\n", - "3%。\n", - ",1994\n", - "3385\n", - "47\n", - "70%\n", - "”,\n", - "”。\n", - "1962\n", - "1953\n", - "”,\n", - ",1947\n", - "1962\n", - "“1962\n", - "”,\n", - "”。\n", - "》,\n", - "48\n", - "14℃/27℃\n", - "17℃/26℃\n", - "15℃/27℃\n", - "13℃/29℃\n", - "10℃/23℃\n", - "15℃/28℃\n", - "16℃/25℃\n", - "13℃/25℃\n", - "12℃/26℃\n", - "18℃/27℃\n", - "17℃/30℃\n", - "18℃/29℃\n", - "19℃/29℃\n", - "22℃/30℃\n", - "20℃/29℃\n", - "21℃/33℃\n", - "14℃/20℃\n", - "16℃/31℃\n", - "18℃/30℃\n", - "19℃/28℃\n", - "25℃/31℃\n", - "26℃/34℃\n", - "26℃/34℃\n", - "24℃/32℃\n", - "21℃/32℃\n", - "18℃/29℃\n", - "16℃/27℃\n", - "8℃/21℃\n", - "16℃/26℃\n", - "13℃/26℃\n", - "10℃/23℃\n", - "13℃/27℃\n", - "9℃/12℃\n", - "23℃/29℃\n", - "25℃/31℃\n", - "25℃/30℃\n", - "16℃/25℃\n", - "30℃/37℃\n", - "19℃/24℃\n", - "27℃/36℃\n", - "15℃/31℃\n", - "12℃/21℃\n", - "11℃/23℃\n", - "13℃/25℃\n", - "12℃/26℃\n", - "17℃/27℃\n", - ":18\n", - "19\n", - "4—6\n", - ");\n", - "(5\n", - "—5\n", - "19\n", - "1938\n", - ",1929\n", - ",1931\n", - "1998\n", - "87\n", - "2000\n", - "190\n", - ",158\n", - ",32\n", - "———50\n", - "540\n", - "1/3\n", - "17\n", - "』。\n", - "--\n", - "2000\n", - "6000\n", - ",“\n", - "1994\n", - "”。\n", - "”,\n", - "21\n", - "1986\n", - "』,\n", - ",『\n", - "』,\n", - "』,\n", - ":『\n", - "』,\n", - ",『\n", - ",『\n", - "』,\n", - "』(\n", - "”,\n", - "”,\n", - "1988\n", - "1996\n", - "1962\n", - "60\n", - "”。\n", - "》,\n", - "”。\n", - "48\n", - "”。\n", - "296\n", - "108\n", - "19.8%。\n", - ":“\n", - ",10\n", - ",1000\n", - "”。\n", - ":“\n", - "”:\n", - "8%,\n", - "”,\n", - "”。\n", - ":“\n", - "”。\n", - "”,\n", - "80\n", - ":『\n", - "300\n", - "”,\n", - "45\n", - "80\n", - "300\n", - "”,50\n", - ",C—53\n", - "90\n", - "60\n", - "”,\n", - ",1943\n", - "1996\n", - "C—53\n", - "25\n", - "、“\n", - "”。\n", - ",“\n", - ",800\n", - "260\n", - "……\n", - "”,\n", - "———《\n", - "》”\n", - "78\n", - "”。\n", - "184\n", - "2∶0\n", - "1996\n", - "150\n", - "……\n", - "———\n", - "———\n", - "1997\n", - "25\n", - "40\n", - "———《\n", - "》。\n", - "B12\n", - "4000\n", - "”,\n", - "”,\n", - "B,\n", - "28\n", - "1998\n", - "1997\n", - "B。\n", - "1997\n", - "170\n", - "470\n", - "67\n", - "2916\n", - "92\n", - "“TEDA”\n", - "---\n", - ")。\n", - "1997—1998\n", - "51\n", - "、42\n", - "、38\n", - ":12\n", - "2∶0\n", - "2∶0\n", - "5∶0\n", - "5∶0\n", - "15℃/24℃\n", - "18℃/25℃\n", - "16℃/24℃\n", - "10℃/20℃\n", - "8℃/19℃\n", - "18℃/23℃\n", - "13℃/19℃\n", - "19℃/29℃\n", - "19℃/32℃\n", - "16℃/22℃\n", - "14℃/23℃\n", - "17℃/24℃\n", - "16℃/21℃\n", - "21℃/28℃\n", - "18℃/26℃\n", - "16℃/23℃\n", - "13℃/20℃\n", - 
"16℃/23℃\n", - "15℃/23℃\n", - "18℃/30℃\n", - "23℃/29℃\n", - "22℃/29℃\n", - "26℃/33℃\n", - "17℃/22℃\n", - "20℃/26℃\n", - "16℃/24℃\n", - "18℃/30℃\n", - "6℃/19℃\n", - "12℃/21℃\n", - "12℃/23℃\n", - "6℃/18℃\n", - "8℃/19℃\n", - "10℃/20℃\n", - "22℃/26℃\n", - "24℃/28℃\n", - "24℃/28℃\n", - "16℃/27℃\n", - "27℃/37℃\n", - "14℃/21℃\n", - "27℃/39℃\n", - "16℃/26℃\n", - "5℃/9℃\n", - "11℃/21℃\n", - "15℃/24℃\n", - "13℃/22℃\n", - "12℃/24℃\n", - "(5\n", - "—5\n", - "B19\n", - "B19\n", - "B19\n", - "VP2\n", - "B19\n", - "》,\n", - "———\n", - "22\n", - ")『\n", - "70\n", - "、80\n", - "100\n", - "60\n", - ",3\n", - "28\n", - "26\n", - "”。\n", - "》,\n", - "100\n", - "”,\n", - "”。\n", - "48\n", - "23\n", - "250\n", - "738\n", - "1976\n", - "》、《\n", - "1999\n", - "1999\n", - "),\n", - ":『\n", - "』,\n", - "』。\n", - "19\n", - "21\n", - "15℃/24℃\n", - "17℃/28℃\n", - "14℃/24℃\n", - "14℃/25℃\n", - "13℃/22℃\n", - "16℃/30℃\n", - "13℃/19℃\n", - "17℃/29℃\n", - "16℃/32℃\n", - "17℃/21℃\n", - "14℃/20℃\n", - "17℃/22℃\n", - "15℃/20℃\n", - "21℃/28℃\n", - "17℃/23℃\n", - "17℃/25℃\n", - "13℃/20℃\n", - "13℃/26℃\n", - "15℃/22℃\n", - "18℃/30℃\n", - "25℃/30℃\n", - "26℃/34℃\n", - "27℃/34℃\n", - "18℃/23℃\n", - "20℃/27℃\n", - "19℃/26℃\n", - "18℃/30℃\n", - "6℃/17℃\n", - "14℃/23℃\n", - "14℃/27℃\n", - "6℃/18℃\n", - "13℃/20℃\n", - "8℃/20℃\n", - "23℃/29℃\n", - "24℃/31℃\n", - "25℃/31℃\n", - "14℃/24℃\n", - "26℃/36℃\n", - "14℃/22℃\n", - "26℃/39℃\n", - "17℃/28℃\n", - "11℃/22℃\n", - "11℃/25℃\n", - "12℃/23℃\n", - "10℃/21℃\n", - "12℃/20℃\n", - "5—7\n", - "(5\n", - "—5\n", - "107.4%。\n", - "”、“\n", - "”、“\n", - "17\n", - "17\n", - "》、《\n", - "》;\n", - "》、《\n", - "》;\n", - "》、《\n", - "》;\n", - "》、《\n", - "》;\n", - "》、《\n", - "》;\n", - "》;\n", - "》、《\n", - "》;\n", - "》;\n", - "》;\n", - "》;\n", - "》。\n", - "———《\n", - "》,\n", - "300\n", - "(1898—1956)\n", - "1997\n", - "21\n", - "、《\n", - ":《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》。\n", - ":1997\n", - "3、\n", - "120\n", - "2、\n", - "1、\n", - "58\n", - "318\n", - "5000\n", - "”,\n", - ",5\n", - "3:\n", - "2:\n", - "1:\n", - "”。\n", - "5000\n", - "60%\n", - "》,\n", - "———\n", - "35\n", - "3800\n", - "100\n", - ";“\n", - ":“\n", - "17\n", - "70%。\n", - "———\n", - "---\n", - "80%\n", - "”,\n", - ",“\n", - "》、《\n", - "”。\n", - "1937\n", - "1938\n", - "1937\n", - "》,\n", - "1934\n", - "1923\n", - ",1925\n", - "”,\n", - "”,\n", - "1920\n", - ",1922\n", - ",15\n", - "100\n", - "15℃/26℃\n", - "14℃/28℃\n", - "15℃/24℃\n", - "11℃/28℃\n", - "10℃/23℃\n", - "13℃/28℃\n", - "13℃/19℃\n", - "14℃/27℃\n", - "11℃/27℃\n", - "18℃/22℃\n", - "16℃/20℃\n", - "16℃/22℃\n", - "14℃/19℃\n", - "21℃/25℃\n", - "18℃/24℃\n", - "16℃/26℃\n", - "12℃/17℃\n", - "15℃/22℃\n", - "14℃/18℃\n", - "19℃/25℃\n", - "24℃/31℃\n", - "24℃/32℃\n", - "27℃/35℃\n", - "17℃/23℃\n", - "19℃/27℃\n", - "16℃/21℃\n", - "20℃/29℃\n", - "5℃/20℃\n", - "13℃/18℃\n", - "11℃/22℃\n", - "10℃/19℃\n", - "14℃/24℃\n", - "5℃/16℃\n", - "23℃/28℃\n", - "25℃/31℃\n", - "25℃/30℃\n", - "11℃/19℃\n", - "29℃/35℃\n", - "12℃/23℃\n", - "25℃/39℃\n", - "16℃/31℃\n", - "9℃/18℃\n", - "12℃/25℃\n", - "13℃/27℃\n", - "12℃/22℃\n", - "11℃/17℃\n", - "5—6\n", - "(5\n", - "—5\n", - "(1998—2000)》,\n", - "ABB\n", - "”,\n", - "》,\n", - "”。\n", - ",《\n", - "60\n", - "》,\n", - "43\n", - "60\n", - ",《\n", - ",《\n", - "……\n", - "』,\n", - "』,\n", - "』,\n", - "1937\n", - ",1938\n", - ",1930\n", - "85\n", - "500\n", - "、4\n", - "13℃/27℃\n", - "15℃/26℃\n", - "14℃/26℃\n", - "11℃/24℃\n", - "8℃/22℃\n", - "11℃/25℃\n", - "12℃/19℃\n", - "13℃/24℃\n", - "10℃/25℃\n", - 
"15℃/23℃\n", - "13℃/23℃\n", - "16℃/25℃\n", - "14℃/19℃\n", - "22℃/29℃\n", - "17℃/26℃\n", - "16℃/28℃\n", - "11℃/18℃\n", - "12℃/23℃\n", - "15℃/18℃\n", - "15℃/25℃\n", - "24℃/30℃\n", - "26℃/33℃\n", - "26℃/32℃\n", - "17℃/23℃\n", - "18℃/26℃\n", - "17℃/24℃\n", - "17℃/28℃\n", - "10℃/20℃\n", - "15℃/20℃\n", - "13℃/24℃\n", - "9℃/20℃\n", - "11℃/22℃\n", - "7℃/18℃\n", - "23℃/31℃\n", - "24℃/29℃\n", - "24℃/29℃\n", - "15℃/19℃\n", - "28℃/36℃\n", - "9℃/23℃\n", - "25℃/39℃\n", - "18℃/27℃\n", - "6℃/14℃\n", - "13℃/28℃\n", - "14℃/29℃\n", - "12℃/24℃\n", - "11℃/16℃\n", - "5—6\n", - "5—7\n", - "(5\n", - "—5\n", - "309\n", - "21\n", - "———“\n", - ",“\n", - "”。\n", - "》。\n", - "———\n", - "”。\n", - ",《\n", - "》,\n", - ":『\n", - "』,\n", - "』、『\n", - "』,\n", - "),\n", - ")。\n", - "』。\n", - ":『\n", - "---\n", - ",20\n", - "8%\n", - "7.2%,\n", - "8%\n", - "8.2%,\n", - "12.8%,\n", - "”。\n", - ",8\n", - "—11\n", - "(5\n", - "—5\n", - "150\n", - "81\n", - "80%\n", - "2、\n", - "1、\n", - "256\n", - "1500\n", - "、5\n", - ":“\n", - "2/3,\n", - "1/2,\n", - "”,\n", - "108\n", - "”,\n", - "”、“\n", - "54\n", - "170\n", - "1.3\n", - ",78\n", - "38\n", - "1955\n", - ",1976\n", - "1942\n", - "1945\n", - "1933\n", - "1937\n", - ",1937\n", - ",1939\n", - ",1932\n", - ",1937\n", - "92\n", - "200\n", - "80\n", - "12341\n", - "10729\n", - "7255\n", - "5510\n", - "23\n", - ",5\n", - "28\n", - "21\n", - "2:\n", - ",1\n", - "1:\n", - ",3\n", - "40\n", - "———\n", - "》、《\n", - "》、《\n", - "1935\n", - "———\n", - "”,\n", - "”。\n", - "1930\n", - ",1947\n", - "98\n", - "』,\n", - "』,\n", - "』,\n", - "』,\n", - "』,\n", - "』;\n", - "』,\n", - "』。\n", - "』(\n", - "”、“\n", - "”、“\n", - "”,\n", - "500\n", - "1998\n", - "3000\n", - "38\n", - ")”\n", - "7740\n", - "80\n", - "1200\n", - "120\n", - "1800\n", - ",10\n", - "6000\n", - "”,2000\n", - ",500\n", - ",2000\n", - "4035\n", - "40\n", - "6.7\n", - "9902\n", - "75\n", - "100\n", - "”,\n", - "”。\n", - "”,\n", - ")5\n", - "》,\n", - "100\n", - "》,\n", - "”。\n", - "”,\n", - "、10\n", - "”,\n", - "2673\n", - "1800\n", - "CO\n", - "70.8%,SO2\n", - "87.6%,CO2\n", - "30%。\n", - "1·6\n", - "、30\n", - "850\n", - "200\n", - "60.33%,\n", - "26.74%\n", - "2·25\n", - "1.42%,\n", - "17%\n", - ",1·4\n", - "25\n", - "34\n", - "VTOS\n", - "1997\n", - "8200\n", - "”,\n", - ",1996\n", - ",122\n", - "1·5\n", - "25\n", - "122\n", - "122\n", - ",“\n", - "”———\n", - "”。\n", - "122\n", - "122\n", - "3、\n", - "1、\n", - ":“\n", - "117\n", - "886\n", - "561\n", - "”。\n", - "91.1%。\n", - "110\n", - "622\n", - ",637\n", - "”,\n", - "110\n", - "46\n", - "110\n", - "110\n", - "110\n", - "110\n", - ",110\n", - "110\n", - "110\n", - "1·1\n", - "234\n", - "488\n", - "311\n", - "28\n", - "”。\n", - "、191\n", - "”,\n", - "60%,\n", - "”,\n", - "”。\n", - "1996\n", - "1995\n", - ":“\n", - "”。\n", - "———\n", - "113\n", - ",10\n", - "1260\n", - ")18:00\n", - "17:35\n", - "100\n", - "17:25\n", - "110\n", - "19\n", - "398.73\n", - "0.96\n", - "411.78\n", - "”(\n", - "———\n", - "NBA、\n", - "、NEC\n", - "NEC\n", - "NEC\n", - "31\n", - "45\n", - "300\n", - "1996—1997\n", - "27\n", - "60\n", - "100\n", - ",AMF\n", - "22\n", - ":1998\n", - "7.05\n", - ";200\n", - "22\n", - "57,\n", - ";400\n", - "30,\n", - "01\n", - "12℃/25℃\n", - "15℃/27℃\n", - "15℃/26℃\n", - "14℃/20℃\n", - "9℃/22℃\n", - "7℃/23℃\n", - "10℃/20℃\n", - "4℃/19℃\n", - "3℃/19℃\n", - "18℃/27℃\n", - "19℃/23℃\n", - "19℃/28℃\n", - "15℃/23℃\n", - "19℃/26℃\n", - "23℃/32℃\n", - "18℃/28℃\n", - "11℃/22℃\n", - "16℃/19℃\n", - "18℃/25℃\n", - "20℃/25℃\n", - "25℃/31℃\n", - "24℃/33℃\n", - "26℃/32℃\n", - 
"20℃/25℃\n", - "22℃/28℃\n", - "18℃/27℃\n", - "16℃/26℃\n", - "8℃/21℃\n", - "16℃/24℃\n", - "11℃/23℃\n", - "8℃/19℃\n", - "14℃/22℃\n", - "12℃/22℃\n", - "20℃/26℃\n", - "25℃/31℃\n", - "25℃/30℃\n", - "18℃/24℃\n", - "27℃/37℃\n", - "14℃/21℃\n", - "27℃/36℃\n", - "19℃/30℃\n", - "8℃/17℃\n", - "12℃/18℃\n", - "11℃/17℃\n", - "10℃/15℃\n", - "13℃/23℃\n", - ":8\n", - "(5\n", - "—5\n", - ":510620\n", - ":020—85512526\n", - "159\n", - "1997\n", - "1000\n", - "“1997\n", - "”。\n", - "“863”\n", - "28\n", - "105\n", - "4000\n", - "6000\n", - "2000\n", - ",1000\n", - "1000\n", - "2、\n", - "1、5\n", - "1000\n", - ",“\n", - "7000\n", - ",“\n", - "”1.5\n", - ",“\n", - "”,\n", - "”、“\n", - "”,\n", - "1990\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "』。\n", - "4—6\n", - "5—7\n", - "4—5\n", - "(5\n", - "—5\n", - "60\n", - ",“\n", - "510\n", - ",1999\n", - "1998\n", - "(1998\n", - "21\n", - "1996\n", - "21\n", - "21\n", - "6.\n", - "5.\n", - ";4.\n", - ";3.\n", - ";2.\n", - ";1.\n", - "2—3\n", - "5.\n", - ";4.\n", - ";3.\n", - ";2.\n", - "4.\n", - "3.\n", - ";1.\n", - "”)\n", - "12℃/24℃\n", - "13℃/21℃\n", - "15℃/24℃\n", - "8℃/21℃\n", - "8℃/19℃\n", - "13℃/22℃\n", - "11℃/18℃\n", - "9℃/17℃\n", - "10℃/21℃\n", - "18℃/25℃\n", - "18℃/26℃\n", - "19℃/26℃\n", - "19℃/29℃\n", - "22℃/29℃\n", - "22℃/30℃\n", - "16℃/24℃\n", - "11℃/17℃\n", - "17℃/23℃\n", - "20℃/28℃\n", - "20℃/28℃\n", - "25℃/32℃\n", - "24℃/33℃\n", - "25℃/32℃\n", - "19℃/26℃\n", - "22℃/28℃\n", - "17℃/23℃\n", - "16℃/27℃\n", - "7℃/23℃\n", - "15℃/24℃\n", - "13℃/25℃\n", - "9℃/22℃\n", - "6℃/24℃\n", - "7℃/20℃\n", - "22℃/27℃\n", - "25℃/31℃\n", - "24℃/31℃\n", - "15℃/24℃\n", - "27℃/36℃\n", - "14℃/18℃\n", - "26℃/35℃\n", - "23℃/37℃\n", - "6℃/16℃\n", - "8℃/15℃\n", - "7℃/14℃\n", - "8℃/16℃\n", - "15℃/21℃\n", - ",6\n", - "(5\n", - "—5\n", - ",5\n", - "60%\n", - "》,\n", - "1988\n", - ",1993\n", - "31\n", - "1987\n", - "1999\n", - ",16\n", - "100\n", - "1954\n", - "300\n", - ":『\n", - "25\n", - ",“\n", - "1999\n", - "1999\n", - "”、\n", - "”,\n", - "200\n", - "13℃/20℃\n", - "15℃/24℃\n", - "15℃/25℃\n", - "10℃/25℃\n", - "8℃/14℃\n", - "14℃/18℃\n", - "11℃/17℃\n", - "12℃/18℃\n", - "12℃/18℃\n", - "17℃/27℃\n", - "17℃/27℃\n", - "17℃/24℃\n", - "19℃/29℃\n", - "21℃/27℃\n", - "21℃/29℃\n", - "18℃/28℃\n", - "11℃/18℃\n", - "16℃/27℃\n", - "20℃/31℃\n", - "19℃/32℃\n", - "25℃/30℃\n", - "24℃/34℃\n", - "25℃/33℃\n", - "22℃/28℃\n", - "21℃/31℃\n", - "17℃/26℃\n", - "17℃/28℃\n", - "5℃/20℃\n", - "14℃/22℃\n", - "13℃/22℃\n", - "9℃/20℃\n", - "7℃/21℃\n", - "3℃/14℃\n", - "23℃/28℃\n", - "24℃/29℃\n", - "24℃/29℃\n", - "13℃/24℃\n", - "30℃/36℃\n", - "13℃/22℃\n", - "25℃/34℃\n", - "20℃/36℃\n", - "13℃/21℃\n", - "11℃/17℃\n", - "7℃/15℃\n", - "6℃/15℃\n", - "11℃/17℃\n", - "5—6\n", - "(5\n", - "—5\n", - "37\n", - "25\n", - "”,\n", - "”、“\n", - "”。\n", - "1996\n", - "23\n", - "28\n", - "900\n", - "70\n", - "100\n", - "1986\n", - "81\n", - "84%,\n", - "23\n", - "》、《\n", - "301\n", - "301\n", - "”,\n", - "301\n", - "1997\n", - "28\n", - "77\n", - "21\n", - "275\n", - "65\n", - "68\n", - "2138\n", - "2160\n", - "194\n", - "2137\n", - ",400\n", - "100\n", - "800\n", - ",35\n", - "70%,\n", - "40\n", - "80\n", - "29\n", - "31\n", - "65\n", - "、“\n", - "”,“\n", - "1200\n", - "17\n", - "100\n", - ";“\n", - "”,\n", - "3、5\n", - "95\n", - "38\n", - ":“\n", - "……”2、\n", - "———\n", - "100\n", - "100\n", - "1、5\n", - "71\n", - "92\n", - "400\n", - ":“\n", - "4000\n", - "400\n", - "13℃/23℃\n", - "15℃/27℃\n", - "14℃/28℃\n", - "13℃/28℃\n", - "10℃/26℃\n", - "11℃/25℃\n", - "13℃/21℃\n", - "12℃/26℃\n", - "14℃/25℃\n", - 
"20℃/28℃\n", - "20℃/29℃\n", - "21℃/31℃\n", - "19℃/29℃\n", - "21℃/27℃\n", - "22℃/31℃\n", - "17℃/28℃\n", - "13℃/18℃\n", - "17℃/28℃\n", - "20℃/32℃\n", - "19℃/31℃\n", - "23℃/30℃\n", - "25℃/34℃\n", - "25℃/32℃\n", - "25℃/32℃\n", - "22℃/34℃\n", - "19℃/28℃\n", - "17℃/28℃\n", - "5℃/24℃\n", - "15℃/24℃\n", - "18℃/26℃\n", - "9℃/20℃\n", - "14℃/21℃\n", - "3℃/10℃\n", - "21℃/26℃\n", - "23℃/28℃\n", - "23℃/27℃\n", - "15℃/22℃\n", - "29℃/37℃\n", - "14℃/21℃\n", - "23℃/34℃\n", - "26℃/38℃\n", - "12℃/23℃\n", - "11℃/16℃\n", - "8℃/16℃\n", - "9℃/17℃\n", - "13℃/22℃\n", - "4—6\n", - "5—6\n", - "(5\n", - "—5\n", - "29\n", - "100\n", - "4000\n", - "600\n", - "27000\n", - "300\n", - "1948\n", - "(18cm×13cm)。\n", - "1948\n", - "”“\n", - "747—200\n", - "26\n", - ";『\n", - "』,\n", - "……\n", - ",『\n", - ",“\n", - "6000\n", - "20%\n", - "50%\n", - "29\n", - "》,\n", - "》、《\n", - "》、《\n", - "》,\n", - "》,\n", - "》,\n", - "》(\n", - "———《\n", - ":“\n", - ":5\n", - "……\n", - "”,\n", - "”,“\n", - "”,\n", - "———\n", - "”,\n", - "”。\n", - "”。\n", - ":“\n", - "”。\n", - "”,\n", - "”,\n", - "”;\n", - "”;\n", - ":“\n", - "”。\n", - "”,\n", - "”,\n", - "》,\n", - "”。\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - ":《\n", - "、“\n", - "”,\n", - "1921\n", - "”,\n", - "———\n", - "”,\n", - ",“\n", - "”(1919\n", - "》)。\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "”。\n", - "》,\n", - "、《\n", - "》(\n", - "》(\n", - ":“\n", - "”1898—1998,\n", - "……\n", - ":《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》……\n", - ":“\n", - "”,“\n", - "”。\n", - "”。\n", - ":“\n", - "———\n", - "———\n", - "———\n", - "……\n", - ":“\n", - "”:\n", - "》,\n", - "1947\n", - "1948\n", - "”、“\n", - "1947\n", - "1948\n", - "》、《\n", - "》,\n", - "》。\n", - "”、“\n", - "》、\n", - "》、\n", - "1919\n", - ",1935\n", - "1945\n", - "、“\n", - "”;\n", - "、“\n", - "”。\n", - "1925\n", - ",“\n", - ";“\n", - "”,\n", - "1943\n", - "”,\n", - "”,\n", - "10、\n", - "9、\n", - "8、\n", - "7、\n", - "6、\n", - "627\n", - "5、\n", - "4、\n", - "3、\n", - "2、\n", - "1、\n", - "2/3。\n", - "7、\n", - "6、\n", - "5、\n", - "4、\n", - "1202\n", - "3、\n", - "2、\n", - "230\n", - "100\n", - "500\n", - "1、\n", - "———\n", - "”、“\n", - "400\n", - "”,\n", - "”。\n", - "1571\n", - "200\n", - "1892\n", - "”:\n", - "1640\n", - "1572\n", - ",4\n", - "785\n", - "485\n", - "1571\n", - "———\n", - "2000\n", - "1982\n", - "400\n", - "70\n", - "”。\n", - "”。\n", - "1993\n", - "”,\n", - "”,\n", - "”,\n", - "……\n", - "8000\n", - "600\n", - "500\n", - ":“\n", - "”36\n", - ":“\n", - "1997\n", - "———\n", - ":“\n", - ":“\n", - ":“\n", - ":“\n", - ":“\n", - "23\n", - "1997\n", - "1997\n", - ":“\n", - "875\n", - "370\n", - "125\n", - "30%\n", - ")、\n", - "70\n", - "1000\n", - "1926\n", - "”。\n", - "”。\n", - "”,\n", - "”。\n", - "1200\n", - "300\n", - "426\n", - "2690\n", - "1070\n", - "29\n", - "40\n", - "”。\n", - "”。\n", - ",“\n", - "22\n", - "”。\n", - "29\n", - "27\n", - "29\n", - ":“‘\n", - "28\n", - "80\n", - "78\n", - "28\n", - "29\n", - "○,\n", - "138.70\n", - "1.57\n", - ",4\n", - "29\n", - "139.20\n", - "1991\n", - "2·2%,\n", - "4·8%\n", - "2·6%,\n", - "8970.2\n", - "33.63\n", - "28\n", - "28\n", - "1933\n", - "29\n", - "28\n", - "28\n", - "27\n", - "27\n", - "211.2\n", - "1.5\n", - "3.44\n", - "27\n", - "18.8\n", - "28\n", - "29\n", - "45%\n", - "50%\n", - "51%\n", - "56%,\n", - "27\n", - "150%。\n", - "27\n", - "60%\n", - "80%,\n", - "1∶6.2。\n", - "28\n", - "2010\n", - ",1997\n", - "55.7\n", - "1996\n", - "34.3%,\n", - "8.3%\n", - "1997\n", - "1272\n", - "41.54\n", - "19.83\n", - "27\n", - "1997\n", - "21\n", - 
"28\n", - "1999\n", - ",“\n", - "”。\n", - "28\n", - "”。\n", - ",“\n", - "”。\n", - ",“\n", - "”,\n", - "”。\n", - "”。\n", - ",“\n", - ":“\n", - "”。\n", - "”。\n", - ",“\n", - "”,\n", - ",“\n", - "》。\n", - "”。\n", - "160\n", - "”,\n", - "”。\n", - "”,\n", - ",“\n", - "28\n", - "28\n", - "\n", - "\n", - "19\n", - "28\n", - "”,\n", - "、16\n", - "”,\n", - "“202”\n", - "”“\n", - "200\n", - ":“\n", - ",2000\n", - "”1997\n", - "1996\n", - "———\n", - "810\n", - "3000\n", - "98%。\n", - "98%\n", - "1997\n", - "91\n", - "48%,\n", - "11.3%,“\n", - "”、“\n", - "70%。\n", - "110”\n", - "110”\n", - "”,\n", - "45\n", - "200\n", - "3000\n", - "———“\n", - "”,\n", - "40\n", - "27\n", - "2560\n", - "150\n", - "”。\n", - "5000\n", - "1996\n", - "42\n", - "1100\n", - ",3\n", - "”,\n", - "700\n", - "”。\n", - ",70%\n", - "230\n", - "“110”\n", - "、“\n", - "607\n", - "”,\n", - "36\n", - ",58\n", - "51\n", - "1996\n", - "1995\n", - "200\n", - "1997\n", - "275188\n", - ",1982\n", - "91091\n", - ",10\n", - "1993\n", - "191657\n", - "”,\n", - "1984\n", - ",8\n", - "500\n", - "1000\n", - "1979\n", - "》,\n", - "---\n", - "1994\n", - "28\n", - "8406\n", - "1173\n", - "400\n", - "113\n", - "100%。\n", - "1996\n", - "23\n", - "6080\n", - ",1638\n", - "”,\n", - "1578\n", - "”,\n", - "1996\n", - "”:\n", - "99.46%,\n", - "94.58%。\n", - "110\n", - "”、“\n", - "”;“\n", - "”。\n", - "》、《\n", - "》,\n", - "”。\n", - "2·\n", - "1·\n", - "1998\n", - ":4·\n", - "3·\n", - ";2·\n", - ";1·\n", - ";4·\n", - "(“189”\n", - ")。\n", - "3·\n", - ";2·\n", - ";1·\n", - "100\n", - "3000\n", - "”。\n", - "”。\n", - "”,\n", - "40\n", - "”,\n", - "1000\n", - "6.8\n", - "6·8\n", - ",“\n", - "……\n", - "”,\n", - "”。\n", - ":“\n", - "1000\n", - "”。\n", - "1997\n", - ":“\n", - "300\n", - "』---\n", - "204\n", - "45%\n", - "”,\n", - "”,\n", - "130\n", - "25\n", - ":“\n", - "25\n", - "———\n", - "26\n", - "———\n", - "26\n", - "”、\n", - "”、\n", - "26\n", - ",60\n", - "26\n", - "1993\n", - "1988\n", - "26\n", - "』,\n", - "、189\n", - ":“\n", - "5.1\n", - "60\n", - "23\n", - "23\n", - ",1948\n", - "25\n", - ",19\n", - "1998\n", - "“ISDN\n", - "”。\n", - "147\n", - "52\n", - "”,\n", - ",“\n", - "”。\n", - ",CT、\n", - "”,\n", - "162\n", - "46\n", - ")。\n", - "19\n", - "90%\n", - "530\n", - "59·4%。\n", - "60\n", - "》,\n", - "”、\n", - "25\n", - "23\n", - "25\n", - "60\n", - "”、\n", - "”,\n", - ":“\n", - "1995\n", - "1489705\n", - "53.29%,\n", - ":“\n", - "150\n", - ",250\n", - "6000\n", - ";12\n", - ";3\n", - "280\n", - "800\n", - "25\n", - "25\n", - "28\n", - "60\n", - ",81\n", - "60\n", - "166\n", - "60\n", - "800\n", - "496\n", - "22\n", - "4000\n", - "》,\n", - "》,\n", - "”、“\n", - "”、\n", - "》、\n", - ",5\n", - "25\n", - "———\n", - "“110”\n", - "“122”\n", - "4150\n", - "105\n", - "1996\n", - "110\n", - "174\n", - "35\n", - "110\n", - "”、“\n", - "19\n", - "21\n", - "102\n", - "102\n", - "170\n", - "220\n", - "2200\n", - "”。\n", - ":“\n", - "”250\n", - "250\n", - "900\n", - "200\n", - "”,\n", - "21\n", - "”,\n", - ":“\n", - "”、\n", - "『\n", - "』\n", - "『\n", - "』\n", - "』,\n", - "』,\n", - "23\n", - "BP\n", - "28\n", - "300\n", - "25%,\n", - "”、“\n", - "”,\n", - "50%\n", - "1998\n", - "22\n", - "》,\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》,\n", - "———\n", - "”,\n", - "———\n", - "———\n", - "147\n", - "2115\n", - "500\n", - "……\n", - "40\n", - "120\n", - "1997\n", - "”。\n", - ",6000\n", - "28\n", - ":“\n", - "……\n", - "1996\n", - "”。\n", - ":“\n", - "”。\n", - ":“\n", - "41%、21%\n", - "29%\n", - ":“\n", - ":“\n", - ",3\n", - "25\n", - "40\n", - "23\n", - "31\n", - "23\n", - "26\n", - 
",1\n", - "32\n", - "31\n", - ",365\n", - "400\n", - ":“\n", - "》,\n", - "1983\n", - "80\n", - "19\n", - "……\n", - ":“\n", - "118\n", - "80%\n", - "……\n", - ",50\n", - "34\n", - "70\n", - "”14\n", - ":“\n", - "……\n", - "……”\n", - "”,\n", - "”。\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "21\n", - "32\n", - "21\n", - "21\n", - "36\n", - "22\n", - "77\n", - "21\n", - "1620\n", - "1982\n", - "2.2\n", - "840\n", - "”。\n", - "91%\n", - "146\n", - "753\n", - "(1730\n", - "17\n", - "170\n", - "23\n", - "1948\n", - ",1945\n", - "1944\n", - "”、“\n", - "”、“\n", - "”,\n", - "1941\n", - "1938\n", - ",1940\n", - ",200\n", - "700\n", - "1937\n", - ",1940\n", - "1945\n", - "3000\n", - "———\n", - "”。\n", - "1935\n", - "1936\n", - "5900\n", - "1936\n", - "51\n", - "1935\n", - "1931\n", - "”,\n", - "1884\n", - ",1915\n", - "———\n", - "21\n", - "21\n", - "21\n", - ",21\n", - "1/5\n", - "1978\n", - "25\n", - "22\n", - "200\n", - "”,\n", - "”,\n", - "》、《\n", - "》、NHK\n", - ",1937\n", - "”。\n", - "”。\n", - "100\n", - "”。\n", - "100\n", - "”。\n", - "1946\n", - ":“\n", - ",1946\n", - "27\n", - ":“\n", - "”,\n", - "”。\n", - "66、67、69\n", - "22\n", - "———\n", - ",110\n", - "、110\n", - "3·\n", - "2·\n", - "、110\n", - "、110\n", - "、110\n", - "”、“\n", - "”。\n", - "“110”\n", - "“110”\n", - "”。\n", - "21\n", - ":“\n", - "1994\n", - "———\n", - "80\n", - "1995\n", - ",“\n", - "118\n", - "231500\n", - "1996\n", - "35\n", - "118\n", - "1995\n", - ",1995\n", - "---\n", - ",1995\n", - "120\n", - "1997\n", - ")35\n", - "285\n", - "2920\n", - "”,\n", - "”,\n", - ",1997\n", - "67\n", - "450\n", - "”,\n", - "1100\n", - "3·3\n", - "2·3\n", - "25\n", - "1995—2010\n", - "2·3\n", - "”。\n", - ":“\n", - "……\n", - "———\n", - ",1993\n", - "1.45\n", - "200\n", - "25\n", - ",28\n", - "8000\n", - "35\n", - "31\n", - "5000\n", - "5000\n", - "50%\n", - "》。\n", - "1988\n", - "100\n", - "5200\n", - "),\n", - "』;\n", - "』,\n", - "』,\n", - "》。\n", - "』。\n", - "》,\n", - "23\n", - "7·\n", - "6·\n", - ";4·\n", - ";3·\n", - ";2·\n", - "“110”\n", - "1994\n", - "8000\n", - "45\n", - "410\n", - "1995\n", - "1.25\n", - "1300\n", - "”、“\n", - "130\n", - ",33\n", - ",35\n", - ",78\n", - ",51\n", - "”:\n", - "”,\n", - ",“\n", - "”,\n", - "”,\n", - ":“\n", - "70\n", - "122\n", - "21\n", - "169\n", - ",1997\n", - "27.3\n", - "19\n", - "21\n", - "3501\n", - "300\n", - "21%。\n", - "”。\n", - "1996\n", - "35\n", - "2.6\n", - "29\n", - "23\n", - "60\n", - "19\n", - ",“\n", - ",5\n", - ",5\n", - "1994\n", - "1993\n", - "1986\n", - "1993\n", - "、1998\n", - "”,\n", - ",“\n", - "”“\n", - "11.7\n", - "36.2%。\n", - "31.3\n", - "48.9%。\n", - "84\n", - "2500\n", - "140\n", - "3500\n", - "1100\n", - "1500\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "289\n", - "195\n", - "98\n", - ",《\n", - ",40\n", - "400\n", - "19\n", - "110\n", - "、VCD\n", - "』,\n", - "』,\n", - "110\n", - "48273\n", - "28481\n", - "3958\n", - "159\n", - "1998\n", - "45\n", - "59\n", - "35\n", - "1997\n", - "740\n", - "17.1\n", - "1993\n", - "”,\n", - "”。\n", - "、24\n", - "、11\n", - "、4\n", - "、8\n", - "”,\n", - "3.8\n", - "1996\n", - "8.3\n", - "36.5\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "19\n", - "),\n", - "1995\n", - "”———\n", - ":“\n", - "”“\n", - "600\n", - "803\n", - "292\n", - ",1994\n", - "1995\n", - "———\n", - "———\n", - "1987\n", - "1983\n", - "90%\n", - "1984\n", - "1/3,\n", - "12%\n", - "———\n", - "———\n", - "1997\n", - "3.26\n", - "25.16\n", - "80\n", - "2000\n", - "28\n", - "1983\n", - "1987\n", - "2475\n", - "”。\n", - "70\n", - "80\n", - ":“\n", - "40\n", - "1997\n", - 
"2837\n", - "1989\n", - ",6\n", - ",1981\n", - "49\n", - "———\n", - ":『\n", - ":『\n", - "……\n", - "5522.86\n", - "———\n", - "1979\n", - "1996\n", - "6100\n", - "1978\n", - "》,\n", - ",“\n", - "1856\n", - "1873\n", - "17\n", - "1839\n", - "”、“\n", - "”、“\n", - "”、“\n", - "200\n", - "22\n", - "26\n", - "89\n", - "60\n", - "86%,\n", - "10.8%,\n", - "3%,\n", - "60%\n", - ",“\n", - "”。\n", - "1995\n", - ",“\n", - "……\n", - "”。\n", - "”,\n", - ")、\n", - "”,\n", - "”,\n", - "0.1\n", - "48\n", - "48\n", - "31\n", - "100\n", - "100\n", - "……\n", - ",11\n", - ",1\n", - "21\n", - "25\n", - "90\n", - "95\n", - "85\n", - "———\n", - "90\n", - "80\n", - "900\n", - "900\n", - "900\n", - "1996\n", - "1994\n", - ",1997\n", - ":“\n", - "……\n", - "90\n", - "6.6\n", - "”。\n", - "”。\n", - "6.6\n", - "6.6\n", - "6.6\n", - "———\n", - ",“\n", - ",“\n", - "”。\n", - ",“\n", - "”“\n", - "”、“\n", - ",“\n", - "97%,\n", - "187\n", - "95\n", - "100%,\n", - "95%,\n", - "23%,\n", - "21.2%,\n", - "95%,\n", - "”138\n", - "73%,\n", - "250\n", - "39\n", - "19.2\n", - "”,\n", - "55\n", - ",49\n", - "”,\n", - "”、\n", - "”。\n", - "5000\n", - "25\n", - "77\n", - "150\n", - "”、“\n", - "”。\n", - "8000\n", - "300\n", - "215\n", - "1.58\n", - "”。\n", - "110\n", - ",1997\n", - ",28\n", - "1995\n", - "47\n", - "200\n", - "1.5\n", - "4200\n", - "70\n", - "200\n", - "1680\n", - "”:\n", - "1990\n", - "28\n", - "2.5\n", - "1.5\n", - ",1997\n", - "600\n", - "536\n", - "1996\n", - "1160\n", - "”、\n", - "”、\n", - "”。\n", - "”、“\n", - "”;\n", - "1991\n", - "”,\n", - "”。\n", - "---\n", - "100\n", - ",90\n", - ",100\n", - ",50\n", - "200\n", - "》。\n", - ",40\n", - "”,\n", - "”、“\n", - "95%\n", - "”。\n", - "”。\n", - ":“\n", - "80\n", - "》,\n", - "200\n", - "400\n", - "”,\n", - "”。\n", - "1995\n", - "1%\n", - "1%—2%,\n", - "8%—13%,\n", - "40%—50%,\n", - "70%—80%。\n", - "50%\n", - "100%\n", - "300\n", - "1978\n", - "1978\n", - ",1997\n", - "》,\n", - "1000\n", - "7%—10%\n", - "4000\n", - "1500\n", - "……\n", - "……\n", - "……\n", - "……\n", - "……\n", - ":『\n", - ":“\n", - "……\n", - "”,\n", - "”……\n", - "……\n", - "1997\n", - "1965\n", - "———\n", - "———\n", - "……\n", - "———\n", - "……\n", - "———\n", - "……\n", - ",“\n", - "”,\n", - ",“\n", - "”。\n", - "———\n", - "———\n", - "……\n", - "———\n", - ",T\n", - "———\n", - "———\n", - "———\n", - "……\n", - ",“\n", - "”,\n", - "”,\n", - "———\n", - "”,\n", - "……\n", - ")1996\n", - "”,\n", - ",《\n", - ",“\n", - "”,\n", - "”,\n", - "———“\n", - ":“\n", - ",《\n", - "1994\n", - "》。\n", - "———\n", - "》,\n", - "———\n", - "”。\n", - "”,\n", - "1000—50000\n", - "”,\n", - "”。\n", - "———\n", - "”、“\n", - "1947\n", - "》,\n", - "1.4\n", - "》。\n", - "---\n", - "”,\n", - ",“\n", - "———\n", - "———\n", - "———\n", - "”,\n", - ",“\n", - "99%\n", - "90%\n", - "1%\n", - "10%\n", - "5000\n", - "5000\n", - "4000\n", - "99%\n", - "90%\n", - "1%\n", - "10%\n", - "———\n", - "……\n", - "———\n", - "》。\n", - ",50\n", - "”。\n", - "---\n", - ")ChinaByte———“\n", - "SKY:www.sky.co.uk/worldcup\n", - ":www.fifa.com\n", - ":france98.srsnet.com\n", - ":worldcup98.sport.gov.cn\n", - ":www.worldcup98.net.cn\n", - ",ChinaByte\n", - "(hit)\n", - "45\n", - "ChinaByte\n", - ",“\n", - ";“\n", - ",ChinaByte\n", - "IT\n", - "ChinaByte,\n", - "———\n", - "(www.worldcup.com.cn)。\n", - "SportsLine\n", - "100\n", - "CBSSportsLine\n", - "Yahoo\n", - ",“\n", - "www.france98.com\n", - "www.worldcup98.com\n", - "、ESPN(\n", - "2000\n", - "PC\n", - "100\n", - "10.5\n", - "SYBASE\n", - "www.france98.com\n", - "170\n", - "57\n", - "5000\n", - "1996\n", - "Web\n", - "”。\n", - 
"1930\n", - "110\n", - "150\n", - "2.5\n", - "”。\n", - "……\n", - "……\n", - "4000\n", - "23\n", - "40\n", - "120\n", - "40\n", - "),\n", - "8000\n", - "6000\n", - "180\n", - "5000\n", - "9000\n", - "1000\n", - "150\n", - "29\n", - ",1996\n", - "62\n", - "”,\n", - "3000\n", - "40\n", - "1954\n", - "———\n", - "1950\n", - ",20\n", - "1948\n", - "1947\n", - "”。\n", - "70\n", - "●《\n", - "》(\n", - "●『\n", - ",『\n", - "》、《\n", - "》、《\n", - "》(\n", - ",《\n", - "》、《\n", - "》、《\n", - "》)\n", - "》、《\n", - "●“\n", - "》,\n", - ",《\n", - "、《\n", - "》(\n", - ")、\n", - "》、\n", - "》、\n", - "》、\n", - "”,\n", - "》,\n", - "》、《\n", - "》,\n", - "》,\n", - "》,\n", - "”,\n", - "》、《\n", - "》、《\n", - "》,\n", - "》,\n", - "863》,\n", - "》、《\n", - "》、\n", - "》、《\n", - "》、《\n", - "———\n", - "》,\n", - "———\n", - "”。\n", - "……\n", - ":《\n", - "1998\n", - "……\n", - "”。\n", - "……\n", - ":《\n", - "1998\n", - "……\n", - ":《\n", - "1998\n", - "、“\n", - "”、\n", - "……\n", - ":“\n", - "”。\n", - "”,\n", - "』,\n", - ":『\n", - "』,『\n", - "』,『\n", - "』,『\n", - "』,\n", - "……『\n", - "』。\n", - "》、\n", - "》。\n", - ":『\n", - "……』\n", - "』,\n", - "』、『\n", - "』、『\n", - "》,\n", - "》,\n", - "》。\n", - "』、『\n", - "》,\n", - "》,\n", - "》,\n", - "》、\n", - "》、\n", - "》、\n", - "》、\n", - "》、\n", - "》、\n", - "”、\n", - "”。\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - "”、\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - "1997\n", - "0.81%,\n", - "”,\n", - "26\n", - "1993\n", - "”,\n", - ":“\n", - "182\n", - "———\n", - ",“\n", - "”,\n", - "……\n", - "8%\n", - ":“\n", - "100\n", - "……\n", - "40\n", - ":“\n", - ":“\n", - "———\n", - "1996\n", - "……\n", - "1991\n", - "33\n", - "———“\n", - "1000\n", - "”。\n", - "———\n", - "1992\n", - "———\n", - "———\n", - "”,\n", - "680\n", - "”、“\n", - "”、“\n", - "”。\n", - "150\n", - "3000\n", - "》、《\n", - "》、《\n", - "77\n", - "』,\n", - ",1997\n", - "1995\n", - "150\n", - "31\n", - ",4\n", - "17\n", - "———\n", - "———\n", - ":(010)64265344\n", - "3—5\n", - "5%\n", - "5·76%6·30%\n", - ",4\n", - ",1\n", - "400\n", - "100\n", - "550\n", - "1000\n", - "5000\n", - "40\n", - "”,\n", - "”。\n", - "40\n", - "”,\n", - "100\n", - ",1996\n", - "40\n", - "”、“\n", - "1997\n", - "”,\n", - "1997\n", - "134\n", - "200\n", - "1996\n", - "1995\n", - ",3\n", - "1995\n", - "600\n", - "34\n", - "1995\n", - "1993\n", - "31\n", - ",1987\n", - "1985\n", - "1∶3\n", - "60\n", - "5000\n", - "1900\n", - "”,\n", - "600\n", - "1995\n", - ",11\n", - "---(\n", - ")(\n", - "”,\n", - "』(\n", - "”。\n", - "”,\n", - "15%。\n", - "”、“\n", - ",“\n", - ",2\n", - "6.5\n", - "”,\n", - "”,\n", - "”、“\n", - "80%—85%,\n", - "6.5\n", - ",“\n", - ",6.5\n", - "”,\n", - "1994\n", - "6.5\n", - ",4\n", - ":“\n", - ":“\n", - "6.5\n", - "———\n", - "”;\n", - "1998\n", - "1997\n", - "400\n", - "719\n", - ",1996\n", - "647\n", - "1998\n", - "1997\n", - "4.6%,\n", - "4.4\n", - "1994\n", - "1992—1996\n", - "5.9%、3.2%、5%、5.6%、9%。\n", - "1985—1988\n", - "1.2%,\n", - "1989—1991\n", - "0.7%。\n", - "”,\n", - "10—15\n", - ")。\n", - "80\n", - "90\n", - "1985\n", - "1996\n", - "58.5%\n", - "43.5%,\n", - "75%\n", - "”。\n", - ",1996\n", - "2.5\n", - ",80\n", - "48%。\n", - ",1996\n", - "56.63%,\n", - "(1985—1996\n", - "56.33%—58.8%\n", - "1985—1996\n", - "397.6\n", - "1926.04\n", - "4.17%,\n", - "5.68%\n", - "80\n", - "62%,\n", - "1978—1984\n", - "133.6\n", - "355.3\n", - "15.1%。\n", - "80\n", - "●9\n", - ":“\n", - "”。\n", - "”。\n", - "200\n", - "”。\n", - "80\n", - "———\n", - "300\n", - "2:6\n", - "27\n", - "1:\n", - "0∶2\n", - "28\n", - "1/8\n", - ",4\n", - "1/8\n", - "”。\n", - "”,\n", - "1/8\n", - 
"0∶2\n", - "100\n", - "1∶4\n", - "3∶0\n", - "3∶0\n", - "2∶1\n", - "2∶1\n", - "2∶1\n", - "1∶2\n", - "2∶1\n", - "0∶2\n", - "27\n", - "500\n", - "1—6\n", - "17\n", - "……“\n", - ":“\n", - "”,\n", - "80%\n", - "3∶1\n", - "1/8\n", - "”。\n", - "0∶0\n", - "0∶0\n", - "”。\n", - "”———\n", - "1/8\n", - "1∶4\n", - "0∶1\n", - "2∶1\n", - "29\n", - "4、6\n", - "———“\n", - "3、\n", - "2、\n", - "』:\n", - "GNJ\n", - "1、\n", - "1994\n", - "4、\n", - "3、\n", - "2、\n", - ":“\n", - "”1、\n", - "100\n", - "》,\n", - "6000\n", - "4、\n", - "3、\n", - "2、\n", - "1、\n", - "”、“\n", - "1997\n", - "2600\n", - "”、“\n", - "”,\n", - "”。\n", - "1992\n", - "21\n", - "25\n", - ",“\n", - "”、“\n", - "”。\n", - "2000\n", - "2004\n", - "”。\n", - "6∶1\n", - "”,\n", - "0∶0\n", - ",D\n", - "、BBC\n", - ":“\n", - "1978\n", - "1994\n", - "”、“\n", - "---\n", - "40\n", - "7∶2\n", - ":“\n", - "”,\n", - "”。\n", - ":“\n", - ":“\n", - "6∶1\n", - ":“\n", - ",“\n", - "……”\n", - "A、B\n", - "23\n", - "17\n", - "A、B\n", - ",A\n", - ";B\n", - "27\n", - "1998\n", - "27\n", - "17\n", - "300\n", - "3∶0\n", - "32\n", - "1/8\n", - ",C、D\n", - ",8\n", - "6∶1\n", - "3∶1\n", - "26\n", - ",6\n", - "1∶1\n", - "2∶2\n", - "25\n", - "LG\n", - "0∶0\n", - "LG\n", - ")6\n", - "23\n", - "21∶12,20∶22,21∶14。\n", - "2∶1\n", - "2∶1\n", - "23\n", - "—60\n", - "—55\n", - "105328\n", - "1096\n", - "3007142\n", - "1997\n", - "19\n", - "1997\n", - "66.5\n", - "56.8\n", - "169.2\n", - "158.6\n", - "28.6%\n", - "1997\n", - "71.4%,\n", - "71.2%,\n", - "71.7%。\n", - ":“\n", - "”;“\n", - "”;“\n", - "……”\n", - ":“\n", - "”,“\n", - "”。\n", - "2∶1\n", - ",“\n", - "3∶0,\n", - "”。\n", - "”。\n", - "1%\n", - "”。\n", - "”,\n", - "23\n", - "23\n", - "200\n", - "1∶2\n", - "3∶0\n", - "17\n", - "2∶2\n", - "1∶2\n", - "600\n", - "25\n", - "10000\n", - "400\n", - "21\n", - "34\n", - "19\n", - "22\n", - "383.42\n", - "1543.38\n", - "N2\n", - ",555\n", - "22\n", - ":1998\n", - ",555\n", - ")6\n", - "23\n", - ",6\n", - "60\n", - "58\n", - "1966\n", - "1600\n", - "1/8\n", - "1∶0\n", - "22\n", - ",G\n", - ":D\n", - "32\n", - "22\n", - "32\n", - "”。\n", - "96\n", - ",15\n", - "21\n", - "112\n", - "19\n", - "”,\n", - "”,\n", - "43\n", - "2.5\n", - "22\n", - "2∶1\n", - ",10\n", - "44\n", - ",19\n", - ",10\n", - ",21\n", - ",4\n", - "21\n", - ",10\n", - "1∶1\n", - "2∶1\n", - "23\n", - "500\n", - "500\n", - "1998\n", - "”,\n", - "21\n", - "21\n", - "NEC\n", - "21\n", - "2∶1\n", - "27∶15,\n", - "6∶2。\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "”。\n", - "”、“\n", - "”。\n", - "”。\n", - "2∶1\n", - ";4\n", - ",3\n", - "———\n", - "H1,\n", - "1/8\n", - "E1,\n", - "0∶0,\n", - "B1,\n", - "1/8\n", - "B2\n", - "60\n", - "21\n", - "21\n", - ",15\n", - "100\n", - "39\n", - "40\n", - "2∶1\n", - "21\n", - "1∶0\n", - "22\n", - "36\n", - "2∶1\n", - "3∶0\n", - "2∶1\n", - "0∶2\n", - "2∶1\n", - "2∶1\n", - "2∶1\n", - "2∶1\n", - "2∶1\n", - ")。\n", - ",6\n", - "4000\n", - "101\n", - "26\n", - "”。\n", - ",“\n", - "”,“\n", - "”。\n", - "”。\n", - ",“\n", - "”。\n", - ":“\n", - "90\n", - ",8\n", - ",“\n", - "”。\n", - "21\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "”。\n", - ",“\n", - "”。\n", - "”,\n", - ":“\n", - "34\n", - "N2\n", - "555\n", - "22\n", - "555\n", - "21\n", - "19\n", - "0∶1\n", - "2∶2\n", - "5∶0\n", - "0∶0\n", - "34\n", - ",20\n", - "27\n", - "21\n", - "22\n", - "2∶0\n", - "2∶2\n", - "31\n", - "21\n", - ",H\n", - "5∶0\n", - "、《\n", - "1998\n", - "homesick。\n", - "”!\n", - ",《\n", - "”,\n", - ")(\n", - "》,\n", - "》、《\n", - "……\n", - "1950\n", - "1953\n", - ",1956\n", - "……\n", - ":“\n", - "……”\n", - "……\n", - "……\n", - 
"》,\n", - "》、《\n", - "……\n", - "》、《\n", - "》,\n", - "》,\n", - "》、《\n", - "》,\n", - "1950\n", - "1976\n", - ",1979\n", - "》。\n", - "1958\n", - "1956\n", - "26\n", - "》。\n", - ",《\n", - "》,\n", - "1∶0\n", - "92\n", - "”,\n", - "”,\n", - "1962\n", - "3∶0。\n", - ":“\n", - ",“\n", - "”。\n", - "1982\n", - "1∶1\n", - "1/4\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "5000\n", - "9800\n", - "1.4\n", - ",“\n", - "”。\n", - "3∶0\n", - ",B\n", - ",“\n", - "”。\n", - "1982\n", - "0∶3\n", - "1/4\n", - ",LG\n", - ",B\n", - ":1998\n", - "28\n", - "25、26\n", - "43\n", - ",5\n", - "3∶0\n", - ",6\n", - "17\n", - "1∶0。\n", - ",7\n", - ",C\n", - "1∶1\n", - "4∶0\n", - "6∶4\n", - "0∶0\n", - "3∶1\n", - "442\n", - "433\n", - "60\n", - "1938\n", - "”。\n", - "32\n", - "19\n", - "600\n", - ",A\n", - "”。\n", - "23\n", - ",《\n", - "”。\n", - "”、“\n", - "”,\n", - "3∶0\n", - "3∶0\n", - "23\n", - ",9\n", - ",11\n", - "1∶1\n", - "17\n", - "19\n", - "160\n", - "90\n", - "105\n", - "21\n", - "2∶0。\n", - "33\n", - "2∶0\n", - "21\n", - "17\n", - "1∶1\n", - "1∶1\n", - ")(\n", - ",“\n", - "”。\n", - "32\n", - ",6\n", - ",“\n", - ":“\n", - "》,\n", - ",16\n", - "50%。\n", - "1992\n", - "40%;1994\n", - "31%;1996\n", - "28%。\n", - "”。\n", - "1994\n", - "2.5\n", - ",1990\n", - "2.25\n", - ",16\n", - "37\n", - "2.31\n", - "”。\n", - "32\n", - "———\n", - "”、“\n", - "”、“\n", - "(www.worldcup98.net.cn)\n", - "200\n", - "1400\n", - "49\n", - "37\n", - "2.31\n", - "3、\n", - "2、\n", - "1、\n", - "……\n", - "5000\n", - "4000\n", - "1000\n", - "2000\n", - "3700\n", - ",5\n", - "40\n", - ":“\n", - ":“\n", - ",20\n", - "3、\n", - ",\n", - "○\n", - "1、\n", - "110\n", - "110\n", - "1997\n", - "110\n", - "2、\n", - "1、\n", - "3、\n", - "2、\n", - ",3000\n", - "7000\n", - "21\n", - "34\n", - "28\n", - "NBA\n", - "86∶82\n", - "3∶1\n", - "100\n", - "704\n", - "4∶2\n", - "3∶1,\n", - ":“\n", - "”1·5\n", - "”。\n", - "”,\n", - "———\n", - "32\n", - "”,\n", - "32\n", - "19\n", - "”。\n", - "58\n", - ",9\n", - "37\n", - ",7\n", - "2∶2\n", - "40\n", - "2∶2\n", - ",10\n", - "700\n", - "1999\n", - "1999\n", - "“9”\n", - ",1999\n", - "500\n", - "46\n", - ",“\n", - ":“\n", - ":《\n", - "———\n", - "15%\n", - "27\n", - "1997\n", - "1998\n", - "18.7\n", - "94\n", - "1904\n", - "”。\n", - ":6\n", - "26\n", - "2∶1\n", - "28\n", - "38\n", - ",7\n", - "39\n", - ",5\n", - "2∶1\n", - "1995\n", - "2∶1\n", - "3∶0\n", - ",“\n", - "45\n", - "”,\n", - ":“\n", - "8000\n", - "———\n", - "”,\n", - "32\n", - ",1982\n", - "1996\n", - "———\n", - "41.8%\n", - "5625\n", - ":“\n", - "22\n", - ",“\n", - "”。\n", - ":“(\n", - "”。\n", - "21\n", - ",1200\n", - "———\n", - "”。\n", - "———\n", - "2006\n", - ",2002\n", - "2000\n", - "40\n", - "2002\n", - "23\n", - "”。\n", - "”———\n", - "”,\n", - ",“\n", - "2、\n", - "———“\n", - "”。\n", - "1、\n", - "———“\n", - "”。\n", - ":“\n", - ":“\n", - "88∶93\n", - "NBA\n", - "2∶1\n", - "NBA\n", - "43\n", - "54∶96\n", - ",“\n", - "”。\n", - "32\n", - "40\n", - "“4”,\n", - "70\n", - "———\n", - "———\n", - "70∶59\n", - "83∶81\n", - "41\n", - "37∶28\n", - "72∶67\n", - "71∶65\n", - "No.1(\n", - ")。\n", - "”,\n", - "---\n", - ":“\n", - ":“\n", - "70\n", - "191\n", - "111\n", - "80\n", - "———\n", - "21\n", - "62\n", - "2/3\n", - "111\n", - "80\n", - "191\n", - "10∶21、21∶19\n", - "21∶9\n", - "18∶21、21∶17\n", - "21∶11\n", - "79∶93\n", - "45∶44\n", - "17\n", - "100\n", - "180\n", - "2/3\n", - "(24\n", - "94\n", - "83227\n", - "66124\n", - "57122\n", - "47219\n", - "54419\n", - "53518\n", - "53518\n", - "45417\n", - "36415\n", - "42714\n", - "35514\n", - 
"26512\n", - "32811\n", - "17510\n", - "50。\n", - "62\n", - "56。\n", - "74。\n", - "10000\n", - "32\n", - "57\n", - "49。\n", - "5000\n", - "10000\n", - "5000\n", - "46\n", - "23,\n", - "53\n", - "76\n", - "1500\n", - "61。\n", - "800\n", - "01\n", - "48。\n", - "200\n", - "22\n", - "96。\n", - "100\n", - "95\n", - "17\n", - "36\n", - "2∶1\n", - "2∶1\n", - "21∶10\n", - "14∶22\n", - "13∶17\n", - "21∶19\n", - "3∶0\n", - "1∶1\n", - "0∶0\n", - "1∶1\n", - "0∶1\n", - "4∶2\n", - "0∶0\n", - "37\n", - "0∶1\n", - "1∶1\n", - "WNBA\n", - "WNBA\n", - "WNBA(\n", - "3∶0\n", - "0∶3\n", - ",3\n", - "7∶15、12∶15、7∶15。\n", - "———\n", - "》,\n", - ";“\n", - "”,“\n", - "”。\n", - ",“\n", - "”。\n", - "”,\n", - "1984\n", - "……”\n", - ",“\n", - ",“\n", - ",“\n", - "”。\n", - "”。\n", - "”,\n", - "”。\n", - "》,\n", - "》(\n", - "),\n", - "”,\n", - "”。\n", - "”。\n", - ":“\n", - "……\n", - "……\n", - "1952\n", - "》,\n", - "1972\n", - "———\n", - "、、\n", - "》,\n", - "”。\n", - "1947\n", - "”;\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》,\n", - "1994\n", - ",“\n", - "”。\n", - "”。\n", - ":“\n", - "2∶1\n", - "0∶2(3∶6、2∶6)\n", - "3000\n", - "2000\n", - "32\n", - "”,\n", - "7000\n", - "8000\n", - "80\n", - "45\n", - "2.5\n", - "”,\n", - "26\n", - ",56\n", - ";6\n", - ",“\n", - "”。\n", - "1∶1\n", - "1∶0\n", - "1∶2\n", - "1998\n", - "1∶1\n", - "”。\n", - ",“\n", - "”。\n", - "”,\n", - "”。\n", - "1988\n", - "3∶1\n", - "15∶11\n", - "15∶7\n", - "16∶14\n", - "3∶0\n", - "15∶7\n", - "3∶1\n", - "31\n", - "35\n", - "83∶83,\n", - "45∶40\n", - "71∶88\n", - "93∶95\n", - "NBA\n", - "”,\n", - "3.5\n", - "85∶88\n", - "(NBA)\n", - ",300\n", - "13267\n", - "12483\n", - "17\n", - "354\n", - ",218\n", - "218\n", - "1997\n", - "600\n", - "”。\n", - ":“\n", - "”……\n", - "”。\n", - ":“\n", - "”。\n", - "”———\n", - "”,\n", - "100\n", - ":“\n", - "63%,\n", - "15%\n", - ":“\n", - ":“\n", - ":“\n", - "1/8\n", - ",1/8\n", - ":“D\n", - ",13\n", - ",“\n", - "”。\n", - ":“\n", - ",“\n", - "”。\n", - ",“\n", - "1∶1\n", - ":“\n", - "”。\n", - ":“\n", - ":“\n", - "———\n", - ")(\n", - "82\n", - "62\n", - "”。\n", - "、5\n", - "88∶83\n", - ",7\n", - "NBA\n", - "NBA\n", - "、10\n", - "65∶54\n", - "2∶2\n", - "2∶3\n", - "4∶1\n", - "138\n", - "4∶1。\n", - ",“\n", - "138\n", - "NBA\n", - "“ILOVETHISGAME”。\n", - "NBA\n", - ",“\n", - "”,\n", - "6000\n", - "1/3\n", - "”。\n", - "200\n", - "22\n", - "2000\n", - "○○○\n", - "83,\n", - "”,\n", - "”,\n", - "1996\n", - "、1997\n", - "90\n", - "”。\n", - "1990\n", - ",32\n", - "75%—90%\n", - ":3200\n", - ",11\n", - ",12\n", - "”。\n", - "0∶3\n", - "”。\n", - "”,\n", - "”。\n", - "1∶0\n", - "、2∶2\n", - "25\n", - "”。\n", - "22\n", - "”。\n", - "173\n", - "23\n", - "38\n", - "33\n", - "233\n", - "38\n", - "33\n", - "”,\n", - "”……\n", - "”、“\n", - "”、“\n", - "”……\n", - "”,\n", - "”:“\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - "———“\n", - "54∶60\n", - "96∶89\n", - "40\n", - "76∶75\n", - "66∶63\n", - "———26\n", - "96∶60。\n", - ":E\n", - ",F\n", - "15%\n", - "27\n", - "32\n", - "90%\n", - "”。\n", - "2500\n", - "3.3\n", - ",6\n", - "370\n", - "480\n", - "4000\n", - ",850\n", - "1.3\n", - "7500\n", - "64\n", - "、32\n", - "1998\n", - "1994\n", - "———“\n", - "”,\n", - "(14)、\n", - "(9)、\n", - "(7)、\n", - "(6)、\n", - "(2)、\n", - "(2)。\n", - "4∶1\n", - "40\n", - "31\n", - "26\n", - "……\n", - "60—65\n", - ":“\n", - "……”\n", - "100\n", - "、400\n", - "、1500\n", - "、5000\n", - "17\n", - "35\n", - "90\n", - "———\n", - "70\n", - "80\n", - "19\n", - "85\n", - "90\n", - "80\n", - "90\n", - "95\n", - "90\n", - "88\n", - "(http∶//www.peopledaily.com.cn)\n", 
- ":http∶//www.worldcup.net.cn。\n", - ",E\n", - "79∶68\n", - ";E\n", - "103∶76\n", - ";F\n", - "82∶57\n", - ";F\n", - "88∶79\n", - "74\n", - "49∶37\n", - "40%。\n", - "83%。\n", - "94∶93\n", - "110—120\n", - "200\n", - "192\n", - "”2002\n", - "31\n", - "40\n", - "80%\n", - ":“\n", - "———\n", - "”,\n", - "”。\n", - "90\n", - "”,\n", - "1992\n", - "67%。\n", - "58∶97\n", - "61∶52\n", - "28∶42\n", - "20%,\n", - "54∶72\n", - "60%,\n", - "22\n", - "40\n", - "67∶72\n", - "70∶72,\n", - "32∶28\n", - "31\n", - "70∶72\n", - "100\n", - "1∶1\n", - "1∶1\n", - "3∶1\n", - "1∶1\n", - "37\n", - ",6\n", - "82226\n", - "65123\n", - "57022\n", - "46218\n", - "52517\n", - "44416\n", - "43515\n", - "35414\n", - "35414\n", - "42614\n", - "34513\n", - "26412\n", - "31810\n", - "1659\n", - "31\n", - "“2\n", - "”、“\n", - "360\n", - "9.800\n", - "0.38\n", - "360\n", - "9.575\n", - "“SE(\n", - "E)”\n", - "1998\n", - "、1\n", - "nef\n", - ",nef\n", - "nef\n", - "11%\n", - "49%。\n", - "CCR5\n", - "gp120,\n", - "“AIDSVAX”\n", - "25\n", - "”。\n", - "》28\n", - "”。\n", - "28\n", - "29\n", - "1996\n", - "25\n", - ",1975\n", - "7000\n", - "400\n", - "1995\n", - ",1996\n", - "17\n", - "39\n", - ",65\n", - ",20\n", - "1996\n", - "348\n", - "300\n", - "40%,\n", - "25%。\n", - "15%,\n", - "60%。\n", - "8%\n", - "50%。\n", - "1995\n", - ";1961\n", - ";1962\n", - ",1996\n", - "7700\n", - "8999\n", - ",1996\n", - "3600\n", - ",6.6\n", - "9.1\n", - "1998\n", - "1997\n", - "3000\n", - "2100\n", - ",100\n", - "1996\n", - "135\n", - "1·2\n", - "28\n", - "27\n", - ",90%\n", - "100%\n", - "2003\n", - "1995\n", - ",1996\n", - "23\n", - "27\n", - "(SOHO)\n", - "3000\n", - "20%。\n", - "80%。\n", - "80%\n", - "70\n", - "27\n", - "80%。\n", - "》25\n", - "1000\n", - "76\n", - "17\n", - "25\n", - "6000\n", - "3200\n", - ",“\n", - ",“\n", - "”,\n", - "2、\n", - "”。\n", - "1、\n", - "2000\n", - "3000\n", - ":“\n", - "’,\n", - "’。\n", - "1998\n", - "370\n", - "40,\n", - ",1997\n", - "39\n", - "39\n", - "”。\n", - ",“\n", - "1993\n", - "”,\n", - "———“\n", - "”。\n", - "1994\n", - "1993\n", - "25000\n", - "1991\n", - "”,\n", - "400\n", - "、3\n", - "、8000\n", - ",7\n", - "1996\n", - "1995\n", - "”、“\n", - ",1996\n", - "”,\n", - "”、\n", - "”,\n", - "1990\n", - "26\n", - "“122”\n", - ":“\n", - "”4\n", - "40\n", - "“122”\n", - "”,\n", - ":“\n", - "”4\n", - "09\n", - ",“122”\n", - ",0\n", - "35\n", - "“122”\n", - "“122”\n", - ",“122”\n", - "“122”\n", - ",“\n", - "“122”\n", - ")。\n", - "”。\n", - "1998\n", - "”。\n", - "95%,\n", - "80%,\n", - "”。\n", - "———\n", - ":“\n", - "600\n", - "124\n", - "126\n", - "1995\n", - ":“\n", - "……\n", - "1000\n", - "300\n", - "40\n", - "1997\n", - "600\n", - "3/5,\n", - "———\n", - "”,\n", - "”。\n", - ",“\n", - ",6\n", - ":“\n", - ",“\n", - "”,\n", - "3000\n", - "2%\n", - "”,\n", - "”,\n", - "”。\n", - "1994\n", - "1994\n", - "5000\n", - "1993\n", - ":1991\n", - "1989\n", - "1991\n", - "2000\n", - "),\n", - "1992\n", - "5000\n", - "3.3\n", - "、1\n", - "、5000\n", - "、1\n", - "8.8\n", - "”———\n", - "600\n", - "”,\n", - "1.765\n", - "1.25\n", - "1800\n", - "1250\n", - "1998\n", - "9.5%\n", - "4.9%\n", - ":“\n", - "1998\n", - "26.16\n", - "9%,\n", - "1989\n", - "236\n", - "234\n", - "———\n", - ",“\n", - "215\n", - "260\n", - "185\n", - "222\n", - ",“\n", - "522\n", - "100\n", - "3.5\n", - "25\n", - ",27600\n", - ",20\n", - ",3787\n", - "1000\n", - "(1\n", - "38000\n", - "100\n", - "150\n", - "”,\n", - "”。\n", - "0.3%\n", - ",2\n", - "19.9%,3、4\n", - "6.4%\n", - "7%,5\n", - "3%\n", - "6.6%。\n", - "140\n", - "140.34\n", - "2.60\n", - "25\n", - 
":“\n", - "8000\n", - "8000\n", - "7500\n", - "32\n", - "30%。\n", - "20%—30%。\n", - "1996\n", - "8000\n", - "40\n", - "),\n", - "1.5\n", - "39\n", - "1000\n", - "3000\n", - "60%,\n", - "90\n", - "8%,\n", - "1990\n", - "390\n", - "1995\n", - "640\n", - "5.5%\n", - "7.6%。\n", - "21\n", - "33%\n", - "68%,\n", - "26%\n", - "5%。\n", - "34\n", - "75%,50\n", - "50%,60\n", - "10%。\n", - "70%,\n", - "1.5\n", - "11%。\n", - "1976\n", - "9%\n", - "1996\n", - "32%。\n", - "6.5\n", - "1.65\n", - "3400\n", - "1994\n", - "1995\n", - "3000\n", - ",1996\n", - "7000\n", - "1.3\n", - ",3000\n", - "997\n", - "25\n", - "2000\n", - "124.5\n", - "2690\n", - "2650\n", - "260\n", - ")24\n", - "135.5\n", - "60%\n", - "0·25%,\n", - "1/4\n", - "23\n", - "1·3\n", - "23\n", - ",1996\n", - "1998\n", - ",12\n", - "1996\n", - "6570\n", - "6000\n", - "6369\n", - "23\n", - "23\n", - "44%,\n", - "26%。\n", - "41%,\n", - ",“\n", - "”。\n", - "23\n", - "23\n", - "295\n", - "2500\n", - "1.3\n", - "2010\n", - "2020\n", - "1966\n", - "25\n", - "25\n", - "26\n", - "39\n", - "1974\n", - "、1979\n", - "、1981\n", - "1987\n", - "1/3\n", - "2/5。\n", - "56.7\n", - "12063.5\n", - ")。\n", - "35%,\n", - "162\n", - "(1\n", - "1.20635\n", - "218.7\n", - "75\n", - "1300\n", - "23\n", - "22\n", - "100\n", - "2000\n", - ":“\n", - "90\n", - "1800\n", - "1998\n", - "22\n", - "2、6\n", - "21\n", - "3000\n", - "1、\n", - "1997\n", - "16·9\n", - "1880\n", - "1997\n", - "0·25\n", - ",1998\n", - "124·5\n", - "13·16\n", - "12·76\n", - "21\n", - "18·68\n", - "13·02\n", - "22\n", - "11·92\n", - "10·55\n", - ",“\n", - "”。\n", - "22\n", - "”。\n", - ",1997\n", - "131\n", - "31.8\n", - "1994\n", - "0.7%\n", - ",1997\n", - "10%。\n", - "”,\n", - "1/3\n", - ",1/3\n", - "1997\n", - "5000\n", - ",160\n", - ",10\n", - "1800\n", - ",1997\n", - "80\n", - "31\n", - "”。\n", - "2005\n", - "10·8%,\n", - "2010\n", - "2020\n", - "22\n", - "2.5\n", - "(1\n", - "1997—2001\n", - "4.85\n", - "1.5\n", - "1996\n", - "2002\n", - "232\n", - "7%\n", - "75\n", - "73\n", - "22\n", - "138·53\n", - "13600\n", - "12000\n", - "17\n", - "40%。\n", - "26\n", - "145\n", - "2670\n", - "1600\n", - "17\n", - "224\n", - ",1\n", - "1·9\n", - "530\n", - "70%。\n", - "2020\n", - "22\n", - "1998\n", - "3000\n", - ",1000\n", - "600\n", - "4·5\n", - "200\n", - "31\n", - "35\n", - ",122\n", - "21\n", - "1999\n", - "5000\n", - "8·3\n", - "(LaNina)\n", - "70%—80%。\n", - "”,\n", - "25\n", - "25\n", - "40\n", - "”,\n", - ",X\n", - "”,\n", - "1200\n", - "12%,\n", - "43\n", - "1/3\n", - "8%\n", - "3500\n", - "1500\n", - "750\n", - ",1996\n", - "18.25\n", - "1995\n", - "5.25%。\n", - ",1996\n", - "1995\n", - "9.4\n", - "---\n", - "22\n", - "25\n", - "22\n", - "”。\n", - "15.19%。\n", - "7.08%,\n", - "79.55%;\n", - "67.39%;\n", - "9.81%。\n", - "195.64\n", - "3.2%,\n", - "5.4%\n", - "2、\n", - "1、\n", - ",21\n", - ",21\n", - "21\n", - "21\n", - "1000\n", - "21\n", - "1996\n", - ";1997\n", - "”。\n", - "28\n", - "1996\n", - "21\n", - ",6\n", - "8AFE\n", - "1.3\n", - "1.6\n", - "1.0\n", - ";6\n", - "3,\n", - "5·897\n", - "6·934\n", - "200\n", - "122·07\n", - "100·14\n", - "8712·87\n", - "19\n", - "”。\n", - "”。\n", - "72\n", - "28\n", - ",1996\n", - "”,\n", - "1985\n", - "100%。\n", - "19\n", - "11%\n", - "200\n", - "160\n", - "19\n", - "420\n", - "40\n", - "60\n", - "55%\n", - "100%\n", - "10%、\n", - "12·5\n", - "25\n", - "17\n", - "95\n", - "6·7\n", - "19\n", - ",6·187\n", - "36\n", - "1962\n", - "4%、\n", - "1·6%\n", - "”,\n", - ",“\n", - "”。\n", - "”,\n", - "”,\n", - "800\n", - "400\n", - "1996\n", - "”。\n", - "65%。\n", - 
"17\n", - ",《\n", - ":55%\n", - ",60%\n", - ",“\n", - "”“\n", - ":“\n", - ",“\n", - "———\n", - "’。\n", - ":“\n", - "……”\n", - "”。\n", - "”,\n", - "57\n", - ":“\n", - ":“\n", - "》,\n", - ":“\n", - "……\n", - ":1987\n", - ";1988\n", - ";1990\n", - "5000\n", - ";1991\n", - ";1991\n", - "—1993\n", - ";1995\n", - ";1996\n", - "37\n", - "”。\n", - "》(\n", - "---\n", - "》(\n", - "1999\n", - ",2000\n", - "”,\n", - "1925\n", - "》(5\n", - "19\n", - "),\n", - "100733。\n", - "”,\n", - "、《\n", - "):\n", - "1994\n", - "》,\n", - ":“\n", - "———“\n", - ":“\n", - "):\n", - "”,\n", - "):\n", - "BISS\n", - "”,\n", - "”,\n", - "……\n", - "”。\n", - "):\n", - "---\n", - "④)\n", - "90\n", - "40\n", - "1100\n", - "……\n", - "60\n", - "1959\n", - "43\n", - "28\n", - "120\n", - "1997\n", - "27\n", - ",77\n", - "1994\n", - "1985\n", - "’”。\n", - "60\n", - ":“\n", - "”:1959\n", - "》,\n", - "”。\n", - "、《\n", - "170\n", - ",《\n", - "1946\n", - "60\n", - "1942\n", - "1944\n", - "76\n", - "17\n", - ":“\n", - "17\n", - "17\n", - ",“\n", - "》),\n", - "》)\n", - "1992\n", - "”。\n", - "》17\n", - ",100\n", - ",88\n", - "17\n", - "313\n", - "55\n", - "、LG\n", - "5.3%。\n", - "0.7%,\n", - "200\n", - "17\n", - ",17\n", - "137\n", - "17\n", - "3.15\n", - "141\n", - ",“\n", - "”,\n", - "60\n", - ",80\n", - "3000—3500\n", - "4000\n", - "3000\n", - "4000\n", - "2000\n", - "2000\n", - "1200\n", - "300\n", - "400\n", - "1000\n", - "80\n", - "90\n", - "80\n", - "3000\n", - "1991\n", - "1984\n", - "1985\n", - "1933\n", - ",1956\n", - ")(\n", - "135·50\n", - "41.00\n", - "41.50\n", - "1。\n", - "22.21\n", - "295.15\n", - "143.25\n", - "137.89。\n", - "———\n", - "23.28\n", - "1776.41\n", - "500\n", - "19.52\n", - "1107.14\n", - "37\n", - "164.17\n", - "8829.46\n", - "17\n", - "17\n", - ",17\n", - "143.47\n", - "136.37。\n", - "17\n", - "”。\n", - "356\n", - "747—200\n", - "17\n", - "1982\n", - ":“\n", - ":“\n", - "》16\n", - "1972\n", - "TRW\n", - "7500\n", - "140\n", - "”,\n", - "200\n", - "17\n", - "141.98\n", - "3.15\n", - "“NO”。\n", - "150\n", - "”,\n", - "”,\n", - "”,\n", - "”;\n", - ":“\n", - "”。\n", - "”。\n", - "220\n", - "、1\n", - "1600\n", - "1985\n", - "2025\n", - "GDP\n", - ",65\n", - "1991\n", - "12%\n", - "2010\n", - "20%\n", - "2020\n", - "25%,\n", - ";1986\n", - "1997\n", - "90\n", - "2020\n", - "150\n", - "),\n", - "“140\n", - "146.43\n", - "1990\n", - ",“\n", - "”。\n", - "29\n", - ",“\n", - "”。\n", - "1991\n", - "25\n", - "”———\n", - "VOA\n", - "1997\n", - ":“\n", - "”,\n", - ",VOA\n", - "”,\n", - "120\n", - "1993\n", - ",VOA\n", - "1987\n", - ",VOA\n", - "VOA\n", - "1896\n", - "1987\n", - "VOA\n", - "VOA\n", - "6000\n", - "VOA\n", - "(VOA)\n", - "———\n", - "———\n", - "36\n", - "65\n", - "100\n", - "———\n", - "90%\n", - "———\n", - "”。\n", - "———\n", - ",“\n", - "”。\n", - "》、《\n", - "、80\n", - "90\n", - "1997\n", - "1983\n", - "1982\n", - "1975\n", - "1929\n", - "---\n", - "80\n", - "”。\n", - ",1997\n", - "1996\n", - ":“\n", - "1995\n", - "1997\n", - "60—70\n", - "1991\n", - "1988\n", - "1986\n", - "5000\n", - "1986\n", - ",12\n", - ",20\n", - "1982\n", - "———\n", - "62\n", - "1964\n", - "”,\n", - "』---\n", - "……\n", - "1901\n", - "10%\n", - "15%\n", - "60\n", - "150\n", - "1000\n", - "945\n", - "40\n", - "(DVD)\n", - "”(SACD)\n", - "660\n", - "1996\n", - "380\n", - "38·79\n", - "12%。\n", - "263·39\n", - "15·98\n", - "5·7%。\n", - "42·45\n", - "5·2%,\n", - "140∶1\n", - "1434∶1\n", - "300\n", - "288.21\n", - "1987\n", - "13·8%。\n", - "2·4%,\n", - "33\n", - "11699\n", - "10·3%。\n", - "15000\n", - "14825.17\n", - "197.16\n", - "“140\n", - 
",6\n", - "146.43\n", - "2.42\n", - "1990\n", - "3·\n", - "《2000\n", - "2·\n", - "”,\n", - ":1·\n", - ")6\n", - ":“\n", - ",“\n", - "”。\n", - ",“\n", - "”,“\n", - "”。\n", - "》12\n", - "21\n", - "1992\n", - "———\n", - "80%\n", - "100%,\n", - ",“\n", - "”。\n", - ",“\n", - "200\n", - "90\n", - "37\n", - ",5\n", - "”(\n", - "),\n", - ",30\n", - "2、\n", - "1、\n", - "48\n", - "1∶143.79\n", - "144.72\n", - ",1998\n", - "1.9%\n", - "21.1%,\n", - "1.2%,\n", - "1955\n", - "23\n", - ",0.7%\n", - "1974\n", - "0.5%,\n", - "1.3%,\n", - "5.3%。\n", - "1997\n", - "0.7%。\n", - "2100\n", - "3100\n", - "1995\n", - "5%\n", - "10%\n", - "1998\n", - "5·5\n", - "7000\n", - "140\n", - "150\n", - "160\n", - "———\n", - "———\n", - "1997—1998\n", - "75000\n", - "570\n", - "),\n", - "12900\n", - "98\n", - "70%,\n", - "240000\n", - "150∶1,\n", - "2933\n", - "90%\n", - "1998\n", - "46%\n", - "1999\n", - ";18%\n", - "1998\n", - "”;25%\n", - "1998\n", - "———\n", - ",4\n", - "3.9%\n", - "4.1%,\n", - "1953\n", - "7.2%,\n", - "1995\n", - "80%。\n", - "144.72∶1,\n", - "”。\n", - ",“\n", - "”(\n", - "),\n", - "”,\n", - "”。\n", - "1997\n", - "2.9%,\n", - "-0.7%。\n", - "”。\n", - "1998\n", - "1997\n", - "”,\n", - "1997\n", - "”(\n", - "”(\n", - "),\n", - "”,\n", - ",“\n", - "”。\n", - ",1998\n", - "0.6%。\n", - "”。\n", - ",《\n", - ",《\n", - "”,\n", - "”。\n", - "ATM\n", - "(ATM)\n", - "2—12\n", - "40%\n", - "”:\n", - "IBM\n", - "30%—50%。\n", - "6%,\n", - "”,\n", - "”,\n", - "30%\n", - "1000\n", - "70%\n", - "1/3,\n", - "1/3。\n", - "“WINTELCO”。\n", - "770\n", - "”。\n", - "40\n", - "(SANFRANCISCO),\n", - "“FRANCISCO”。\n", - "———\n", - "(CISCO)\n", - "”,\n", - "———\n", - "(CISCO)\n", - "……\n", - "237\n", - "———\n", - "124\n", - "21\n", - ":“\n", - "”1996\n", - "VCD\n", - "VCD\n", - "———\n", - "———\n", - "”,\n", - "VCD,\n", - "VCD\n", - "C-Cube\n", - "OK、\n", - "VCD\n", - "VCD\n", - "VCD\n", - "VCD\n", - "VCD\n", - "VCD\n", - "VCD\n", - "VCD\n", - "DVD\n", - "VCD\n", - "300\n", - "VCD\n", - ",1997\n", - "1000\n", - "60%—70%\n", - "---\n", - "VCD\n", - "……\n", - "100\n", - "100\n", - ",《\n", - "100\n", - ",“\n", - "100\n", - "》,\n", - "《“\n", - "》,\n", - "》,\n", - "》,\n", - "……\n", - "》,\n", - "》,\n", - "》,\n", - "》,\n", - "1000\n", - "”———\n", - "”,\n", - "”,\n", - "100\n", - ":“\n", - "”,“\n", - "”。\n", - ",《\n", - "100\n", - ",3\n", - "49\n", - "43%,\n", - "1∶140\n", - "1993\n", - "28%,\n", - "18%,\n", - "9.1\n", - "1987\n", - "4.5%\n", - ",1997\n", - "7400\n", - "1998\n", - "1996\n", - "25\n", - "637\n", - "17\n", - "450\n", - "1996\n", - ",“\n", - "”。\n", - "1998\n", - ":“\n", - "”,“\n", - "”。\n", - "20,\n", - "”。\n", - "36\n", - "190\n", - "280\n", - "150\n", - "141\n", - "“145\n", - ":10\n", - "412\n", - "、11\n", - ",“\n", - "”、“\n", - "10%,\n", - "0.56%,\n", - "1.31%。\n", - ":10\n", - ",8\n", - "“140\n", - ",11\n", - "141\n", - "142\n", - ":“\n", - "2000\n", - "400\n", - "),\n", - "1700\n", - "1997\n", - "1995\n", - "”———\n", - "80%\n", - "500\n", - "2.5%\n", - "4.5%。\n", - "9.1%。\n", - "1.6%\n", - "6%。\n", - ",4\n", - "1.3%。\n", - "40\n", - "1128.57\n", - "1073.47\n", - "15014.04\n", - "325.22\n", - "141∶1,\n", - "142∶1,\n", - "141.67∶1,\n", - "0.84\n", - "5250\n", - "134.6\n", - "1336\n", - "34.3\n", - "),\n", - "51%。\n", - "15%—18%\n", - "0.5\n", - "(AMD)\n", - "(NSI)\n", - "1000\n", - "7%,\n", - "20%—30%\n", - "80%\n", - "90%\n", - "“X86”\n", - "”。\n", - "(DEC)、\n", - "40\n", - "”,\n", - "21\n", - "140·67\n", - "1991\n", - "140·30\n", - "1∶141·35\n", - "40∶1\n", - "2·5%,5\n", - "9·2%。\n", - "40%\n", - "50%\n", - "8·27%。\n", - 
"》。\n", - "150\n", - "90\n", - "1·6\n", - "6000\n", - "70\n", - "100\n", - "100\n", - "1·5\n", - "100\n", - "662\n", - "2340\n", - "2、\n", - "35\n", - "1、1998\n", - "”,\n", - ",“\n", - "”,\n", - "100\n", - "150\n", - ",“\n", - "250\n", - "”。\n", - "40%,\n", - "2500\n", - "2750\n", - "74\n", - "99·99%\n", - "60%,\n", - "”。\n", - "120\n", - ",“\n", - "39\n", - "39%\n", - "4%。\n", - "———\n", - "(DNA),\n", - "200\n", - "1997\n", - "70\n", - ",90\n", - "“RVA4EV”\n", - "3.985\n", - "3.990\n", - "1.6934\n", - "1.7095\n", - "43.12\n", - "43.95\n", - ",8\n", - "140\n", - "5·79\n", - "312·37\n", - "311·70\n", - "68\n", - "1998\n", - "1997\n", - "2、\n", - "1、\n", - "200\n", - ",1997\n", - "430\n", - "8.4%,\n", - "1—4\n", - "135.4\n", - "14.1%。\n", - "1997\n", - "1997\n", - ",1997\n", - ",1997\n", - "1997\n", - "9%\n", - ",1997\n", - "1992\n", - ":“\n", - "1996》\n", - "”,\n", - "”;\n", - "21\n", - "80\n", - "11·5\n", - "40\n", - "25\n", - "35\n", - "0·9\n", - "1899\n", - "1894\n", - "”,\n", - "》7\n", - "29\n", - "”。\n", - "”。\n", - "27\n", - "2000\n", - "31\n", - "90%\n", - "———\n", - "1400\n", - "2200\n", - "25%\n", - "1200\n", - "1960\n", - ",“\n", - "”。\n", - ":“\n", - "”,\n", - "5000\n", - "31\n", - "140∶1。\n", - "”,“\n", - "”。\n", - ",“\n", - "”。\n", - "9000\n", - "“140\n", - ",9\n", - "140∶1\n", - "”,\n", - "140·70\n", - "1·13\n", - "1996\n", - "84001”\n", - "84001”\n", - "1799\n", - "199\n", - "19\n", - "》6\n", - "40\n", - "2/3\n", - ",1/3\n", - "5000\n", - "DNA\n", - "DNA\n", - "DNA(\n", - "2、\n", - "1、6\n", - ",140\n", - "60\n", - "170\n", - "170\n", - "1965\n", - "1968\n", - "”,\n", - ",30\n", - "2006\n", - "2006\n", - ",“\n", - ":“\n", - ",‘\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1975\n", - "85\n", - "1974\n", - "34\n", - ",29\n", - "1720\n", - "13.5\n", - "1750\n", - ")。\n", - ":“\n", - "”。\n", - "”;\n", - ",“\n", - "”。\n", - "”;\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "29\n", - "”,\n", - "1969\n", - "”。\n", - ":“\n", - "200\n", - "、60\n", - "1940\n", - "”……\n", - "25\n", - ":“\n", - "”。\n", - "1998—1999\n", - "41·78\n", - "41·80\n", - "1,\n", - "268·50\n", - "130·20\n", - "1.2\n", - "3.5\n", - "4.3\n", - ")。\n", - "29\n", - "11.75\n", - "3.8\n", - ")。\n", - "2.7\n", - "),\n", - "3.4\n", - ")。\n", - "1000\n", - "80\n", - "46\n", - "1/3。\n", - "80\n", - "1984\n", - "1975\n", - ",3\n", - ",1957\n", - "5000\n", - "1935\n", - "”(ORANGINA)\n", - "”,\n", - "75\n", - "6000\n", - "1992\n", - "4000\n", - ",1992\n", - "300\n", - "1600\n", - "1997\n", - "1996\n", - "46\n", - "45\n", - "100\n", - "1995\n", - "2000\n", - "300\n", - "2000\n", - "40\n", - "2600\n", - "1996\n", - "1000\n", - "188.3\n", - "61.4%。\n", - "6000\n", - "1995\n", - "100\n", - "100\n", - "1300\n", - ":“\n", - "200\n", - "”。\n", - "200\n", - "1993\n", - "”。\n", - "”。\n", - "1992\n", - "2000\n", - "96%,\n", - "1800\n", - ",30\n", - "”,\n", - "”,\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”……\n", - "8000\n", - "2.6\n", - "68%。\n", - "80\n", - "”。\n", - ":“\n", - "1800\n", - "……\n", - "”18\n", - "237\n", - "19\n", - "200\n", - "”,\n", - "……\n", - "610\n", - "40\n", - ",19\n", - "”。\n", - "……\n", - "1994\n", - "45\n", - "2500\n", - "35\n", - "”,\n", - "”,\n", - "”。\n", - "”,\n", - "”,\n", - "2/3\n", - "”,\n", - "”。\n", - ":“\n", - "”,\n", - ",“\n", - ",“\n", - "”,\n", - ":“\n", - ",80\n", - "”(\n", - ")(\n", - "21\n", - "”、“\n", - "”。\n", - "”,\n", - "”,\n", - ",70\n", - "KU\n", - "1.9\n", - "540\n", - ",“\n", - ",“\n", - ",1999\n", - "3·5%\n", - "70\n", - "”。\n", - "150\n", - "1.6\n", - 
"10·1%。\n", - "3.0976\n", - "18·9%。\n", - "139.70\n", - "138.83。\n", - ",2\n", - "“140\n", - "139.75∶1,\n", - "9000\n", - "”,\n", - ",“\n", - "1997\n", - "50%;4\n", - "4.1%,\n", - "4%\n", - "”;0.5%\n", - "……\n", - "1995\n", - "80∶1,3\n", - "40%\n", - "130\n", - "135∶1,\n", - "139∶1,\n", - "“140\n", - "”,\n", - "“140\n", - "”———\n", - ",14\n", - "410\n", - "759\n", - "100\n", - "3512\n", - "6.5\n", - "1991\n", - "280\n", - "200\n", - "19\n", - "884\n", - ",13\n", - "120\n", - "”,\n", - "”,\n", - ":“\n", - "150%,\n", - "10%,\n", - "139\n", - "150\n", - "》,\n", - "2·73\n", - "700\n", - "1000\n", - "2·25\n", - "1/3\n", - "”。\n", - "8·65\n", - "2·064\n", - "1·3\n", - "7640\n", - "1973\n", - ",25\n", - "25\n", - "90%。\n", - "),\n", - "150\n", - "2002\n", - "19\n", - ",”\n", - ",90%\n", - "1996\n", - "1.6\n", - "1.2\n", - "4000\n", - ",”\n", - "1998\n", - "29\n", - ",”\n", - "1997\n", - "1995\n", - "1/2\n", - ",1996\n", - ",1997\n", - "1994\n", - ",“\n", - "37\n", - "200\n", - "———\n", - "”(\n", - "21\n", - "250—300\n", - "900\n", - "100\n", - "250\n", - "21\n", - "60%\n", - "300\n", - "30%—35%。\n", - "450\n", - "550\n", - "450—600\n", - "1997\n", - "”,\n", - "500\n", - "21\n", - "500\n", - "350—400\n", - "280\n", - "ICE\n", - "TGV\n", - "1832.5\n", - "270\n", - "300\n", - "515.3\n", - "160\n", - "200\n", - "300—350\n", - "20.9\n", - "39.2\n", - "4000\n", - "90%\n", - "1964\n", - "210\n", - "0.65%。\n", - "3.14\n", - "1.77\n", - "4.13\n", - "3.43\n", - "19\n", - "120\n", - "70%\n", - ",1825\n", - "38\n", - "600\n", - "34\n", - "”。\n", - "14%—15%,\n", - "10%—20%,\n", - "20%,\n", - "40%。\n", - "90\n", - "350\n", - "1991\n", - "300—350\n", - "AAR\n", - "250\n", - "21\n", - "1983\n", - "”。\n", - "1994\n", - "1991\n", - "80\n", - "1961\n", - "31\n", - "150\n", - ",150\n", - "17\n", - "200\n", - ",130\n", - "5·32%\n", - "40·6%。\n", - "-6·21%。\n", - ",1998\n", - "-10·1%,\n", - "85%。\n", - "200\n", - "200\n", - ")。\n", - "21\n", - "98%。\n", - "———\n", - "(SAT)\n", - ";14\n", - "85\n", - "138.1\n", - "62%。\n", - "3609.9\n", - "4120\n", - "107\n", - "),\n", - "14%;\n", - "82.8\n", - "139.1\n", - "68%,\n", - ":6\n", - "1998—1999\n", - "1/4\n", - "2、6\n", - "1、5\n", - "31\n", - "71\n", - "”,\n", - "”。\n", - ",6\n", - "1200\n", - "60%\n", - "”。\n", - "1999\n", - "1997\n", - "381\n", - "1400\n", - "2200\n", - "25%\n", - ",“\n", - "”,\n", - "”,\n", - "1996\n", - "5.29\n", - ",44%\n", - "》,\n", - ",76%\n", - ";91%\n", - "1997\n", - "1996\n", - "”,\n", - "1994\n", - ",5\n", - "115\n", - "1984\n", - "、80\n", - "”5\n", - "1997\n", - "160\n", - "37\n", - "》5\n", - "29\n", - "《〈\n", - "》。\n", - "49·5\n", - "44\n", - "45\n", - "100\n", - "27\n", - "47\n", - "700\n", - "26\n", - "100\n", - "300\n", - "318\n", - "1994\n", - "1863\n", - "《20\n", - "100\n", - ":1960\n", - ",“\n", - "500\n", - "150\n", - "),\n", - "”。\n", - "2000\n", - "300\n", - "19\n", - ",5\n", - "27\n", - "500\n", - "”。\n", - ",6.8\n", - "2500\n", - "2025\n", - "4800\n", - "22\n", - "800\n", - "30%,\n", - "7%。\n", - "150\n", - "90%\n", - "2/3\n", - "50%,\n", - "40%,\n", - "1/3\n", - ":1995\n", - "39.2\n", - ",1996\n", - "40.3\n", - ",1997\n", - "150\n", - "、200\n", - "1991\n", - ",“\n", - "31\n", - "1600\n", - "31\n", - ",5\n", - "400\n", - "”。\n", - "140\n", - "”。\n", - ",70%\n", - "200\n", - "60\n", - "46.8%,\n", - "1700\n", - "1991\n", - "1/3\n", - "”。\n", - "600\n", - "700\n", - "1300\n", - "27\n", - "140\n", - "”。\n", - "”,\n", - "150%。\n", - "27\n", - "60%\n", - "80%;\n", - "6.20\n", - "1.2\n", - "27\n", - "6·7\n", - "31\n", - "31\n", - "31\n", - 
",“\n", - ",“\n", - ":“\n", - ":“\n", - "”,\n", - "”。\n", - "”。\n", - "”,\n", - ",“\n", - "2500\n", - "22\n", - "1∶10。\n", - ":“\n", - "27%—33%,\n", - "60%\n", - "……\n", - ",“\n", - ":“\n", - "28\n", - ",1995\n", - "10·8\n", - "31\n", - "90%\n", - "14%\n", - "20%\n", - "5%\n", - "12%。\n", - "”,\n", - "4000\n", - "2·5\n", - "31\n", - "28\n", - "1989\n", - "29\n", - "31\n", - "6·4\n", - "3500\n", - "250\n", - "7·1\n", - "64000\n", - "800\n", - "2840\n", - ",1·23\n", - ",2·5\n", - "6·014\n", - "255.2\n", - "31\n", - "200\n", - "41·75—41·78\n", - "100(BSE—100)\n", - "200(BSE—200)\n", - "———\n", - "3897·10\n", - "210·71\n", - "》30\n", - "140\n", - "29\n", - "6·7\n", - "50%\n", - "150%。\n", - "10%。\n", - "27\n", - "80%,\n", - "1∶6·2,\n", - "1∶6·188\n", - "380%。\n", - "29\n", - "80%;\n", - "29\n", - "27\n", - "1∶6·2\n", - "1∶6·133;\n", - "28\n", - "28\n", - "23\n", - "21\n", - "4.1\n", - "1.6\n", - "5.9\n", - "A。\n", - "A。\n", - "23\n", - "29\n", - "1500\n", - "1988\n", - ",1990\n", - "97%\n", - "”,\n", - "”。\n", - ":“\n", - "120\n", - "1227\n", - ",“\n", - "100%”。\n", - "1.8\n", - "100%,\n", - ":“\n", - "23\n", - "”,\n", - ",1997\n", - "22\n", - "———\n", - "19\n", - "”,\n", - "1995\n", - ":“\n", - "1995\n", - ":“\n", - "1995\n", - "1997\n", - ":“\n", - "1996\n", - ":“\n", - "1995\n", - "”。\n", - "35\n", - "”。\n", - "28\n", - "28\n", - "1993\n", - "27\n", - "833\n", - "833\n", - "》29\n", - "833\n", - "833\n", - "21\n", - "2000\n", - "12%。\n", - ",1992\n", - "2010\n", - "1990\n", - "80%。\n", - ",“\n", - "2500\n", - "30%。\n", - "21\n", - "》。\n", - "ISO14000\n", - "200\n", - "593\n", - "、400\n", - "、209\n", - "1970\n", - "365\n", - "100\n", - "230\n", - "25%,\n", - "1990—1995\n", - "12%,\n", - "17\n", - "61%。\n", - "1000\n", - ",95%\n", - "2500\n", - "1/4\n", - "1/3;\n", - "600\n", - "15%\n", - "270\n", - "9000\n", - "21\n", - "100\n", - "60\n", - "1945\n", - "60\n", - "(6\n", - "……\n", - "28\n", - ",3\n", - "3000\n", - "72\n", - ";90\n", - "251\n", - "22\n", - "”。\n", - "』(\n", - ",“\n", - "630\n", - "40\n", - "』(\n", - "28\n", - "27\n", - "27\n", - "29\n", - "25\n", - "80\n", - "108\n", - "74%,\n", - "28\n", - "”,\n", - "28\n", - "28\n", - "28\n", - "21\n", - "”。\n", - "100\n", - "”。\n", - "”。\n", - "1929\n", - "8%—10%,\n", - "3·85\n", - "168\n", - "2400\n", - "2000\n", - "》27\n", - "》26\n", - "》27\n", - "”。\n", - "28\n", - "29\n", - "27\n", - "1987\n", - ":“\n", - ":“\n", - ":“\n", - "28\n", - ",27\n", - ":“\n", - "40\n", - "》、\n", - "》、\n", - ",1000\n", - "28\n", - ",《\n", - "28\n", - "》26\n", - "》26\n", - "》25\n", - "》26\n", - "》25\n", - "》26\n", - ",“\n", - "”。\n", - "》28\n", - ",21\n", - "28\n", - "”。\n", - "28\n", - "27\n", - "28\n", - "”,\n", - "”。\n", - "1996\n", - "”,\n", - "13.1%\n", - "40%\n", - "30%,\n", - "9%。\n", - "1997\n", - ",1996\n", - "85%\n", - "1997\n", - "1995\n", - "1996\n", - "40%\n", - "56\n", - "56\n", - "101\n", - ",650\n", - "27\n", - "6.3\n", - ",1991\n", - "”。\n", - "28\n", - "1999\n", - "27\n", - "27\n", - ",30\n", - ",160\n", - "》、\n", - "2000\n", - "80\n", - "28\n", - "27\n", - "”,\n", - "40%,\n", - "———\n", - "300\n", - "108\n", - "1972\n", - "”。\n", - "75%\n", - "108\n", - "28\n", - ")24\n", - "25\n", - "27\n", - ")(\n", - ",1993\n", - "26\n", - "27\n", - "26\n", - ":“\n", - "》50\n", - ",“\n", - "”。\n", - "27\n", - "1999\n", - "17\n", - "———\n", - "25\n", - "70\n", - "600\n", - "”4\n", - "1200\n", - "》。\n", - "90\n", - "1995\n", - "33\n", - "1950\n", - "1997\n", - "2.1\n", - "72.32\n", - "7.3\n", - "6828\n", - "64%,\n", - "”。\n", - "“KCC”\n", - "63.2\n", - 
"“KCC”\n", - "2:\n", - "1:\n", - ":“\n", - "80%,\n", - "600\n", - "56%。\n", - "”。\n", - "、“\n", - ",1997\n", - "4000\n", - ":“\n", - "98%\n", - "100%,1997\n", - "3000\n", - "1000\n", - "70\n", - "—80\n", - "—30\n", - ":“\n", - "70\n", - "1990\n", - "1000\n", - "200\n", - "”。\n", - "”。\n", - "500\n", - "1997\n", - "2.5\n", - "19.1%。\n", - "350\n", - "90\n", - "K,\n", - "”。\n", - "”:\n", - "”,\n", - "1992\n", - "2000\n", - "90\n", - "40\n", - "1993\n", - "”,\n", - "”。\n", - "”,\n", - "1997\n", - "1.2\n", - "600\n", - "K”\n", - "90\n", - "……\n", - "K”\n", - "K”,\n", - "1/4\n", - "1/3\n", - "1/4\n", - "2/3\n", - ",9\n", - ",1997\n", - "2090\n", - "5140\n", - "8%\n", - "1996\n", - "9·47%,\n", - "1997\n", - "10·8%。\n", - "1997\n", - "1996\n", - "1994\n", - "”,\n", - "120\n", - "5000\n", - "1·26\n", - "1000\n", - "1500\n", - "1996\n", - "3000\n", - "1·5\n", - "5000\n", - "15%\n", - "1·5\n", - "1/3\n", - ",5\n", - "5.15\n", - "2000\n", - "1992\n", - "500\n", - "1/3\n", - "———\n", - ",“\n", - "”。\n", - "21—23\n", - ";30%\n", - "27\n", - "66\n", - "1988\n", - "2000\n", - "25\n", - "1997\n", - "1992\n", - "1.2\n", - "CDMA(\n", - "1987\n", - "22\n", - "”。\n", - ",“\n", - "”。\n", - "21\n", - "》,\n", - "”、\n", - "600\n", - "……\n", - ":“\n", - "100\n", - "》、《\n", - "》、《\n", - "》3\n", - "”。\n", - ",“\n", - "”,\n", - "”。\n", - "40\n", - "25\n", - ")(\n", - "25\n", - "25\n", - "21\n", - "23\n", - "25\n", - "23\n", - "25\n", - "VX\n", - "23\n", - "122\n", - ")、\n", - "25\n", - ")。\n", - "25\n", - ",39\n", - "25\n", - "26\n", - "23\n", - "22\n", - "”,\n", - "”,\n", - ":“\n", - "23\n", - "26\n", - "23\n", - ",《\n", - ",《\n", - "2045\n", - "200\n", - ",《\n", - "40%\n", - "1.5\n", - "35\n", - "75%\n", - "80%\n", - "20%\n", - "25%。\n", - "1101\n", - "》,\n", - "153\n", - "75%\n", - "82%\n", - "”、“\n", - "1999\n", - "2500\n", - ",“\n", - "”。\n", - "600\n", - "21\n", - "”,\n", - "13·1%\n", - "1948\n", - "1967\n", - "”,\n", - "2020\n", - "70%\n", - "6000\n", - "28\n", - ",28\n", - "1998\n", - ":“\n", - ",“\n", - ",《\n", - "”,“\n", - "”。\n", - "》21\n", - "27\n", - "5.4\n", - "23\n", - "22\n", - ",400\n", - "200\n", - "2600\n", - "1100\n", - "539\n", - "98%。\n", - "”。\n", - "23\n", - "”。\n", - "22\n", - "2、6\n", - "23\n", - ",1983\n", - "1、\n", - "100\n", - "23\n", - "23\n", - "2/3\n", - "”。\n", - "23\n", - "23\n", - "”,\n", - "”,\n", - "21\n", - "”。\n", - "”。\n", - "70\n", - "22\n", - ":“\n", - "”。\n", - "1998\n", - "1991\n", - "23\n", - "19\n", - "1996\n", - "22\n", - "22\n", - "”。\n", - "21\n", - "21\n", - "”。\n", - "22\n", - "”,\n", - "”。\n", - "”;\n", - "”。\n", - "”。\n", - ":“\n", - "”。\n", - "17\n", - "”。\n", - "”。\n", - "”,\n", - "21\n", - "……\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”,\n", - "73\n", - "300\n", - ":“\n", - "21\n", - "21\n", - "16%\n", - "12%,\n", - "25\n", - "2000\n", - "”“\n", - "”。\n", - "”,\n", - "14.5%,\n", - "37%。\n", - "1994\n", - "1986\n", - "”,\n", - "282\n", - "50%。\n", - "31\n", - "1994\n", - "46·24%。\n", - "40\n", - "96%\n", - "50·64%\n", - "21\n", - ",1200\n", - "21\n", - ",3\n", - "500\n", - "21\n", - "21\n", - ",1991\n", - "1954\n", - "21\n", - "21\n", - "31\n", - "265\n", - ",137\n", - ",117\n", - ",11\n", - "21\n", - ",“\n", - "35\n", - "22\n", - "———“\n", - "17\n", - ")、\n", - "),\n", - "22\n", - ")、\n", - "”,\n", - "”。\n", - "”。\n", - "”,\n", - "”。\n", - ",“\n", - "”,\n", - "25\n", - ",5\n", - "21\n", - ",“\n", - "21\n", - "21\n", - "19\n", - "1980\n", - "21\n", - ":“\n", - "》。\n", - "400\n", - "600\n", - "83\n", - "4000\n", - "1881\n", - "1885\n", - "1.7\n", - "100\n", - 
"19\n", - "19\n", - "1974\n", - "”。\n", - "1993\n", - "1997\n", - "1146\n", - "1146\n", - "”,\n", - "”。\n", - "21\n", - "19\n", - "40\n", - "25\n", - "1000\n", - "”,\n", - "”,\n", - "”,\n", - ":“\n", - "19\n", - "19\n", - ",“\n", - "21\n", - "22\n", - "45\n", - "52\n", - "1175\n", - "52\n", - "1175\n", - "”,\n", - "18%\n", - "21\n", - ":“\n", - "”,\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "”。\n", - "1963\n", - ",17\n", - ",“\n", - "”。\n", - "……\n", - "1993\n", - "1979\n", - "21\n", - "90\n", - ")。\n", - "100\n", - "47\n", - ":“\n", - ":“\n", - ":“\n", - "1/4,\n", - "21\n", - "19\n", - "”,\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - ",《\n", - ",《\n", - "———\n", - ",“\n", - "、“\n", - "”,\n", - "1950\n", - "》(1972),\n", - "”。\n", - "》(1964—1965),\n", - ",《\n", - ":“\n", - "》(1948.11)\n", - "1948\n", - ":“\n", - "》。\n", - "(1905—1951)\n", - "》(1965—1966)、\n", - "》(1970—1972)、\n", - "》(1960—1963)\n", - "》(1965)\n", - "》(1975)\n", - "》(1963)\n", - "》(1947)\n", - "》(1948)\n", - "1945\n", - "”,\n", - "”)\n", - "1945\n", - "”,\n", - "”,\n", - ",“\n", - "”。\n", - "”。\n", - "———\n", - "”。\n", - "》(\n", - "),\n", - "”,\n", - "”,\n", - "》、《\n", - "》、《\n", - "》(\n", - "1971\n", - "1997\n", - ")。\n", - "”(\n", - ")。\n", - ",“\n", - ":“\n", - ",“\n", - "”。\n", - "》(1989)\n", - "”。\n", - ")、\n", - "”。\n", - "”!\n", - ":“\n", - "”;“\n", - ")、\n", - "”。\n", - "1953\n", - "(1890—1969)\n", - "”、“\n", - "”、“\n", - "”,\n", - "”,\n", - "”,\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "75%\n", - "13·1%\n", - "1948\n", - "1967\n", - ",“\n", - "”,\n", - "”。\n", - "”、\n", - "15%。\n", - "13·1%\n", - "”。\n", - "……\n", - "300\n", - "800\n", - "21\n", - "19\n", - ":“\n", - "200\n", - "47\n", - "21\n", - ",“\n", - "”。\n", - ",1700\n", - "200\n", - "”。\n", - "1856\n", - "17\n", - "),\n", - "),\n", - "1000\n", - "3000\n", - "60\n", - "80\n", - "1996\n", - "17\n", - ",89\n", - "1999\n", - "1999\n", - "1995\n", - ",43\n", - "4、\n", - "”。\n", - "2、\n", - "———\n", - "1、\n", - "1823\n", - "”。\n", - "6163\n", - "3—5\n", - "2700\n", - "1997\n", - ")2:\n", - "1:\n", - ",1997\n", - "389\n", - "』,\n", - "』。\n", - "3、\n", - "2、\n", - "1、\n", - "(TBM)\n", - "1998\n", - "166\n", - "1998\n", - "112\n", - "1995\n", - "9505\n", - "8951\n", - "264\n", - "237\n", - "456\n", - "18456\n", - "23\n", - "TBM\n", - "1996\n", - "267.8\n", - "97\n", - "160\n", - "60%,\n", - "”,\n", - "1997\n", - "36\n", - "、36\n", - ":“\n", - "40\n", - ":“\n", - ":“\n", - ":“\n", - "17\n", - "70\n", - "1/4\n", - ",4\n", - "“9”\n", - ",“CC”\n", - "“9”\n", - "———\n", - "40\n", - "、62\n", - "64\n", - "1987\n", - "、1995\n", - "1999\n", - "19\n", - "———\n", - "”。\n", - "》;\n", - "……\n", - ")。\n", - "”、\n", - "”。\n", - "---\n", - "3∶3\n", - ",“\n", - "5∶2\n", - "4∶3\n", - "9∶5\n", - ",10\n", - "29\n", - "109∶94\n", - "40\n", - ":105\n", - ",19\n", - "4∶6\n", - "6∶3\n", - "1∶6\n", - "1∶2\n", - ":“\n", - "1300\n", - "2002\n", - ":“\n", - "2002\n", - "10080\n", - "70\n", - "100\n", - "2100\n", - "1999\n", - "OTC\n", - "),\n", - "1998\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ");\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ")、\n", - ");\n", - ");\n", - ");\n", - ");\n", - ")。\n", - ",1997\n", - "1997\n", - ",1\n", - ",1997\n", - "3540\n", - "1647\n", - "1893\n", - "21\n", - "0.6%。\n", - ",1997\n", - "……\n", - 
"”,\n", - "207\n", - "Jeep\n", - ",1999\n", - "—2001\n", - ",“\n", - "”。\n", - "15000\n", - "———\n", - ",7\n", - "”。\n", - "35\n", - "15000\n", - "21\n", - ",“\n", - "250\n", - "2000\n", - "”。\n", - "400\n", - "”。\n", - "”。\n", - "90\n", - "40\n", - ",3\n", - "),\n", - "1123\n", - "880\n", - "60\n", - "45\n", - "),\n", - "),\n", - "60\n", - "60\n", - "45\n", - "———5\n", - "32\n", - "2∶0\n", - "21∶19\n", - "3∶1\n", - "23∶21\n", - "17∶9\n", - "20∶15\n", - "15∶18\n", - "21∶20。\n", - "”;\n", - "……\n", - "79\n", - "200\n", - "”,\n", - "4∶1\n", - "”,\n", - "”。\n", - "1/4\n", - "2∶1\n", - "5∶6\n", - "1/4\n", - "2∶0\n", - "2∶2。\n", - "90\n", - "———\n", - "1∶0\n", - ":“\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - "》,\n", - "》、《\n", - "》,\n", - "———《\n", - "》、《\n", - "》、《\n", - "”。\n", - "……\n", - "1999\n", - "1992\n", - "1994\n", - "1998\n", - ";1996\n", - ";1997\n", - "……\n", - ":“\n", - "”;\n", - ":“\n", - "”;\n", - ":“\n", - "……”\n", - "1995\n", - "”,\n", - "”,\n", - "”(\n", - "”、\n", - "”。\n", - "1995\n", - "1996\n", - ",“\n", - "”。\n", - "”。\n", - "”,\n", - "”,\n", - "1993\n", - ",“\n", - "1994\n", - "’。\n", - "”,\n", - "“92\n", - "”,\n", - "1992\n", - "1993\n", - "”,\n", - "”,“\n", - ":“\n", - "1976\n", - "……\n", - "1992\n", - "———\n", - "”。\n", - "”,\n", - "……\n", - "”,\n", - ":“\n", - "”。\n", - "』(\n", - "……\n", - "———\n", - "———\n", - "……\n", - "”,\n", - "……\n", - "……\n", - "”,\n", - "1993\n", - "347\n", - "250\n", - "22\n", - ",4000\n", - "1000\n", - "×8\n", - "1968\n", - ",4\n", - "1.3\n", - ",23\n", - "17\n", - "2000\n", - "25\n", - "(18\n", - "25\n", - "3∶3\n", - "1975\n", - "1985\n", - "1993\n", - "47\n", - "”。\n", - "22\n", - "』,\n", - "”,\n", - "”。\n", - "1997\n", - "3000\n", - "28\n", - "300\n", - ",A、B、C\n", - "”,\n", - "”。\n", - ",7\n", - "”。\n", - "”。\n", - "0∶3\n", - "”,\n", - "”,\n", - ":“\n", - "”?\n", - ",12\n", - "———\n", - ":“\n", - "29\n", - "29\n", - "200\n", - "200\n", - "79\n", - "76\n", - "28\n", - "31\n", - ",1998\n", - "25\n", - "”。\n", - ",“\n", - "31\n", - "23\n", - ",3\n", - "1998\n", - "1999\n", - "101』\n", - "28\n", - "6500\n", - "28\n", - "26\n", - "28\n", - "42\n", - "60\n", - "429\n", - "4100\n", - "200\n", - "”。\n", - "”。\n", - "”。\n", - "”,“\n", - "”,“\n", - ",“\n", - ",“\n", - ":“\n", - "”,\n", - "200\n", - "400\n", - "800\n", - "2000\n", - "5.6\n", - "1.6\n", - "39\n", - "1872\n", - "125\n", - "57\n", - ",1992\n", - "27\n", - "1998\n", - "———\n", - "1966\n", - "33\n", - "1966\n", - "1993\n", - "1993\n", - "33\n", - "500\n", - "1991\n", - "35\n", - ",28\n", - "———\n", - "1966\n", - "1967\n", - "2000\n", - "29\n", - "15.7%\n", - "1979\n", - "1997\n", - "71\n", - "”。\n", - ":“\n", - "”。\n", - "27\n", - "17\n", - "”。\n", - "34\n", - "———\n", - "2004\n", - "———\n", - "1999\n", - ",1999\n", - ",2000\n", - "1997\n", - "35\n", - "26\n", - "283\n", - "2·6\n", - "2000\n", - "110\n", - "49\n", - "7·3%,\n", - "6·7%,\n", - "1965\n", - "1996\n", - "31\n", - "9·2%,\n", - "44350\n", - "1533\n", - "7096\n", - "、5699\n", - "5632\n", - "11521\n", - "11405\n", - "15336\n", - "23646\n", - "1996\n", - "74335\n", - "51492\n", - "1998\n", - "129\n", - "600\n", - "27\n", - "1997\n", - "17\n", - "28\n", - "28\n", - "1996\n", - "、1997\n", - "39\n", - "70\n", - "1997\n", - "157\n", - "1996\n", - "224\n", - "19770\n", - "COSCO(\n", - "1995\n", - "1995\n", - "1996\n", - "1997\n", - "1996\n", - "1997\n", - "1996\n", - "ISO9002\n", - ";1996\n", - "1997\n", - "14·66\n", - "1997\n", - "18·57\n", - ",3\n", - "25%\n", - ",“\n", - "1·7\n", - "160\n", - "27\n", - "100\n", - "2%,\n", - 
"11%。\n", - "1850\n", - "150\n", - "25\n", - "23\n", - "2、\n", - "1、\n", - "”,\n", - "1978\n", - "1996\n", - "450\n", - ",“\n", - "”。\n", - "27\n", - ",27\n", - "”。\n", - "38\n", - "600\n", - "250\n", - "350\n", - "100\n", - "735\n", - "”,\n", - "”。\n", - "22\n", - "40%。\n", - ")4\n", - "13.22\n", - "12.59\n", - ",“\n", - "23\n", - "1996\n", - "”,\n", - "1996\n", - "”,\n", - "20%,\n", - "20%\n", - "”,\n", - "1992\n", - "500\n", - "1991\n", - "”,\n", - ",”\n", - ",“\n", - "42\n", - ",5\n", - "1990\n", - "100\n", - ",“\n", - "”,\n", - "2000\n", - "”———\n", - "5000\n", - "1.5\n", - ":“\n", - "20%\n", - "”。\n", - "1999\n", - "90\n", - "23\n", - "”。\n", - "25\n", - "”,\n", - "4000\n", - ";3000\n", - "17\n", - "25\n", - "25·3\n", - "6·41\n", - "44·28\n", - "8·27\n", - "10·6\n", - ")。\n", - "54\n", - ",4000\n", - "22\n", - "16466\n", - "421\n", - "40\n", - "1·02\n", - "31\n", - "22\n", - "28\n", - "29\n", - ",9\n", - "9000\n", - "1995、1996\n", - "1997\n", - "21\n", - "1179\n", - "12·1%,\n", - "1/3\n", - ",1997\n", - "10·7%。\n", - "2、\n", - "”(\n", - ")。\n", - "1、\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - ":“\n", - "”。\n", - "2000\n", - "”。\n", - "4%\n", - "30%\n", - "35\n", - "3000\n", - "”,\n", - "300\n", - "60%\n", - "70\n", - "”、“\n", - "”,\n", - "560\n", - "3000\n", - "、300\n", - "21\n", - "”。\n", - ",4\n", - "1991\n", - "”、“\n", - "”、“\n", - ",3\n", - "8000\n", - "1986\n", - "”,\n", - ",83\n", - "”,\n", - "1985\n", - "1979\n", - ":“\n", - "39\n", - "77\n", - ",1979\n", - "”:\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - ":“\n", - "”,\n", - "1991\n", - "》,\n", - "”,\n", - "”,\n", - "873\n", - "1980\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - ":“\n", - "”1941\n", - "1937\n", - "”,\n", - ",“\n", - "”,“\n", - "”。\n", - "”。\n", - "34\n", - "”,\n", - "”。\n", - "》。\n", - "ABC》。\n", - ":“\n", - "”。\n", - ",“\n", - "60\n", - ",1/4\n", - "”。\n", - ",“\n", - ":“\n", - "1992\n", - ":“\n", - "70\n", - ",1937\n", - "1997\n", - "26\n", - "23\n", - "40\n", - "———\n", - "1989\n", - "1990\n", - "》。\n", - "100\n", - "40\n", - "BOT\n", - "1988\n", - "90\n", - "1949\n", - "1939\n", - "1937\n", - "”。\n", - "1931\n", - "100\n", - "---\n", - "1991\n", - "ISO9002\n", - "21\n", - "”。\n", - "》,\n", - "1998\n", - "”,\n", - "”。\n", - "1997\n", - "2003\n", - "3%\n", - "1998\n", - "260\n", - "40\n", - "23\n", - "21\n", - "22\n", - "2、22\n", - "1、\n", - "3%,\n", - "2.8%,\n", - "3%。\n", - "1800\n", - "10.6%。\n", - "1.7%\n", - "2.9%。\n", - "2.9%,\n", - "3.1%,\n", - "1998\n", - "1997\n", - "1996\n", - "4.4%,1998\n", - "1999\n", - "6.7%\n", - "7.0%。\n", - "5%,1999\n", - "4%。\n", - "1997\n", - "1996\n", - "8.5%,\n", - "195\n", - "1.8793\n", - ",1997\n", - "2.2%,1998\n", - "1999\n", - "2.7%\n", - "2.8%。\n", - "”,\n", - "”,\n", - "1996\n", - "0.3\n", - "60%\n", - "25%,\n", - "2.4%。\n", - "”。\n", - "”,\n", - "1998\n", - "”。\n", - "”。\n", - ":“\n", - ",1998\n", - "25\n", - "1998\n", - "》,\n", - "1998\n", - "1999\n", - "2.8%\n", - "3%。\n", - "1924\n", - "1931\n", - "150\n", - "200\n", - ",30\n", - "140\n", - "1996\n", - "22\n", - "3·2%\n", - "9%\n", - ",1998\n", - "8·8%\n", - "6%\n", - "7·1%\n", - "3·9%,\n", - "6·8%\n", - "4·8%。\n", - ",1997\n", - "《1998\n", - ",1997\n", - "7·5%\n", - "6·1%,\n", - "4%\n", - "22\n", - "53\n", - "22\n", - "727—200\n", - "53\n", - "21\n", - "5.5%\n", - "5%,\n", - "1973\n", - "1998\n", - "698·6\n", - "1111·6\n", - ",3\n", - "1179\n", - "1317\n", - "138\n", - "213\n", - "35·2%。\n", - "21\n", - "21\n", - "1/3\n", - "98”\n", - "8%,“\n", - "21\n", - "”,\n", - "500\n", - "21\n", - "1/5\n", - ":“\n", - 
"400\n", - "”。\n", - "———\n", - ",CAC40\n", - "25·29\n", - "3860·40\n", - "———\n", - "4%\n", - "5%,\n", - "DAX30\n", - "53·06\n", - "5388·94\n", - "22\n", - "21\n", - "500\n", - "2000\n", - "16·73\n", - "1903·87\n", - "6%,\n", - "115\n", - "43·10\n", - "9184·94\n", - "22\n", - "28\n", - "26\n", - ":“\n", - "80%\n", - "9000\n", - "1%。\n", - "19\n", - "———\n", - "77\n", - "GNP\n", - "2%,\n", - "5%—6%;\n", - "9%—10%,\n", - "5%\n", - "39\n", - "44\n", - "IMF\n", - "”,\n", - "23\n", - "21\n", - "33\n", - "67\n", - "57\n", - "IMF\n", - "4·\n", - "3·\n", - "2·\n", - "1·\n", - "”。\n", - "”。\n", - "IMF\n", - "1998\n", - "10%\n", - "IMF\n", - ",IMF\n", - "570\n", - "IMF\n", - "(IMF)\n", - "”(\n", - "”。\n", - "110\n", - "2、\n", - "1、4\n", - "2000\n", - "6500\n", - "8·5\n", - "6600\n", - "7600\n", - "8500\n", - "26\n", - "450\n", - "190\n", - "70\n", - ",1993\n", - "1·13\n", - ",1988\n", - "4000\n", - "330\n", - "1994\n", - "1995\n", - "1996\n", - "2005\n", - "1650\n", - "3.1\n", - "1.3\n", - "1990\n", - "13.3\n", - "1997\n", - "55.57\n", - "1990\n", - "41\n", - "1997\n", - "202\n", - "400%。\n", - "90\n", - "2000\n", - "”,\n", - "21\n", - "”。\n", - "33·7\n", - "4·3\n", - "1998\n", - "9·2\n", - "18·3\n", - "5·7\n", - "1996\n", - "117·2\n", - "116·2\n", - "418·2\n", - "9·79%。\n", - "19\n", - ",1997\n", - "534·4\n", - "1996\n", - "7·28%,\n", - "416·7\n", - "1996\n", - "2·9%。\n", - ":“\n", - ":“\n", - ",24\n", - "1.1\n", - "6500\n", - "2.3\n", - "”。\n", - "CNN\n", - ":“\n", - "26\n", - ",“\n", - "”。\n", - "1、\n", - "170\n", - "40%\n", - ",3\n", - "53%。\n", - "35\n", - "40\n", - ",3\n", - "25%。\n", - ",3\n", - "52·4\n", - "2·2\n", - "2000\n", - "50%\n", - "2000\n", - "1996\n", - "2001\n", - "60\n", - "62\n", - "2009\n", - "55\n", - "62\n", - "65\n", - "300\n", - "60\n", - "”),\n", - "0.5%\n", - "11%—12%\n", - "40\n", - "75%\n", - "24%,\n", - "22%。\n", - "6%,\n", - "9%。\n", - "1996\n", - "9.7%,\n", - "15.2%。\n", - "10%。\n", - "2%\n", - "3%。\n", - "1997\n", - "55%。\n", - "1995\n", - "6000\n", - "1995\n", - "20%\n", - "380\n", - "3997\n", - "”,\n", - ",80%\n", - "500\n", - "240\n", - ",6\n", - "103\n", - ":“\n", - "5%\n", - "”,\n", - ",1996\n", - "200\n", - "300\n", - "2000\n", - ",1989\n", - ",700\n", - "139\n", - ",540\n", - "173\n", - "26\n", - "13—16\n", - "26\n", - "1935\n", - "”,\n", - "4.4%,\n", - "3.8%;\n", - "14.1%;\n", - "4%,\n", - "9.3%;\n", - "7.2%,\n", - "3.9%,\n", - "3.2%,\n", - "1.9%。\n", - "4.7%,\n", - "640\n", - "37\n", - "0.8%\n", - "30%\n", - "60%。\n", - "45\n", - "45\n", - "60%\n", - "50%\n", - "45\n", - "50%,\n", - "66.7%,\n", - ")、\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”,\n", - "》、《\n", - "》(\n", - "---\n", - "”、“\n", - "”、“\n", - "”。\n", - "1996\n", - ",《\n", - "———《\n", - "》,\n", - ",“\n", - "”。\n", - "1992\n", - "1996\n", - ",1995\n", - ":“\n", - "1979\n", - ",30\n", - "---\n", - "60\n", - "60\n", - "61\n", - "64\n", - "1827\n", - "31\n", - ":“\n", - "”,“\n", - "”。\n", - "78\n", - "1927\n", - "”。\n", - "》、《\n", - "》、《\n", - "》、\n", - "、《\n", - "”,\n", - "”,\n", - ",《\n", - "1981\n", - "———\n", - "2007\n", - "———\n", - ",11\n", - "830\n", - "60\n", - "”,\n", - "40%,\n", - "……\n", - "300\n", - "38\n", - "38\n", - "”。\n", - "700\n", - ",“\n", - "”,\n", - "”。\n", - "”。\n", - "74\n", - "1941\n", - "1987\n", - "”,\n", - "17\n", - ",1500\n", - ",1300\n", - "”!\n", - "19\n", - "———\n", - ",“\n", - "”……\n", - ",1000\n", - "……(\n", - ")(\n", - "”。\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "1979\n", - ":“\n", - ",“\n", - "”,\n", - "80\n", - "70\n", 
- "60\n", - ":60\n", - ",“\n", - "”,\n", - "200\n", - "》。\n", - ":“\n", - "300\n", - ",“\n", - "”。\n", - "(1898—1989)\n", - "5.4\n", - "100\n", - "40\n", - "750\n", - "1996\n", - "1997\n", - "1997\n", - "100\n", - "600\n", - "45%\n", - "60\n", - "3%。\n", - "8%\n", - "1500\n", - "1/4\n", - "22\n", - "22\n", - "277\n", - "163\n", - "10%。\n", - "50%\n", - "1.57\n", - "617\n", - "1.73\n", - "769\n", - "834\n", - "284\n", - "90\n", - "1/3,\n", - "1.5\n", - ",15\n", - "1%\n", - "15%。\n", - "1973\n", - "60\n", - "59%。\n", - "———\n", - "60\n", - "21\n", - "”。\n", - "2001\n", - "2200\n", - "EDI、\n", - "1996\n", - "”,\n", - "”,\n", - "”,\n", - "60\n", - "41\n", - "33\n", - ",1996\n", - "”。\n", - "”,“\n", - "”。\n", - ",7\n", - "1·2\n", - "31\n", - "28\n", - "2·02\n", - "(1\n", - "0·54\n", - "85%\n", - "1000\n", - "27%,\n", - "36%,\n", - "———\n", - "3000\n", - "———\n", - "22\n", - "15·82\n", - ",14\n", - "13·45\n", - "55\n", - "13·29\n", - "21\n", - "13·22\n", - "12·59\n", - "”,\n", - "”,\n", - "”,\n", - "15·1203\n", - "64·5%,\n", - ",1997\n", - "1000\n", - "17439\n", - "17·4%。\n", - "1030·4\n", - "1154·3\n", - "124\n", - "(1\n", - "38\n", - "),\n", - "85%。\n", - "”。\n", - "1998—1999\n", - ",1998\n", - "6.93\n", - ",1999\n", - "90\n", - "500\n", - "1997\n", - "700\n", - "25%。\n", - "150\n", - "1980\n", - "28%。\n", - "4.6%,\n", - "3%\n", - "250\n", - "330\n", - "90\n", - "7.5\n", - "1996\n", - "15%\n", - "”。\n", - "3%\n", - "22\n", - ",3\n", - "40%。\n", - "2、\n", - ",4\n", - "1、\n", - "9000\n", - "298\n", - "2300\n", - "648\n", - "5700\n", - "128\n", - "87\n", - "7·5\n", - "3·3\n", - "108\n", - ",“\n", - "1·3%\n", - "2·7%\n", - "4·4%,\n", - "6·7%\n", - "4·1%\n", - "2·4%。\n", - "2·9%。\n", - "3·1%,\n", - "3·5%。\n", - "1974—1975\n", - "、1980—1983\n", - "1990—1991\n", - "90\n", - "1970\n", - ",1997\n", - "29\n", - "1998\n", - "-0.3%,\n", - ",10\n", - "132∶1\n", - "129∶1。\n", - "”,\n", - ",4\n", - "”。\n", - ",“\n", - "”。\n", - "2003\n", - "1.75%\n", - "”,\n", - ";1998\n", - ",1999\n", - "1998\n", - "”。\n", - "34%\n", - "3%。\n", - "1997\n", - "575\n", - "7500\n", - "NEC\n", - "3·2\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - "4000\n", - "454\n", - "”,\n", - "2006\n", - "2010\n", - "1999\n", - "”。\n", - "”。\n", - "60—70\n", - ",“\n", - "2005\n", - ";2007\n", - "2011\n", - "”。\n", - "2015\n", - "50—100\n", - "2000\n", - "299\n", - "1800\n", - "COLIA1\n", - "”。\n", - "P53\n", - "P53\n", - "1995\n", - "1500\n", - "27\n", - "1967\n", - "CBFI\n", - "COR。\n", - ",1988\n", - "》,\n", - ",1986\n", - "———\n", - "2、\n", - "1、4\n", - "34\n", - "12—18\n", - "1400\n", - "1.5\n", - "17\n", - "1996\n", - "》,\n", - "”。\n", - ",“\n", - "”,10\n", - "2.5\n", - "1400\n", - "700\n", - "5·9%\n", - "1·9%,\n", - "1995\n", - "18·4%。\n", - "17%,\n", - "12·8%\n", - "9·7%。\n", - "31·1%\n", - "36·3%,\n", - "2·3%\n", - "7·1%。\n", - "1383\n", - "45·6%,\n", - "41·7%\n", - "3030\n", - "2090\n", - "45%。\n", - ",《\n", - ";《\n", - "”。\n", - "”;《\n", - ";《\n", - "……\n", - "1982\n", - "humulin,\n", - "140\n", - "300\n", - "100\n", - "52·5%。\n", - "400\n", - "1400\n", - "29%\n", - "31%。\n", - "127\n", - "67%。\n", - "500\n", - "90\n", - "414\n", - "36\n", - "900\n", - "100\n", - "414\n", - "300\n", - "90\n", - "C—150\n", - ")1997\n", - "3000\n", - "75%\n", - "68%。\n", - "1996\n", - "9%\n", - "13%。\n", - "1997\n", - "81\n", - "1300\n", - "730\n", - "90%\n", - "———\n", - "》、《\n", - "》,\n", - "1998\n", - "”。\n", - "1850\n", - "1863\n", - "200\n", - "8%\n", - ",15\n", - "19\n", - "1997\n", - "(1997\n", - "1998\n", - "102190\n", - "163890\n", - "10%\n", 
- "30%,\n", - "1·6\n", - "19\n", - "1200\n", - "———\n", - "1998\n", - "59\n", - "39\n", - "71\n", - "4300\n", - "(2346\n", - "),\n", - "4530\n", - "249\n", - "65.7%、36.2%\n", - "33.7%;\n", - "11%,\n", - "5.3%。\n", - "7900\n", - "8700\n", - ",3\n", - "8904.44\n", - "4300\n", - "5009\n", - "5200\n", - "5911\n", - "2998\n", - "3800\n", - "8000、5000、6000\n", - "3000\n", - "1998\n", - "2.5%,\n", - "9000\n", - "、38\n", - "、3.550\n", - "、37\n", - "、1300\n", - "、1.59\n", - "———\n", - ",1\n", - ";2\n", - "1∶12000\n", - ";3\n", - "1∶133\n", - "1∶1.80\n", - "1∶1.84,\n", - "2.2%。\n", - "1957\n", - ",40\n", - "V2\n", - "V2。\n", - "1970\n", - "———\n", - "2006\n", - "236\n", - "、730\n", - "28\n", - ",1996\n", - "708\n", - "8000\n", - "400\n", - ",26\n", - ",300\n", - "”。\n", - ":18\n", - "———\n", - ",“\n", - "9.2\n", - "1990\n", - "32\n", - ",80\n", - "”,“\n", - "”。\n", - "1985\n", - "80\n", - "95%。\n", - "“2·15”\n", - "122\n", - "44\n", - "256\n", - "1997\n", - "、10\n", - "17\n", - "100%\n", - "10%\n", - "DPA)\n", - "DPA\n", - "70%\n", - "28\n", - "88%,\n", - ")、\n", - "200,\n", - "95%。\n", - ",“\n", - "28\n", - "1970\n", - "———\n", - "26\n", - "28\n", - "”。\n", - ":“\n", - "60\n", - "40\n", - "”,\n", - "36\n", - "25\n", - ",“\n", - "”。\n", - "4.3\n", - "”。\n", - "”,\n", - "———\n", - ",1997\n", - "31%,\n", - "30%,\n", - "18%、11%、10%。\n", - "1998\n", - "”。\n", - ",1997\n", - "1997\n", - "9.065\n", - "1996\n", - "2.669\n", - "1997\n", - "1998\n", - "110\n", - "1997\n", - "1998\n", - "240\n", - "28.7\n", - "33\n", - "2000\n", - ",25\n", - "”。\n", - "2.6\n", - "270\n", - "1995\n", - "40\n", - ")。\n", - "39\n", - "44\n", - "(ISO)\n", - "———\n", - "”,\n", - "”。\n", - "1994\n", - "(1998—2001)\n", - "27.5\n", - "30%。\n", - "500\n", - "1991\n", - "164\n", - "32\n", - "2000\n", - "1998\n", - "1998\n", - "1980\n", - "850\n", - "2350\n", - "70\n", - "45\n", - "28\n", - "1997\n", - "20660\n", - "2350\n", - "3%,\n", - "10%。\n", - "718\n", - "165\n", - "26%,\n", - "90\n", - "6600\n", - "1990\n", - "4080\n", - "61·8%。\n", - "2、\n", - "1、\n", - "442\n", - "1976\n", - ",20\n", - "1998\n", - "1·6\n", - "134∶1\n", - ",3\n", - "”,\n", - "”(\n", - "”)\n", - ",1998\n", - ",2\n", - "60\n", - ",3\n", - "———\n", - "———\n", - "———\n", - "———\n", - "4.3%,\n", - "700\n", - "5000\n", - "29\n", - "———\n", - ",3\n", - "430\n", - "70%\n", - "1998—1999\n", - "4%\n", - "20%\n", - "17%;\n", - "”,\n", - ",“\n", - "》,\n", - "》,7\n", - "23\n", - "3000\n", - "1993\n", - "”。\n", - ",1995\n", - "300\n", - "1992\n", - ",“\n", - "———“\n", - "”。\n", - ",3\n", - "1991\n", - ",3\n", - "”(\n", - "1990\n", - "90\n", - ":“\n", - "34\n", - "……\n", - "———\n", - "3.\n", - "2.\n", - "1.\n", - "),\n", - "C、A、E\n", - "9—10\n", - "1200\n", - "”,\n", - "(WHO)\n", - ":“\n", - ",1/3\n", - "”1992\n", - ",1991\n", - "1995\n", - ",1994\n", - "”,1994\n", - ",1995\n", - "”,\n", - ",1996\n", - "”,1996\n", - "CAD\n", - "”,\n", - "”,\n", - "、T\n", - "1990\n", - "”。\n", - "1990\n", - "、T\n", - "80\n", - "100\n", - "”。\n", - "”。\n", - "”。\n", - "』,\n", - "』,\n", - "』,\n", - "』。\n", - ":『\n", - "”,\n", - "3、\n", - "”,\n", - "2、\n", - "1、\n", - "32\n", - "29\n", - "466\n", - "……\n", - "120\n", - ":“\n", - ":“\n", - "”10\n", - ":“\n", - "、20\n", - "……\n", - ":“\n", - ":“\n", - ":“\n", - "1.8\n", - "1998\n", - "28\n", - "”:\n", - "”,\n", - "”,\n", - "”。\n", - "”。\n", - "1994\n", - "”,\n", - "”,\n", - "』:\n", - "』、『\n", - "』、『\n", - "』,\n", - "』,\n", - "』,\n", - "”,\n", - "”,\n", - "19\n", - ")。\n", - "》,\n", - ",7\n", - "17\n", - "—19\n", - "32.662\n", - "22.383\n", - 
"A19.114\n", - "17.095\n", - "10.581\n", - "16.112\n", - "A15.663\n", - "A15.524\n", - "A13.895\n", - "A13.221\n", - "23.222\n", - "22.143\n", - "20.204\n", - "15.585\n", - "12.571\n", - "21.172\n", - "15.643\n", - "15.154\n", - "14.505\n", - "13.80\n", - "1988\n", - ",10\n", - "636\n", - "24.5\n", - "23\n", - "”。\n", - "2241\n", - "1228\n", - "282.49%。\n", - "26\n", - "110%,\n", - "1000\n", - "108\n", - "100\n", - "1000\n", - "50、55、60、65\n", - "———\n", - "Ku\n", - "———\n", - "1997\n", - "2.35\n", - "1998\n", - "17\n", - "35786\n", - "600\n", - "19\n", - ")。\n", - "”,\n", - "”,\n", - "140\n", - "146.75\n", - ",6\n", - "17\n", - "60\n", - "136\n", - "(IMF)\n", - "56\n", - "1.83\n", - "100%,\n", - "150%,\n", - "1996\n", - "1.64\n", - "25\n", - "7.5%,\n", - "1.67\n", - "5.5%\n", - "9000\n", - "17\n", - "、5\n", - "、10\n", - "、10\n", - "3000\n", - "500\n", - "700\n", - "1700\n", - "800\n", - "1000\n", - "1995\n", - "80\n", - "1997\n", - "200\n", - "1994\n", - "1993\n", - "1.2\n", - "80\n", - "80\n", - "140\n", - "1985\n", - "1995\n", - "250\n", - "79.83\n", - "1994\n", - "7%,\n", - "1981\n", - "18.5%,\n", - "1992\n", - "3.1875%。\n", - "1997\n", - "1300\n", - "1979\n", - "》、《\n", - "》,\n", - "》、《\n", - "》(\n", - "”,\n", - "”,\n", - "”。\n", - "”。\n", - "———\n", - "《“\n", - ":〈\n", - ":“\n", - "》,\n", - ":“\n", - "———\n", - "》,\n", - "”“\n", - "》,\n", - "》,\n", - "1954\n", - "》,\n", - "》(\n", - ",1979\n", - "),\n", - "》,\n", - "———\n", - "》(\n", - "),\n", - "———\n", - "”、“\n", - "———\n", - "、“\n", - "”,\n", - ")、\n", - "”、“\n", - "”、“\n", - "”。\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - ":“\n", - "”,“\n", - "”。\n", - "”。\n", - "”,\n", - ":“\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - "”;《\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - "》,\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "”。\n", - ":“\n", - "”,\n", - "1992\n", - ":“\n", - "……\n", - "”。\n", - "”、\n", - "8%,\n", - "”。\n", - "”。\n", - ",『\n", - "』,\n", - ":“\n", - "”,\n", - "”,\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - ",“\n", - "”。\n", - "”。\n", - "”,\n", - ",“\n", - "”,\n", - "”———\n", - "ISO9001\n", - "UL、\n", - "VDE、\n", - "CSA\n", - "EEC、CSA\n", - "UL\n", - "———\n", - "”,\n", - ",“\n", - "1996\n", - "”,\n", - "”。\n", - ":“\n", - "0.01\n", - "GE\n", - "”,\n", - "”。\n", - "156\n", - "、545\n", - "”。\n", - "“OEC”\n", - "”,\n", - "”。\n", - "“OEC”\n", - "”,\n", - ",“\n", - "76\n", - "1984\n", - "———\n", - "”。\n", - "”。\n", - "1997\n", - "108\n", - "47\n", - "120\n", - "———“\n", - "70\n", - ":“\n", - "25\n", - "125\n", - ":“\n", - "25\n", - ":“\n", - "”5\n", - "』。\n", - "100\n", - "———“\n", - "”。\n", - "”(\n", - "17\n", - "2.2\n", - "40\n", - "60\n", - "40\n", - ",50\n", - "40\n", - ",40\n", - "”。\n", - "”,\n", - "”。\n", - "”。\n", - "),\n", - "35\n", - "54\n", - "1997\n", - "120\n", - "90\n", - "240\n", - "32920\n", - "48770\n", - "23690\n", - "58000\n", - "81690\n", - "15.5\n", - "7.5\n", - "”,\n", - "”(\n", - "159\n", - "1997\n", - "47\n", - "47\n", - ",7\n", - "27\n", - ",8\n", - "31\n", - "1996\n", - "27\n", - "1995\n", - "1993\n", - ",1992\n", - "”。\n", - ",1996\n", - "1997\n", - "3%\n", - "“21\n", - "70\n", - "80\n", - "80\n", - "“21\n", - "”。\n", - "80\n", - "”:\n", - ";2\n", - "”,\n", - "”;\n", - "1/4\n", - ",40%\n", - "1/10\n", - "1%\n", - "』,\n", - "”,\n", - "”,\n", - "500\n", - "23·3%,\n", - "7·8%。\n", - "500\n", - "1996\n", - "500\n", - "500\n", - "1997\n", - "1780\n", - "6980\n", - "100\n", - "16.2\n", - 
"1987\n", - "5%\n", - "25%,\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "4∶3,\n", - "9000\n", - "9033.23\n", - "90\n", - "———\n", - "6000\n", - "100\n", - "1996\n", - "370\n", - "———\n", - "817\n", - "33.875\n", - "176.75\n", - "10.06\n", - "71.75\n", - "27\n", - "550\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "”。\n", - "8000\n", - "14%。\n", - "7900\n", - "9000\n", - "700\n", - "9000\n", - "9033.23\n", - "18%\n", - "16%\n", - "15%。\n", - "4·5%,\n", - "5·5%,\n", - "6%,\n", - "8%。\n", - ",1998\n", - "2001\n", - ",1997—1998\n", - "2·81\n", - "9%。\n", - "4·5%\n", - "8%\n", - ",11\n", - ",1997\n", - "1·1\n", - "1·9\n", - ":“\n", - "”。\n", - "43\n", - "”,\n", - "1968\n", - "58\n", - "3000\n", - "1500\n", - "100\n", - "1985\n", - "89\n", - "”。\n", - "3911\n", - "0.8\n", - "0.3\n", - "1.1\n", - "1995\n", - "7.2\n", - "”。\n", - "”,\n", - "40\n", - "100\n", - "”。\n", - "35\n", - "1.12\n", - "290\n", - "127\n", - "180\n", - "297\n", - "80\n", - "60\n", - "1988\n", - "5000\n", - "80\n", - "8.5\n", - "110\n", - "80\n", - "3911\n", - "1991\n", - "1410\n", - "13.1\n", - "200\n", - "———\n", - ")(\n", - ":“\n", - "”。\n", - "1600\n", - "58.5\n", - "90%\n", - ",25%—33%\n", - "8%\n", - "15%\n", - "”,\n", - ",95%\n", - ":“\n", - "”。\n", - ":“\n", - "”。\n", - "———\n", - "1996\n", - "95%。\n", - "60\n", - "1/4\n", - "80\n", - "10%。\n", - "”,\n", - "300\n", - "500\n", - "”,\n", - ",1998\n", - "7510\n", - "7675\n", - "40%,\n", - "124·5\n", - "22\n", - ",“\n", - "”。\n", - "2.2%。\n", - "2003\n", - "100%,2011\n", - "60%。\n", - "(1996\n", - "15.7%),\n", - "4700\n", - "8000\n", - "10%,\n", - "”,\n", - "122%,\n", - "5.1%,\n", - "23.8%,\n", - "14%。\n", - "60%,\n", - "121%。\n", - ":“\n", - "”,\n", - ",“\n", - ":“\n", - ")。\n", - "”。\n", - "”。\n", - "25\n", - "2000\n", - "100\n", - "33\n", - "10%\n", - "30%\n", - "17\n", - "700\n", - "》4\n", - "280\n", - "M—8\n", - "54\n", - "1996\n", - "716\n", - "212\n", - "928\n", - "1993\n", - "454\n", - ",1994\n", - "720\n", - ",1995\n", - "1025\n", - ",1996\n", - "6893\n", - "899\n", - "1993\n", - "1997\n", - "9781\n", - "54\n", - "(1\n", - "6.3\n", - "”。\n", - "”,\n", - "”,\n", - "”。\n", - "”。\n", - "”,\n", - "”。\n", - "225\n", - "185·12\n", - ",“\n", - "”。\n", - "3A\n", - ",1997\n", - "1800\n", - ",“\n", - "”。\n", - "”,\n", - "119\n", - "601\n", - "12.46\n", - ",5\n", - "1994\n", - "1991\n", - "1992\n", - "1.1\n", - "1990\n", - "3333\n", - "1993\n", - "1350\n", - "1913\n", - ",1971\n", - ",10\n", - "”,\n", - "9000\n", - "90\n", - "1904\n", - ",“\n", - "”;\n", - ":“\n", - ":“\n", - ":“\n", - "”3\n", - "———\n", - "3.4\n", - "---\n", - "M81\n", - ",1963\n", - "100\n", - "1000\n", - "X—\n", - "X—1\n", - "X—\n", - "X—\n", - "70\n", - "X—\n", - "60\n", - "1054\n", - "1.4\n", - ");1967\n", - "127\n", - "”(\n", - "rc,\n", - "rc=2GM/C\n", - "),\n", - ",G=6.67×10\n", - ",C\n", - "2.997×10\n", - "rc\n", - ",1938\n", - ",1963\n", - ",1991\n", - "FundamentalofCosmicPhysics\n", - "88\n", - "1998\n", - "1200\n", - "1997\n", - "700\n", - "GMP\n", - "6000\n", - "70%\n", - "1995\n", - ",53\n", - "、98%\n", - "1300\n", - ",2\n", - ",1998\n", - ":“\n", - ",“\n", - "29\n", - ":“\n", - "(97·12·30)\n", - ")。\n", - "1995\n", - "1997\n", - "32\n", - "---\n", - "BP\n", - ":“\n", - ":“\n", - "”1996\n", - ",1994\n", - "1998\n", - "”,\n", - "1997\n", - "”,\n", - ",7\n", - "1987\n", - "2000\n", - "2、24\n", - "114\n", - "1、\n", - "”!\n", - ":“\n", - "”,\n", - "1995\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”。\n", - "650\n", - "1987\n", - "36\n", - "”3\n", - ")(\n", - "1400\n", - "70\n", - "1994\n", - ",46\n", - 
"100\n", - "———\n", - "”,\n", - "……\n", - ",1993\n", - ",5\n", - "”,\n", - "100\n", - ",39\n", - ",“\n", - "600\n", - "……\n", - "70\n", - "22\n", - ":“\n", - "300\n", - "1995\n", - "———(\n", - "28\n", - "31\n", - "490\n", - "15%\n", - "2.3\n", - "2.8\n", - "2、\n", - "1、\n", - "737—700\n", - "737\n", - "144\n", - "737—700\n", - "7000\n", - "27\n", - "737—700\n", - "1997\n", - "》,\n", - "”,\n", - "》,\n", - "250\n", - "70\n", - "),\n", - "100\n", - "……\n", - "(1997—2002\n", - "1996—1997\n", - "150\n", - "110\n", - "2000\n", - "1997\n", - ")15\n", - "30%。\n", - "16.8\n", - "1990\n", - "18%\n", - "1997\n", - "1%;\n", - "1990\n", - "21%\n", - "1997\n", - "6.2%。\n", - "90\n", - "1996\n", - "40\n", - ":1997\n", - "480\n", - "5.3%;\n", - "80\n", - "200\n", - ");\n", - "14.7%,\n", - "2006\n", - "(1\n", - "3.4\n", - ")。\n", - ",1998—1999\n", - "6.2%。\n", - "1%\n", - "2%,\n", - "”。\n", - "”,\n", - "》2\n", - "”,\n", - "88%\n", - "12%,\n", - "8.16\n", - "5.3\n", - "3056\n", - "300\n", - "”,\n", - "1996\n", - "1.7\n", - "650\n", - "771\n", - "1620\n", - "2400\n", - "87\n", - "1996\n", - "5.56\n", - "5480\n", - "1.8\n", - "6.46\n", - "5487\n", - "1971\n", - "70\n", - "”,\n", - "、DDT\n", - "1996\n", - "”,\n", - "”,\n", - "”。\n", - "5.5∶1,\n", - "3.3∶1\n", - "6∶1。\n", - "3500\n", - "3000\n", - "2·3\n", - "1·5\n", - "2·2\n", - "2150\n", - "1914\n", - "4000\n", - "1·1\n", - "200\n", - "320\n", - "51\n", - "65\n", - "624\n", - "”,\n", - "”。\n", - "”(MERLIN)\n", - "”。\n", - "80\n", - "31\n", - "3000\n", - "109\n", - "29\n", - "39·4\n", - "40·6\n", - "》。\n", - ":“\n", - ",1997\n", - "17\n", - ")(\n", - "32\n", - "70%\n", - "30%\n", - "40%\n", - "),\n", - "”。\n", - "60\n", - ":“\n", - ",“\n", - "70\n", - "87\n", - "———\n", - "31\n", - "2800\n", - "1500\n", - "48\n", - "19\n", - "(6·8\n", - ")。\n", - "”。\n", - "7000\n", - "1·16\n", - "”。\n", - ",“\n", - "”。\n", - "2698·7\n", - "2574\n", - "1998\n", - "31\n", - "31\n", - "31\n", - "124·5\n", - "29\n", - ",《\n", - ",《\n", - "》,\n", - "150\n", - "1%,\n", - "150\n", - "1200\n", - "》。\n", - "———\n", - "》,\n", - "”,\n", - "2001\n", - "1997\n", - "1998\n", - "1997\n", - ");\n", - "1998\n", - "”(3\n", - "),\n", - "1999\n", - "2001\n", - "1996\n", - "80\n", - "”,\n", - "1986\n", - "BIGBANG\n", - "》、《\n", - "1996\n", - "2001\n", - "BIGBANG(\n", - "),\n", - "”。\n", - "2%。\n", - "48\n", - "33\n", - "80\n", - "29\n", - "21\n", - ":“\n", - "”64\n", - ",1972\n", - "”17\n", - "”。\n", - "1986\n", - "28\n", - "72\n", - ",7\n", - "”。\n", - "100\n", - "1000\n", - "———\n", - "”,\n", - "———\n", - "”。\n", - "———\n", - ")(\n", - ",300\n", - "……\n", - ":“4\n", - "”。\n", - "1992\n", - ",45\n", - "300\n", - "……4\n", - "27\n", - "”,\n", - "』(\n", - "1957\n", - ",1997\n", - "28\n", - "28\n", - "2000\n", - "29\n", - "9%\n", - "28\n", - "”。\n", - "30%,\n", - "13·1%。\n", - "40%。\n", - "29\n", - "13·1%\n", - ",“\n", - "”。\n", - "”。\n", - "”、“\n", - "”。\n", - "”,\n", - "70\n", - "83\n", - "27\n", - ",“\n", - "”,\n", - "29\n", - "1962\n", - "1996\n", - "156\n", - ",1993\n", - ":“\n", - ":“\n", - "72\n", - "27\n", - "29\n", - ")4\n", - "29\n", - "27\n", - ",15\n", - ",“\n", - "”。\n", - "”,\n", - "27\n", - "300\n", - "28\n", - "1995\n", - "》,\n", - "1996\n", - "1997\n", - "25\n", - "1993\n", - "1980\n", - "1925\n", - "》,\n", - "168\n", - "107\n", - ",《\n", - "》(\n", - "),\n", - "1993\n", - ",1997\n", - "29\n", - "1925\n", - "》。\n", - "》,\n", - ")。\n", - "》。\n", - ",“\n", - "28\n", - "”。\n", - "27\n", - "90\n", - "27\n", - "22\n", - "1997\n", - "28\n", - ",27\n", - "27\n", - "28\n", - "28\n", - 
"27\n", - "”,\n", - "27\n", - "21\n", - "55\n", - "27\n", - "2000\n", - ":“\n", - "”。\n", - "100\n", - ",21\n", - "2013\n", - "2015\n", - "》,\n", - "”,\n", - ",17\n", - "”,\n", - "255\n", - "”。\n", - "60\n", - "40\n", - "1969\n", - "---\n", - ")(\n", - "21\n", - "28\n", - "27\n", - "21\n", - ":“\n", - "”。\n", - "”,\n", - "2·5\n", - "300\n", - "27\n", - "”。\n", - "36%\n", - "22%,\n", - "1/3。\n", - "26\n", - "13%\n", - ")、\n", - "130\n", - "20%\n", - "80\n", - "112\n", - "32\n", - "、17\n", - "、14\n", - "27\n", - "112\n", - "550\n", - "1997\n", - ",24\n", - "470\n", - "820\n", - "45%\n", - "20%\n", - "30%,\n", - "4·6\n", - "10%\n", - "1996\n", - "”,\n", - "”。\n", - "”,“\n", - "”。\n", - "”、“\n", - "”。\n", - "1996\n", - "”,\n", - "1996\n", - "22\n", - "26\n", - "9%\n", - "13·1%\n", - "26\n", - ":“\n", - "1986\n", - ",1991\n", - "1915\n", - ",1930\n", - ",1936\n", - ")。\n", - "28\n", - "29\n", - "29\n", - "70\n", - "”,“\n", - "”。\n", - "83\n", - "27\n", - ",28\n", - "29\n", - "26\n", - "26\n", - "、LG\n", - "21\n", - "27\n", - "27\n", - "》2000\n", - "75\n", - "251\n", - ",4\n", - "27\n", - "35\n", - "23\n", - "2、\n", - "131\n", - "1、4\n", - "27\n", - "1991\n", - "44\n", - "1997\n", - "240\n", - "21\n", - "8%,\n", - "3%,\n", - "8·8%,\n", - "0·8%。\n", - "11%,\n", - "27\n", - ",“\n", - "700\n", - "35\n", - "206\n", - "36\n", - ",5000\n", - "75\n", - "、160\n", - "、110\n", - "—8\n", - "”。\n", - "”,\n", - "”。\n", - "”(\n", - ",12\n", - "———\n", - ")(\n", - ")(\n", - "80\n", - ",1995\n", - ",1996\n", - "5000\n", - ",12\n", - "400\n", - "23\n", - "25\n", - ",5\n", - "25\n", - ",“\n", - "”。\n", - "26\n", - "(4\n", - "25\n", - ")66\n", - "25\n", - "604\n", - "347\n", - "———\n", - ",“\n", - "———\n", - "”。\n", - "2000\n", - "、“\n", - "———\n", - "25\n", - "2000\n", - "11%\n", - "9%\n", - "13·1%\n", - "25\n", - "1946\n", - "60\n", - "76\n", - "1942\n", - "1944\n", - "22\n", - "425\n", - "25\n", - "500\n", - "17\n", - "3500\n", - "22\n", - "21\n", - "25\n", - "”。\n", - "”,\n", - "60\n", - "26\n", - ",“\n", - "、“\n", - "、“\n", - "———\n", - "32\n", - "251\n", - "、25\n", - ":25\n", - "25\n", - "”。\n", - "”。\n", - "26\n", - "21\n", - ",3\n", - "1995\n", - "26\n", - "2、\n", - "1、\n", - "”、“\n", - "”,\n", - "1997\n", - "4500\n", - "80%,\n", - "1997\n", - "1991\n", - "1991\n", - "250\n", - "3500\n", - "4.1\n", - "———\n", - "1993\n", - "3120\n", - "1995\n", - "、3\n", - "220\n", - "400\n", - "400\n", - "53%,\n", - "77·56%。\n", - "3383·7\n", - "32·35%。\n", - "8000\n", - "”!\n", - "1000\n", - "1000\n", - "28\n", - "40\n", - "60\n", - "80\n", - "500\n", - "40\n", - "1.5\n", - "40\n", - "”,\n", - "3.8\n", - "2.5\n", - "2200\n", - "”。\n", - "』、『\n", - "……\n", - "”。\n", - ":“\n", - "6%,\n", - "3900\n", - "94%,\n", - "6%\n", - ":“\n", - "”,\n", - "TITANIC\n", - "》。\n", - "80\n", - "”,\n", - "”,\n", - "”。\n", - "1.5%、10%、1.3%,\n", - "”,\n", - "1997\n", - "15%\n", - "1997\n", - "600\n", - "98.4%,\n", - "1.6%。\n", - "1984\n", - "1984\n", - "”,\n", - "23\n", - ")(\n", - "”。\n", - "63\n", - ":“\n", - ":“\n", - ":“\n", - "”,\n", - "9835\n", - "700\n", - "”。\n", - "”,“\n", - "”。\n", - "1853\n", - ",1884\n", - ",1946\n", - ",10\n", - "》,\n", - "22\n", - ":4\n", - "21\n", - "———“\n", - "”,\n", - "1995\n", - "”,\n", - "1997\n", - "》、\n", - "26\n", - "70\n", - "81\n", - "216\n", - "》、《\n", - "》、\n", - "》、\n", - "23\n", - "1996\n", - "1996\n", - "1990\n", - "1963\n", - "61\n", - "、6\n", - "23\n", - "22\n", - "”。\n", - "23\n", - "200\n", - "22\n", - "23\n", - "23\n", - "22\n", - "”。\n", - ",“\n", - "”,\n", - "”。\n", - "”,\n", - "”,\n", - 
"”,\n", - "”。\n", - ",“\n", - "”。\n", - "”,“\n", - "”。\n", - "”,\n", - "“‘\n", - "”。\n", - ",《\n", - "”。\n", - "》。\n", - "300\n", - ":“\n", - "21\n", - "21\n", - "22\n", - ",21\n", - ",“\n", - "21\n", - "21\n", - "100\n", - ",20\n", - ",21\n", - "》。\n", - "26\n", - "21\n", - ",13\n", - "23\n", - "21\n", - ":“\n", - "700\n", - "350\n", - "),\n", - "43%。\n", - "———3\n", - "”。\n", - "2000\n", - "7.5\n", - "—180\n", - ",1\n", - ",2\n", - "1991\n", - ",3\n", - "6000\n", - ",13\n", - ",300\n", - "1437\n", - "1986\n", - "、2\n", - "、3\n", - "1977\n", - "、1978\n", - "、1983\n", - "1984\n", - "1970\n", - "1000\n", - "———\n", - ")(\n", - ")(\n", - "21\n", - "60\n", - "1/10。\n", - ",1997\n", - "6455\n", - "1996\n", - "6664\n", - "21\n", - "1997\n", - "58\n", - "9800\n", - "23\n", - "22\n", - "23\n", - "1997\n", - "7.75\n", - "17·6%,\n", - "60%\n", - "”。\n", - "”,\n", - "”。\n", - "1991\n", - "250\n", - "70\n", - "55\n", - "21\n", - "22\n", - "3000\n", - "4000\n", - "4000\n", - "800\n", - "”,\n", - "21\n", - ",“\n", - "”。\n", - "22\n", - "59\n", - "、1\n", - "”。\n", - "1990\n", - "19\n", - ",18\n", - "22\n", - "21\n", - "1983\n", - "21\n", - "21\n", - "21\n", - "22\n", - ",350\n", - "1999\n", - "9%\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "17\n", - "21\n", - "21\n", - "”,\n", - "17\n", - ",14\n", - ")21\n", - "21\n", - "21\n", - "22\n", - ")。\n", - "”。\n", - "26\n", - "25\n", - ",“\n", - "”,\n", - "”、“\n", - "”,\n", - ":“\n", - "21\n", - "”,\n", - "”。\n", - "”。\n", - "500\n", - "2000\n", - "2000\n", - "1956\n", - "1993\n", - ",《\n", - "1956\n", - "1993\n", - "1993\n", - "”。\n", - "0.3%,\n", - "”,\n", - "2∶0\n", - "19\n", - "66\n", - "73%。\n", - "19\n", - "63·5%,\n", - "14%。\n", - "19\n", - "150\n", - "1998\n", - "19\n", - "19\n", - "55\n", - "6000\n", - "1980\n", - "1988\n", - "400\n", - "19\n", - "17\n", - "17\n", - "22\n", - ",26\n", - "17\n", - "1972\n", - "1960\n", - "17\n", - "17\n", - "CNN、《\n", - "》、《\n", - "200\n", - "……\n", - "WTO、\n", - "』---\n", - "”。\n", - "17\n", - "17\n", - "17\n", - ",6\n", - "17\n", - "17\n", - "17\n", - "17\n", - "17\n", - "17\n", - "17\n", - "17\n", - "”,“\n", - "”。\n", - "17\n", - "1991\n", - "1993\n", - "1996\n", - ",“\n", - "80\n", - "200\n", - "42\n", - ";《\n", - "……\n", - "》,\n", - "———\n", - ",74%\n", - "1948\n", - "1967\n", - "17\n", - ",540\n", - "97%\n", - "39\n", - "122\n", - ":1·\n", - ";2·\n", - ";3·\n", - "《2000\n", - "》。\n", - "》,\n", - ")6\n", - ",15\n", - "”、“\n", - "2000\n", - "1984\n", - "17\n", - ",“\n", - ",“\n", - "”,\n", - "30%\n", - "”,\n", - "”。\n", - "”。\n", - ",15\n", - "4%\n", - "6%\n", - "”;\n", - ",15\n", - "”。\n", - "17\n", - "”,\n", - "》17\n", - "17\n", - "》、《\n", - "》、《\n", - "1972\n", - "17\n", - "、5\n", - "、5\n", - "”,\n", - ",15\n", - ",“\n", - "83\n", - "2000\n", - "2000\n", - ",1997\n", - "30%,\n", - "2%。\n", - "1500\n", - ",2000\n", - "3/4\n", - "200\n", - "—300\n", - "2000\n", - "37\n", - "1993\n", - "2000\n", - "---\n", - "”。\n", - "19\n", - "”。\n", - "”,\n", - "27\n", - "”。\n", - "”。\n", - ",100\n", - "100\n", - "22\n", - "38.7\n", - "350\n", - "100\n", - "”,\n", - "”。\n", - "60\n", - "200\n", - "687\n", - "22\n", - "”。\n", - "”。\n", - "”、“\n", - "1995\n", - "1989\n", - "80\n", - "”。\n", - ":“\n", - ",70—80\n", - "90\n", - "1.2\n", - "8000\n", - "”,\n", - "F—16\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - ",1991\n", - "”。\n", - ",“\n", - "”。\n", - ",13\n", - "80\n", - "”,\n", - "3000\n", - ":“\n", - "”,\n", - "28\n", - "175\n", - "35\n", - ")35\n", - "3000\n", - "75\n", - ")。\n", - "207\n", - 
"、273\n", - ",“\n", - "———\n", - "”。\n", - "”。\n", - "”。\n", - ",“\n", - "”。\n", - "”。\n", - ":13\n", - ":“\n", - "、《\n", - "》、《\n", - "……\n", - "”。\n", - "1200\n", - "———\n", - "”。\n", - ",20\n", - "1998\n", - "93\n", - "500\n", - "1990\n", - "100\n", - "100\n", - "21\n", - "100\n", - "21\n", - "”。\n", - "36\n", - "1988\n", - "1996\n", - "1988\n", - "2:\n", - "67\n", - "、30\n", - "1:\n", - "1.18\n", - ",80%\n", - "),\n", - "———\n", - "———\n", - "1.2\n", - "130\n", - "100\n", - "1995\n", - "1997\n", - "2.6\n", - "1.3\n", - "”,\n", - "31\n", - "4600\n", - "1997\n", - "》、《\n", - "19\n", - "1.3\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "”。\n", - "1995\n", - "1997\n", - "10.5\n", - "1997\n", - "5.6\n", - "53%,\n", - "2%\n", - "90\n", - "80%\n", - "24%\n", - "”、“\n", - "”、“\n", - "2010\n", - "”,\n", - "1/4,\n", - "1997\n", - "52%;\n", - "8%,\n", - "600\n", - "74%,\n", - "26%,\n", - "7.2%,\n", - "8%,\n", - "3%,\n", - ",“\n", - "25\n", - "”,\n", - "”。\n", - "”。\n", - "”。\n", - ":“\n", - "70\n", - "”。\n", - ",“\n", - "”,\n", - "”,“\n", - "”。\n", - "”,“\n", - "25\n", - "”。\n", - "42\n", - "1998—1999\n", - "”,\n", - "”,\n", - "21\n", - "1172\n", - ":5\n", - "1172\n", - "》;\n", - "1172\n", - ":5\n", - ":“\n", - ",“\n", - ",10\n", - "88·7\n", - "55%,\n", - "70·3\n", - "”,\n", - "”,\n", - "”。\n", - "21\n", - "100\n", - "21\n", - "100\n", - "100\n", - "1993\n", - ",150\n", - "23\n", - ",1\n", - ",8\n", - "》、《\n", - "2008\n", - "400\n", - "34\n", - "》。\n", - "31\n", - "34\n", - ",“\n", - "”。\n", - ":“\n", - "”、“\n", - "”。\n", - "2000\n", - ",“\n", - "”。\n", - "25\n", - ",“\n", - "———\n", - "”。\n", - "1994\n", - "164\n", - "1997\n", - "230\n", - ",12\n", - "3.9%\n", - "1994\n", - "1993\n", - "1978\n", - "《1997\n", - "1994\n", - ",7\n", - "”。\n", - ";“\n", - "”。\n", - "”。\n", - "”。\n", - "6.8\n", - "7.2\n", - "1993\n", - "1991\n", - "”。\n", - ";5\n", - ";6\n", - "44\n", - ",135\n", - "”。\n", - "53\n", - "”。\n", - "1991\n", - "”。\n", - ",100\n", - "”。\n", - ",“\n", - "”。\n", - ",1/3\n", - "21\n", - "”,\n", - "》。\n", - "1994\n", - "”。\n", - "”,\n", - ",“\n", - "1996\n", - "1988\n", - "42\n", - "1993\n", - "17%\n", - ",50%\n", - ",33%\n", - "”。\n", - "”。\n", - "40\n", - "100\n", - "62\n", - "21\n", - ":“\n", - "———\n", - "”。\n", - "》。\n", - "25\n", - "1942\n", - "1993\n", - "1990\n", - "1963\n", - ",1990\n", - "1943\n", - "54\n", - "”。\n", - "”。\n", - "1985\n", - ",1987\n", - "》,\n", - "1991\n", - "1997\n", - "41.3\n", - "26.9\n", - "14.4\n", - "55\n", - ",1997\n", - "54\n", - "80\n", - "2000\n", - "21\n", - "”。\n", - "、《\n", - "》、\n", - "1979\n", - "21\n", - "43\n", - ",“\n", - "”。\n", - "1970\n", - "200\n", - "),\n", - ":7\n", - "”,\n", - "”。\n", - "51\n", - "48\n", - "1172\n", - "———\n", - "”。\n", - "”。\n", - "”。\n", - "”、“\n", - "”;\n", - "125\n", - ":“\n", - "”。\n", - "”,\n", - ",“\n", - "”。\n", - "2003\n", - "2008\n", - ",130\n", - "》、《\n", - "21\n", - "21\n", - ",15\n", - "19\n", - "4000\n", - ",1300\n", - "1.4\n", - "1.9\n", - "3.3%\n", - "4.1%。\n", - ":“\n", - "25\n", - "21\n", - "”。\n", - ",“\n", - "”,“\n", - "”。\n", - ",185\n", - "》、《\n", - "》、《\n", - "”。\n", - "”,\n", - "”;\n", - "2.9\n", - "20.4%,\n", - "18.6%,\n", - "20.6%。\n", - "”。\n", - "21\n", - "60%\n", - "40%\n", - "50%,\n", - "30%\n", - "40%。\n", - "60%。\n", - "”,\n", - ":“\n", - "》,\n", - "”。\n", - "2008\n", - ",“\n", - "”。\n", - "5·6\n", - "》6\n", - ",“\n", - ",“\n", - "”,\n", - "”。\n", - "5000\n", - "97\n", - "500\n", - "5000\n", - "7000\n", - "200\n", - "400\n", - "60\n", - "3000\n", - "2000\n", - "”。\n", 
- ",“\n", - "1800\n", - "500\n", - "800\n", - "”。\n", - "———“\n", - "1996\n", - "———\n", - "”,\n", - "130\n", - "———\n", - "”。\n", - "1998\n", - "2.7\n", - "3100\n", - "3.8\n", - ",6\n", - ",6\n", - "”A300\n", - "190\n", - "C130\n", - "40\n", - "60\n", - "”A310\n", - "210\n", - "70\n", - "80\n", - "C130\n", - "90\n", - "1993\n", - "1997\n", - "25\n", - "1992\n", - "0.7%\n", - "0.36%\n", - "77.7%\n", - "98%。\n", - "78%\n", - ",1993\n", - "2/3\n", - "7000\n", - ",1/10\n", - "60%;\n", - "60%\n", - "15%\n", - "50%\n", - "3.4\n", - "100\n", - "2100\n", - "95\n", - "25\n", - "35\n", - ",20\n", - "70\n", - "”,\n", - "70·8%\n", - "”。\n", - "”,\n", - "23\n", - "、30\n", - "》,\n", - "1172\n", - "》。\n", - "……\n", - "”,\n", - "”,\n", - "”,\n", - ":“\n", - "VCD\n", - "1995\n", - "1997\n", - "———\n", - "1995\n", - "1993\n", - ":“\n", - "1996\n", - ":“\n", - "”。\n", - "1996\n", - "”。\n", - "1994\n", - "———\n", - "……\n", - "———\n", - "……\n", - "———\n", - "……\n", - "……\n", - "……\n", - "……\n", - "4:\n", - "3:\n", - "2:\n", - ",6\n", - "1:\n", - "”。\n", - "”。\n", - "”。\n", - "”,\n", - "”。\n", - "3824\n", - "3642\n", - "5.6%,\n", - "”,\n", - "3609.9\n", - "4120\n", - "14%;\n", - "82.8\n", - "139.1\n", - "68%;\n", - "62%。\n", - "”。\n", - "”,\n", - "3.91\n", - "0.5\n", - "1998—1999\n", - ",2\n", - ",“\n", - "”,\n", - "”。\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "”,\n", - "”。\n", - "”。\n", - "40\n", - "”,\n", - "”,\n", - "”,“\n", - "”,“\n", - "”。\n", - ",“\n", - "”。\n", - "1998—1999\n", - "14%,\n", - "107\n", - "”。\n", - "28\n", - "”。\n", - "1996\n", - "1967\n", - "17\n", - "15%。\n", - "15%\n", - ":“\n", - "21\n", - ":“\n", - ":“\n", - "》。\n", - "”。\n", - "1/4\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - "”,\n", - ",“\n", - "1999\n", - "1995\n", - "27\n", - "”,\n", - "27\n", - "1994\n", - "”,\n", - "”,\n", - "”;\n", - "”,\n", - ":“\n", - ",“\n", - "”。\n", - ")3\n", - "1995\n", - "1990\n", - "1982\n", - "”。\n", - "》、《\n", - "》、《\n", - "》3\n", - "25\n", - ",《\n", - "1·6\n", - "28\n", - "F—16\n", - "1997—1998\n", - "1995\n", - "2/3,\n", - "1948\n", - "1530\n", - "111\n", - "75\n", - "1993\n", - "77\n", - "1991\n", - "”,\n", - "1948\n", - "1988\n", - "40\n", - "1988\n", - "1998\n", - "36\n", - ",80\n", - "49\n", - "17\n", - "29\n", - "1948\n", - "29\n", - "---\n", - "”,\n", - ",“\n", - "”。\n", - "38000\n", - "”,\n", - "”。\n", - ":“\n", - "”,\n", - ",“\n", - "”,\n", - "28\n", - "3·3%\n", - "2·3%。\n", - "300\n", - "260\n", - "1·15\n", - "9%,\n", - "14·8\n", - "11%。\n", - "40\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - ",“\n", - "”,\n", - "”,\n", - "”。\n", - "200\n", - "28\n", - "2002\n", - "1993\n", - "50%\n", - "20%\n", - "22%。\n", - "200\n", - "220\n", - "12%\n", - "2010\n", - "80\n", - ":“\n", - ",5\n", - "31\n", - "2000\n", - "29\n", - "”。\n", - "”。\n", - ",1962\n", - "1948\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "”。\n", - "21\n", - "34\n", - "2、6\n", - "90\n", - "1、\n", - "28\n", - ":“\n", - "21\n", - "250\n", - "1980\n", - "5000\n", - "”。\n", - "”,\n", - "200\n", - "3.2\n", - "180\n", - ",1978\n", - "21\n", - "61\n", - "”,\n", - "”。\n", - "”。\n", - "46\n", - ",5\n", - ":“\n", - "》,5\n", - "1999\n", - ",5\n", - ",“\n", - "”。\n", - ",“\n", - "”。\n", - "2000\n", - ",“‘\n", - "”。\n", - "1998\n", - ",1999\n", - ",1999\n", - "2000\n", - "”。\n", - "21\n", - ":“\n", - "1998\n", - "”。\n", - "14·6%\n", - "40%,\n", - "21\n", - "132\n", - "47\n", - "132\n", - "———\n", - "1995\n", - "10%;\n", - "1948\n", - "23\n", - "19\n", - "132\n", - "31\n", - "1991\n", - "74\n", - "1964\n", - "1991\n", - "25\n", - "”。\n", - 
"70\n", - "5117\n", - "53\n", - "7654\n", - "23\n", - "”。\n", - "”,\n", - ",4\n", - "100\n", - "40\n", - "33\n", - ",17\n", - "29\n", - ",“\n", - "”。\n", - ",“\n", - ",“\n", - "113\n", - "3%,\n", - "78\n", - "50.43%,\n", - "34.38%\n", - "6·33%\n", - "72.77%\n", - ",31\n", - ",31\n", - "31\n", - "31\n", - ",“\n", - ",“\n", - "29\n", - "”、\n", - "》。\n", - "2、\n", - "1、5\n", - "”,\n", - "”,\n", - "》,\n", - "”,\n", - "21\n", - "”,\n", - "”。\n", - ":“\n", - ":“\n", - "》,\n", - "31\n", - "4000\n", - "29\n", - "31\n", - "425\n", - "”,\n", - "”。\n", - "1992\n", - ",“\n", - "”。\n", - "60\n", - "”5\n", - "28%\n", - "33%,\n", - "70\n", - "54\n", - "1070\n", - "40%,\n", - "”,\n", - "”。\n", - "”,\n", - ",“\n", - "”。\n", - "”,\n", - "29\n", - "29\n", - "19\n", - "31\n", - "29\n", - "28\n", - "27\n", - "31\n", - "27\n", - "31\n", - "2、5\n", - "29\n", - "1、\n", - "29\n", - "29\n", - "》,\n", - "》,\n", - "”。\n", - "》,\n", - "31\n", - ",“\n", - "”。\n", - "”。\n", - "31\n", - "》。\n", - "》。\n", - "21\n", - "21\n", - "31\n", - "31\n", - "552\n", - "10%\n", - "444\n", - ",90\n", - "》5645\n", - "390\n", - "28\n", - ":《\n", - "》。\n", - "40\n", - "4.8\n", - "50%。\n", - "500\n", - "28\n", - "500\n", - "2000\n", - "”,6\n", - "29\n", - "2000\n", - "70\n", - "21\n", - "56\n", - "1000\n", - "100\n", - "300\n", - "500\n", - "6000\n", - "),\n", - "”。\n", - "”。\n", - "1990\n", - ",“\n", - "”。\n", - "29\n", - "580\n", - "1164\n", - "5%,\n", - "200\n", - "28\n", - "15%\n", - "20%\n", - "70%,\n", - ")。\n", - "29\n", - "1997\n", - "160\n", - "13.6%。\n", - ",1996\n", - "25\n", - "……(\n", - "28\n", - ",1/4\n", - "(1/4\n", - ")。\n", - "……\n", - "1/8\n", - "27\n", - "1/8\n", - ",A、B\n", - "”,“\n", - "”。\n", - ",“\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "90\n", - "”。\n", - ",“\n", - "”。\n", - "1/8\n", - "———\n", - "1/8\n", - "1998\n", - "”。\n", - ",“\n", - "”。\n", - "1/8\n", - "1/8\n", - "1/8\n", - "———\n", - ",“\n", - "”。\n", - ":“\n", - ":“\n", - "1/8\n", - "1/8\n", - "26\n", - ",G\n", - "2∶0\n", - "1∶1\n", - "———\n", - "1∶0\n", - "4∶1\n", - ",20\n", - "1/8\n", - "28\n", - "115\n", - "』。\n", - ",10\n", - ":6\n", - ",40\n", - "”,\n", - "85%\n", - "62%,\n", - "1822\n", - "1100\n", - "90%,\n", - "24237\n", - "3000\n", - "”,\n", - "1996\n", - "240\n", - "80%。\n", - ":122\n", - "95%,\n", - "1000\n", - "”。\n", - "27\n", - "1994\n", - "500\n", - "1998\n", - "》,\n", - "”,\n", - "』:\n", - "』,\n", - "———\n", - "———\n", - "70\n", - "》,\n", - ",『\n", - ",《\n", - "”。\n", - ",80%\n", - "”,\n", - "、(\n", - "),\n", - "”,\n", - "1644\n", - "”,\n", - "”。\n", - "》,\n", - "”、“\n", - "”、“\n", - "1900\n", - "》、《\n", - ",“\n", - "”,\n", - ":“\n", - "”,\n", - "———\n", - "———\n", - "”。\n", - ",“\n", - "”,\n", - "”,\n", - "”。\n", - "1898\n", - "1998\n", - "》,\n", - "》,\n", - "》。\n", - "1997\n", - "1997\n", - "9711\n", - ",1680\n", - "、850\n", - "3300\n", - "2000\n", - "400\n", - "476\n", - "1997\n", - "6—8\n", - "5—7\n", - "(1996\n", - "—1997\n", - "1987\n", - "1997\n", - "1997\n", - "1997\n", - "(%)\n", - "》。\n", - "1997\n", - "”,3\n", - "”。\n", - "1000\n", - "870\n", - ",171\n", - "111\n", - "6000\n", - "1000\n", - "200\n", - "926\n", - "7698\n", - "7.64%(\n", - "3)。\n", - "1997\n", - "124\n", - "4000\n", - "1000\n", - "28\n", - "63\n", - "433\n", - "17300\n", - "667\n", - "30000\n", - "10%,\n", - "250\n", - "29.4%,\n", - "6347\n", - "14%,\n", - "1244\n", - "3862\n", - "212\n", - "36\n", - "113\n", - "55\n", - "77\n", - "52\n", - "37\n", - "17\n", - "599\n", - "》。\n", - "30%。\n", - "3.69\n", - "0.45\n", - "4.04\n", - "7.02\n", - "43\n", - "1780\n", 
- "),\n", - "3185.1\n", - "1.2\n", - "1997\n", - "375\n", - "90%\n", - "1.3\n", - "200\n", - "0.5%,\n", - "0.3%,\n", - "3.9\n", - "40%,\n", - "0.33\n", - "1/2。\n", - "19.9\n", - "1996\n", - "1997\n", - "25.8\n", - "(“\n", - "206.9\n", - "168.6\n", - "1997\n", - "25\n", - "421.2\n", - "100\n", - "386.7\n", - "1997\n", - "2465\n", - "35440\n", - "733.7\n", - "1.3\n", - "13.92%,\n", - "3%—4%;\n", - "0.114\n", - "160\n", - "105\n", - "6932.1\n", - "38%;\n", - "551.3\n", - "71.4%;\n", - "375.9\n", - "2100\n", - ",70%\n", - "4210\n", - "23.22\n", - "12.83\n", - ",1997\n", - "11.65\n", - "6.28\n", - "53.9%。\n", - "1993\n", - ",1994\n", - "1/3。\n", - "1000\n", - "5313\n", - "39.0%;\n", - "8310\n", - "61.0%。\n", - "40%\n", - ",1997\n", - "1997\n", - "1997\n", - "27\n", - "(%)\n", - "(%)(%)(\n", - "”。\n", - "192.7\n", - "7661\n", - "126.9\n", - "、66.3\n", - "578.6\n", - "1997\n", - "476\n", - "807.5\n", - "10.0%。\n", - "2)。\n", - "17\n", - "96\n", - "97\n", - "1375513827\n", - "67487661\n", - "90\n", - "91\n", - "92\n", - "93\n", - "94\n", - "95\n", - "8851982011264119591233713077\n", - "21212382828384547826014\n", - ",“\n", - "1997\n", - "1.4\n", - "1)。\n", - "57.1%;\n", - "71.7%;\n", - "80.4%;\n", - "21.7%;\n", - "50.0%。\n", - "43\n", - "55\n", - "33\n", - "60\n", - "53.5—65.8\n", - "56.5\n", - ")。\n", - "49\n", - "70\n", - "54.9%。\n", - "67.3—77.8\n", - "71.0\n", - ")。\n", - "1997\n", - ";“\n", - "”(\n", - "、pH\n", - "114\n", - "142\n", - "1997\n", - "15.30\n", - "/(\n", - "),\n", - "21.48\n", - "/(\n", - "),\n", - "9.29\n", - "/(\n", - ")。\n", - "381\n", - "200\n", - "(200\n", - "67\n", - "72.0%。\n", - "32—741\n", - "291\n", - "100\n", - "34\n", - "(50\n", - "),\n", - "36.2%。\n", - "49\n", - "41\n", - "4—140\n", - "45\n", - "72\n", - "60\n", - "52.3%\n", - "37.5%\n", - "(60\n", - ")。\n", - "3—248\n", - "66\n", - "1997\n", - "———\n", - "1997\n", - "1997\n", - "55\n", - "21\n", - "ISO14001\n", - "ISO14000\n", - ")12\n", - "1997\n", - "(ISO14000)\n", - "28\n", - "79\n", - "231\n", - "29\n", - "1997\n", - "361\n", - "29\n", - "1997\n", - "17\n", - "900\n", - "28\n", - "70\n", - "”,\n", - "1997\n", - "1997\n", - "180\n", - "106.6\n", - "523\n", - "177.8\n", - "1997\n", - "502.4\n", - "257.2\n", - "116.4\n", - "128.8\n", - "28\n", - "10.3%。\n", - "1997\n", - "45.2%,\n", - "2.2\n", - "1077\n", - "1.0%。\n", - "6.6\n", - "62.3%。\n", - "1997\n", - "10.6\n", - "2010\n", - "2000\n", - "pH\n", - "4.5\n", - "2000\n", - "2000\n", - "”(\n", - "1998\n", - "》,\n", - "2006\n", - "2010\n", - "1211\n", - "1301\n", - "1997\n", - "232\n", - "2.7\n", - "ODS60121(ODP\n", - "1996\n", - "ODS2.3(ODP\n", - "———\n", - "CFC11\n", - "1991\n", - "1997\n", - "1994\n", - "ODS\n", - "(ODS)\n", - ",1997\n", - "42\n", - "112\n", - "90.4%\n", - "79.4%,\n", - "0.4\n", - "4.4\n", - "1997\n", - "88.4%\n", - "76.9%。\n", - "pH\n", - "5.0,\n", - "70%。\n", - "pH\n", - "5.0,\n", - "70%。\n", - "pH\n", - "5.6\n", - "90%\n", - ",71.7%\n", - "pH\n", - "4.5\n", - "pH\n", - "5.6\n", - "44\n", - "47.8%,\n", - ",75%\n", - "pH\n", - "5.6。\n", - "1997\n", - "pH\n", - "3.74—7.79\n", - "1505\n", - "548\n", - "36.4%,\n", - "957\n", - "685\n", - "43.8%,\n", - "880\n", - "1873\n", - "1565\n", - "83.6%;\n", - "308\n", - "1363\n", - "73.6%;\n", - "489\n", - "1997\n", - "2346\n", - "1852\n", - "78.9%;\n", - "494\n", - "1997\n", - "31\n", - "100\n", - "1562\n", - "71.4%,\n", - "14.3%,\n", - "14.3%。\n", - "———《\n", - "》。\n", - "99\n", - "325\n", - "1997\n", - "78.9%,\n", - "84.7%;\n", - "54.4%,\n", - "61.8%,\n", - "11%\n", - "12%;\n", - "16%,\n", - "47%,\n", - 
"81%,\n", - "38%。\n", - "44%,\n", - "46%,\n", - "94%,\n", - "42%。\n", - "18.7%,\n", - "21.4%,\n", - "6.5%,\n", - "53.4%。\n", - ")、\n", - "70.6%\n", - "50%\n", - "50%\n", - "52%\n", - "71%\n", - "62.5%\n", - ",29.2%\n", - "70\n", - "21\n", - ",1996\n", - "133\n", - ",1997\n", - "226\n", - "66.7%\n", - "67.7%\n", - "COD\n", - "666\n", - "407\n", - "(COD)\n", - "1757\n", - "COD\n", - "1073\n", - "COD\n", - "684\n", - "188\n", - "39\n", - "227\n", - "189\n", - "416\n", - "1997\n", - "1997\n", - "”,\n", - ",《\n", - ",“\n", - "”,\n", - "CD\n", - "CD\n", - "》,\n", - "———\n", - ",“\n", - ",《\n", - "”。\n", - "》,\n", - "———\n", - "》,\n", - "》,\n", - "1997\n", - ",《\n", - "……\n", - ":“\n", - "”。\n", - ",“\n", - "”,\n", - "1991\n", - "1944\n", - "1945\n", - "1946\n", - "》、《\n", - "》,\n", - "……\n", - "———\n", - "———\n", - "》、《\n", - "》、《\n", - "》、《\n", - "130\n", - "(2)\n", - "75\n", - "1987\n", - "32\n", - "80\n", - "201\n", - "———《\n", - "》、《\n", - "》。\n", - "23\n", - "———\n", - "4.2\n", - "23\n", - "”,\n", - "23\n", - ")、\n", - "”,\n", - ":“\n", - "200\n", - ",5\n", - ":“\n", - "”6\n", - "23\n", - ":“\n", - "”90\n", - "1993\n", - "———“\n", - "90\n", - "“030”\n", - "29\n", - "1966\n", - "“030”\n", - "1956\n", - "1937\n", - ":“\n", - "2000\n", - ":“\n", - "……\n", - "……”2\n", - "25\n", - ":“\n", - ":“\n", - "1996\n", - "2600\n", - "250\n", - "500\n", - "1000\n", - "184\n", - "1996\n", - "80\n", - "300\n", - "1996\n", - "1989\n", - ",1992\n", - "1998\n", - "31\n", - "63\n", - "300\n", - "52%,\n", - "”。\n", - "1977\n", - ",1981\n", - "2006\n", - "95\n", - "500\n", - "2001\n", - "90\n", - "23\n", - "2000\n", - "1%;\n", - "30%\n", - "100%;\n", - "98.8%\n", - "84.4%,\n", - "1.5%;\n", - "95%,\n", - "85%。\n", - "268\n", - "25\n", - "2000\n", - "6.56\n", - "CFC\n", - "CFC\n", - "CFC\n", - "CFC\n", - ",1999\n", - "CFC\n", - "22\n", - "CFC(\n", - "CFC\n", - "CFC\n", - "VCD、CD、LD\n", - "102263\n", - "26455\n", - "74\n", - ")529\n", - "169005\n", - "7000\n", - "”。\n", - "100\n", - "22\n", - "100\n", - "40\n", - ":6\n", - "19\n", - "”。\n", - "”:\n", - "160\n", - "”,\n", - "70\n", - "40\n", - "19\n", - "230\n", - "”。\n", - "86\n", - "”,\n", - "……\n", - "”。\n", - "35\n", - "33\n", - "……\n", - "、11\n", - "、86\n", - "———\n", - "———\n", - "1978\n", - "23\n", - "、《\n", - "100\n", - "22\n", - "MPS\n", - "WORD97\n", - ":“\n", - "52\n", - "300\n", - "33\n", - "1000\n", - ",3000\n", - "300\n", - "”,\n", - "———\n", - "19\n", - "———\n", - "”,\n", - "1996\n", - "》,\n", - "”5\n", - "”7\n", - "”5\n", - "”,\n", - "”,\n", - "57\n", - "”、\n", - "”,\n", - "62\n", - "1981\n", - "”。\n", - "40\n", - "),\n", - "1848\n", - ")。\n", - "”、“\n", - "21\n", - ":6\n", - "19\n", - ")3\n", - "),\n", - "53\n", - "20%\n", - "1997\n", - "351\n", - "1/10\n", - "1997\n", - "3.1\n", - "1.7\n", - "27569\n", - "4.4%,\n", - "1997\n", - "30566\n", - "》、《863\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1997\n", - "368\n", - "10.4%。\n", - "1996\n", - "122.4\n", - "36.8%。\n", - "60%\n", - "”,\n", - "”。\n", - "1987\n", - "———\n", - ")、\n", - "1.2\n", - "1000\n", - "68\n", - "28\n", - "、IBM\n", - "3—5\n", - "1000\n", - "”。\n", - "60\n", - "1500\n", - "”,\n", - "30%\n", - "100\n", - ":“\n", - "”、“\n", - "”。\n", - "”,“\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - "”、“\n", - "”、“\n", - "”、“\n", - "”。\n", - "”,“\n", - "”,\n", - "———\n", - "、“\n", - "”,\n", - "”,\n", - "”。\n", - "”。\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - "”(\n", - ")、\n", - ")、\n", - "”,\n", - "”,\n", - ",“\n", - "”,“\n", - "”。\n", - 
":“\n", - "”,\n", - "”,\n", - "》,\n", - "》、\n", - "》、\n", - "1997\n", - "1980\n", - "1988\n", - "(1645\n", - "(1674\n", - "、“\n", - "”、“\n", - "”、\n", - "”。\n", - ")、\n", - "40\n", - "55\n", - "42\n", - ",16\n", - "27\n", - "、MBA(\n", - "1958\n", - ",40\n", - "40\n", - "2133\n", - "》,5\n", - "2.31\n", - "17\n", - "》,\n", - "1997\n", - "300\n", - "150\n", - "100\n", - "”,\n", - ",“\n", - "3663\n", - "”。\n", - "1.2\n", - "100\n", - "25\n", - "1996\n", - "1996\n", - "1993\n", - "1990\n", - "”。\n", - "1990\n", - "0.5\n", - "1100\n", - "300\n", - "1.2\n", - "GMP\n", - "171\n", - "1993\n", - "1693\n", - "38.05%,\n", - ",“\n", - "95%,\n", - "70%\n", - "40\n", - "1995\n", - "110\n", - ":“\n", - "171\n", - "1800\n", - "171\n", - "40\n", - "1982\n", - "1992\n", - "2500\n", - ",5\n", - "90%,\n", - "90%,\n", - "1959\n", - "40\n", - "40\n", - "50%\n", - "70\n", - "1000\n", - "1000\n", - "1997\n", - "(GDP)\n", - "74772\n", - ",“\n", - ",“\n", - ",“\n", - "”。\n", - ":“\n", - "……\n", - "100\n", - ":“\n", - "400\n", - "1∶5\n", - "、1∶1\n", - "80%\n", - "1∶5\n", - ";1∶1\n", - "5—10\n", - "250\n", - "22\n", - "1100\n", - ")“\n", - "—1\n", - "17\n", - "—1\n", - ",16\n", - "87.5\n", - "0.5\n", - "”,\n", - "80\n", - "40\n", - "40\n", - "》、《\n", - "》、《\n", - "26\n", - "1400\n", - "1600\n", - "1595\n", - "17\n", - "),\n", - "45\n", - "),\n", - "》(\n", - "》)\n", - "245\n", - "”。\n", - "”、“\n", - "”,\n", - "100\n", - "17\n", - "”,\n", - "100\n", - ",“\n", - "25\n", - "》,\n", - ",“\n", - "100\n", - "200\n", - "”,\n", - "150\n", - "100\n", - "”、“\n", - ")。\n", - "3.4%,\n", - "50%\n", - "30%。\n", - "200\n", - "200\n", - "”、“\n", - "1994\n", - ":“\n", - "5%,\n", - "52%,\n", - "3.3\n", - "1986\n", - "42\n", - "800\n", - "40\n", - ",1995\n", - "1997\n", - "52\n", - "23\n", - "1997\n", - "》,\n", - "1985\n", - "1995\n", - "、1996\n", - "、1997\n", - "33.87\n", - "、38.65\n", - "、42.57\n", - "9.33、1.83、12.08\n", - "》,\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》,\n", - "2460\n", - "1992\n", - "21\n", - "》。\n", - "540\n", - "262.2\n", - "27.3%。\n", - "17\n", - "”T\n", - "———\n", - "———\n", - "3∶0\n", - "1∶2\n", - "2∶0\n", - "17\n", - "1∶2\n", - "2∶1\n", - "』。\n", - "MVP\n", - "NBA\n", - ")。\n", - "NBA\n", - "(MVP)\n", - ",“\n", - "45\n", - "NBA\n", - "59\n", - "83\n", - "81∶83\n", - "90\n", - "”。\n", - "NBA\n", - "87∶86\n", - "4∶2\n", - "NBA\n", - "32\n", - "120\n", - "”。\n", - "73\n", - ",“\n", - "”。\n", - "1∶0\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - ",20\n", - "”(\n", - "2∶0,\n", - "42\n", - "2∶0\n", - "27\n", - "3∶1\n", - "1∶0\n", - "2700\n", - "21\n", - "148\n", - "3000\n", - "3/4。\n", - ",《\n", - ")》\n", - ":《\n", - ")》\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "20%\n", - "21\n", - "21\n", - "---\n", - "1996\n", - "390\n", - "1000\n", - "343\n", - "53042\n", - ",1996\n", - "20%,\n", - "75%。\n", - "1996\n", - "80\n", - ",1983\n", - "52%,75%\n", - "65\n", - "1996\n", - "100\n", - "1997\n", - "”(\n", - ")、“\n", - "”(\n", - ")、“\n", - "”(\n", - ":1996\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》,\n", - "》。\n", - "2∶3\n", - "3∶0\n", - "15∶12\n", - "15∶8\n", - "1∶5\n", - "15∶11\n", - "17∶16\n", - "8∶4、13∶9\n", - "、14\n", - "、15\n", - "、16\n", - "1∶3\n", - "3∶2\n", - ":“\n", - ",“\n", - "”。\n", - "”。\n", - ",“\n", - ",“\n", - ":“\n", - "”“\n", - ":“\n", - "……”\n", - ":“\n", - "350\n", - "200\n", - ":“\n", - ":“300\n", - "“FILA”。\n", - ":“\n", - ",350\n", - "1200\n", - "”。\n", - ":“\n", - "145\n", - "“FILA”\n", - ":“\n", - "”6\n", - "”。\n", - "”。\n", - "”。\n", - ",“\n", - "”,\n", - 
"”。\n", - "”,\n", - "”,\n", - "1∶0\n", - "28\n", - "”。\n", - "300\n", - "”,\n", - "40\n", - "75\n", - "》6\n", - "19\n", - "》,\n", - "》,\n", - "》,\n", - "》。\n", - "97/98\n", - "0∶1\n", - "0∶0\n", - "1∶0\n", - "28\n", - "”。\n", - "300\n", - "100\n", - "97%\n", - "1995\n", - "1995\n", - "4000\n", - "1991\n", - ":“\n", - "’,‘\n", - "70\n", - "1988\n", - "500\n", - "2000\n", - "62\n", - "———\n", - "500\n", - "21\n", - "———\n", - "43\n", - "26\n", - ",20\n", - "200\n", - "》、《\n", - "》、《\n", - "34\n", - "43\n", - "43\n", - "2400\n", - "1995\n", - "43\n", - "———\n", - "———\n", - "———\n", - "———\n", - "》。\n", - "”、“\n", - "”、“\n", - "》,\n", - ")。\n", - "』。\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "”,\n", - "”,“\n", - "”、“\n", - "”,“\n", - "”,\n", - "、“\n", - ",《\n", - ",《\n", - ":『\n", - ":『\n", - "』。\n", - "』。\n", - ":『\n", - "》。\n", - ",《\n", - "》,\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "---\n", - "”,\n", - "”。\n", - "”,\n", - "”,\n", - "”。\n", - "”,\n", - "”。\n", - "1986—1997\n", - ",“\n", - ",1919\n", - "80\n", - "”、“\n", - "1840\n", - "1949\n", - "4000\n", - "”。\n", - "”。\n", - "1995\n", - "”。\n", - ",1\n", - "———\n", - "100\n", - "”、“\n", - "”、“\n", - "”、“\n", - "《21\n", - "100\n", - ":118\n", - "100\n", - "94.5%,\n", - "48.5\n", - "22.6\n", - "61\n", - "1995\n", - ",11\n", - "———\n", - "98%\n", - "5800\n", - "”,\n", - "14000\n", - "304\n", - "1997\n", - "45\n", - "7.36\n", - "19.50%。\n", - "”(\n", - "2%\n", - "3%;\n", - ",1997\n", - "68.65\n", - "19.68%。\n", - "22\n", - "1000\n", - "530\n", - "2400\n", - "3400\n", - "60\n", - "25%,\n", - "51%,1997\n", - "1992\n", - "5000\n", - "4000\n", - "4000\n", - "6000\n", - "》、《\n", - "》、《\n", - "》,\n", - "》、《\n", - "》、《\n", - "》。\n", - "”,\n", - "1991\n", - ",《\n", - "———\n", - "240\n", - "61.9%,\n", - "”。\n", - "42\n", - "90%\n", - "》。\n", - "1997\n", - "……\n", - ";1995\n", - "、1996\n", - "21\n", - "4%\n", - "1997\n", - "27\n", - "1997\n", - "27\n", - "”,\n", - "”,“\n", - "”。\n", - "1994\n", - "1995\n", - "80\n", - ",1997\n", - "600\n", - "11200\n", - "GPS\n", - "GPS\n", - "GPS\n", - "GPS\n", - "818\n", - "GPS\n", - "1396\n", - "533\n", - ",1998\n", - "100—150\n", - "”。\n", - ",“\n", - "”、“\n", - "”。\n", - "40%\n", - ",60%\n", - "17\n", - "”、“\n", - "1993\n", - "80\n", - "”。\n", - "”———\n", - "VCD、DVD,\n", - "VCD\n", - "2838\n", - "、2858\n", - "VCD\n", - "28\n", - ",“\n", - "29\n", - "100\n", - "VGA\n", - "”,\n", - "4000\n", - ",“\n", - ":“\n", - "”,“\n", - "13.08\n", - "0.61\n", - "2.55\n", - "8—14\n", - "4.73\n", - "3.29\n", - "5.73\n", - "1/3\n", - "5000\n", - "”,\n", - "3000\n", - "41\n", - ":“\n", - "40\n", - ",70\n", - "”。\n", - "”、“\n", - "”,\n", - "41\n", - "1000\n", - "5500\n", - "6500\n", - "600\n", - "160\n", - "600\n", - "21\n", - "14500\n", - "50%\n", - "”,\n", - "》,\n", - "18—20\n", - "”,\n", - ")6\n", - "105\n", - "25\n", - "》、《\n", - "》、《\n", - "》、《\n", - "JgE\n", - ":“\n", - ",“\n", - "”。\n", - "1995\n", - "1989\n", - ",1994\n", - "8800\n", - "”、“\n", - "3000\n", - "VCD\n", - "17、18\n", - "5000\n", - "”、“\n", - "1997\n", - "”、“\n", - "』,\n", - "』,\n", - "』、『\n", - "”“\n", - ",“\n", - ":“\n", - "55\n", - "75\n", - "2000\n", - "……”“\n", - "800\n", - "”“\n", - "400\n", - "”“\n", - "1000\n", - "、1000\n", - "40\n", - ",100\n", - "140\n", - "100\n", - "310.5\n", - "230\n", - "163\n", - "130\n", - "100\n", - "147.5\n", - ",1\n", - "100\n", - "100\n", - "100\n", - ":163\n", - ",4\n", - "310.5\n", - ":“\n", - ",47!\n", - ",1\n", - ":“\n", - "《1997\n", - "、“\n", - "”、\n", - "DNA\n", - "”,\n", - "”,\n", - 
"40\n", - "21\n", - "),\n", - "”,\n", - "200\n", - "175\n", - "180\n", - "1000\n", - "”,\n", - "35\n", - ",180\n", - "TIPS\n", - "5000\n", - ",6\n", - "、34\n", - "———\n", - "2000\n", - "15%。\n", - "83%\n", - ":“\n", - "———\n", - "1990\n", - "———\n", - ",80\n", - "200\n", - "100\n", - ",90\n", - "———5\n", - "》、\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1991\n", - "”。\n", - ",1997\n", - "”。\n", - "1997\n", - "1.5\n", - "400\n", - "1997\n", - "2000\n", - ",5\n", - "877\n", - ":5000\n", - ",3600\n", - "……\n", - "、VCD\n", - "、LD\n", - "『ChinaBaby\n", - "』(\n", - "———\n", - "1980\n", - "28\n", - ",13\n", - "”,\n", - "6763\n", - "15053\n", - "21003\n", - "”,\n", - "”,“\n", - "98\n", - "1986\n", - "”,\n", - "“98\n", - ",“\n", - "98\n", - ",“\n", - "5200\n", - "66%,\n", - "”、“\n", - "”。\n", - ")、78\n", - "),\n", - ",1072\n", - "70\n", - "2000\n", - ",80%\n", - ",“\n", - "7.4\n", - ",1.7\n", - "”、“\n", - "”、“\n", - "(50\n", - "27.6\n", - "85%\n", - ":“\n", - ",‘\n", - ",“\n", - "1996\n", - ":“\n", - "”,\n", - "65\n", - "350\n", - ",1997\n", - "16500\n", - "3388\n", - "3109\n", - "53\n", - "”,\n", - "1927\n", - "”。\n", - "———\n", - "》,\n", - "1944\n", - "1948\n", - "———\n", - "”,\n", - ",《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》,\n", - ",“\n", - "”,\n", - "”,\n", - "”。\n", - "”,\n", - "』,\n", - ",“\n", - "’,\n", - "240\n", - "”、“\n", - "”、“\n", - "———1997\n", - "DNV\n", - "ISO9001\n", - "20.2\n", - "46.5\n", - "30.5%,\n", - "14.9\n", - ",33.5%\n", - "1000\n", - "”,\n", - "”,\n", - "(GNT)\n", - "1987\n", - "600\n", - "17\n", - "80\n", - "”,\n", - "》(\n", - "”。\n", - "1986\n", - "》,\n", - ";1994\n", - "》,\n", - ";1995\n", - "———“\n", - ";1997\n", - "》,\n", - "1967\n", - "”。\n", - "3000\n", - "、1000\n", - "300\n", - "70\n", - "600\n", - "1995\n", - "1994\n", - "1989\n", - "80\n", - "———\n", - "”,\n", - ":“\n", - ":“\n", - "168\n", - ":“\n", - "1997\n", - "23\n", - "37\n", - "21\n", - "”,\n", - "119\n", - ",15\n", - ",70\n", - "”、“\n", - "”、“\n", - "110\n", - "1984\n", - "29\n", - "1978\n", - "784\n", - ",12\n", - "21\n", - "450\n", - "90%\n", - "73\n", - "》CD\n", - "100\n", - "500\n", - "500\n", - "”,\n", - "”。\n", - "”。\n", - "”,\n", - "》,\n", - "》1—13\n", - "(1949\n", - "—1950\n", - "),\n", - ",18\n", - "107\n", - "(1949\n", - "—1999\n", - ")。\n", - "1000\n", - "243991\n", - "1996\n", - "———\n", - "”,\n", - ",《1997·\n", - "1997\n", - "《1997·\n", - "》,\n", - "1990\n", - "1000\n", - "1984\n", - "35\n", - "130\n", - ":20\n", - "2400\n", - ",60\n", - "(knowhow),\n", - "”。\n", - "1997\n", - "17\n", - "———\n", - "1998\n", - "》。\n", - "2346\n", - "1873\n", - "1505\n", - "《1997\n", - "2000\n", - "1998\n", - "EPS\n", - "》,\n", - "400\n", - ",《\n", - "》、《\n", - ",《\n", - "》、《\n", - "”,\n", - "”。\n", - ":5\n", - "31\n", - "》、\n", - "《D\n", - "》、\n", - "》、《\n", - "29\n", - "”,\n", - "25\n", - "”5\n", - "29\n", - ",《\n", - "、108\n", - "600\n", - "21\n", - "28\n", - "2001\n", - "100\n", - "———\n", - "———\n", - "1995\n", - "”,\n", - "550\n", - "1968\n", - "———\n", - "---\n", - "”,\n", - "”。\n", - "1998\n", - "”。\n", - "90\n", - "1980\n", - "”。\n", - ",“\n", - "———\n", - "264\n", - "1000\n", - "270\n", - ",1998\n", - "50%。\n", - "19\n", - "19\n", - "———\n", - "19\n", - ")、\n", - "19\n", - ":“\n", - ",3\n", - ",7\n", - "),\n", - "--\n", - "305\n", - "1994\n", - "300\n", - "31\n", - "62\n", - "29\n", - ")》。\n", - "》,\n", - "1000\n", - "1997\n", - "”,\n", - "100\n", - "1995\n", - "”,\n", - "1991\n", - "100\n", - "90\n", - ",《\n", - "》、《\n", - "1993\n", - ",5\n", - 
"、3\n", - "、9\n", - "863\n", - "”,\n", - "》,\n", - "1996\n", - "90\n", - "47\n", - "1.2\n", - "1993\n", - "),\n", - "7300\n", - "1988\n", - "500\n", - "》。\n", - "3000\n", - "《“\n", - "———\n", - ",“\n", - "100\n", - "200\n", - "1000\n", - "》VCD\n", - "106\n", - "》,\n", - "31\n", - "———\n", - "90\n", - "3000\n", - "90\n", - "60\n", - "3000\n", - "1997\n", - "90\n", - "3000\n", - "90\n", - "300\n", - ",“\n", - "”。\n", - "”。\n", - "”:\n", - "……\n", - "200\n", - "》。\n", - "226\n", - "28\n", - "49\n", - "》、\n", - "》……\n", - "260\n", - "1995\n", - "”、1996\n", - "”、1997\n", - "31\n", - "100\n", - ":“\n", - "1992\n", - "60\n", - "2000\n", - "”。\n", - "1994\n", - "31\n", - "”。\n", - "100\n", - "31\n", - "300\n", - "1000\n", - "———\n", - "”,\n", - "”。\n", - "124\n", - "60\n", - "———\n", - "———\n", - "2200\n", - "14%\n", - "16%。\n", - "1300\n", - "”,\n", - "180\n", - ",30\n", - "(6\n", - "—7\n", - "80\n", - "29\n", - "100\n", - "》。\n", - "》、《\n", - "》、《\n", - "22\n", - "28\n", - "29\n", - "21\n", - "38\n", - ",23\n", - "”、“\n", - "———\n", - ";2\n", - "99%\n", - ",1997\n", - "650\n", - "29\n", - "》。\n", - "28\n", - ",1998\n", - "29\n", - ":6\n", - "27\n", - "29\n", - "23\n", - "———\n", - ",29\n", - "1000\n", - "26\n", - "1500\n", - "29\n", - "29\n", - "27\n", - "17\n", - "1.617\n", - "200\n", - "500\n", - "3000\n", - "7685\n", - "22\n", - "22\n", - "26\n", - "10.2\n", - ":『\n", - "1989\n", - "25\n", - "45\n", - ",1989\n", - "100\n", - "158\n", - "1995\n", - "2.5\n", - "2500\n", - "120\n", - "17.5\n", - "209\n", - "1989\n", - ",2\n", - "1.5\n", - "1400\n", - "……\n", - ",1997\n", - "3500\n", - "1986\n", - "800\n", - "48\n", - ",29\n", - "(6\n", - "29\n", - "—6\n", - "2000\n", - "312\n", - "15.5%;\n", - "628\n", - "31.2%;35\n", - "293\n", - "14.5%。\n", - "81\n", - "2000\n", - "28\n", - "28\n", - "32.59\n", - "0.59\n", - "21.38\n", - "2.38\n", - "28\n", - "26.32\n", - "0.02\n", - "26.50\n", - "0.59\n", - "28\n", - "22\n", - "30%\n", - "28\n", - "———\n", - "31\n", - "79\n", - "126\n", - "31\n", - "33\n", - ")。\n", - "28\n", - "28\n", - "27\n", - "1997\n", - "1996\n", - "28\n", - "》,\n", - "”,\n", - "”。\n", - "28\n", - "),\n", - "293\n", - "48\n", - ")260\n", - "28\n", - "1995\n", - "1996\n", - "128\n", - "11808\n", - "67714\n", - "28\n", - "”,\n", - "8300\n", - "26\n", - "47\n", - ",19\n", - "984\n", - "101\n", - ",282\n", - ",300\n", - "335\n", - "2500\n", - "》,\n", - "28\n", - ",80%\n", - "”,\n", - "”,\n", - "》,\n", - "28\n", - "』,\n", - "』,\n", - "』,\n", - "』,\n", - "』,\n", - ":“\n", - "”5\n", - ",4\n", - "5345\n", - "70%\n", - "770\n", - "、945\n", - "、976.5\n", - "1050\n", - "27\n", - "200\n", - "”。\n", - "27\n", - "500\n", - "1993\n", - ",80\n", - "1987\n", - "1974\n", - "1972\n", - "2.4\n", - "1971\n", - "100\n", - "22\n", - ":“\n", - "21\n", - "1972\n", - "300\n", - ";5\n", - "808\n", - "784\n", - ",1993\n", - "784\n", - "97%,\n", - "27\n", - "808\n", - "”,\n", - "、12\n", - ",10\n", - "570\n", - "3400\n", - "60\n", - "1100\n", - "27\n", - "103\n", - "———\n", - "500\n", - "”。\n", - ",109\n", - "27\n", - "2000\n", - ",69\n", - "1.4\n", - "88.5\n", - "98.71%;\n", - "1104\n", - "77.55%。\n", - "3.25\n", - "27\n", - "73\n", - "1998\n", - "86\n", - "1996\n", - ",1997\n", - "1996\n", - "24.4\n", - "1996\n", - "1.8\n", - "2500\n", - "185\n", - ",30\n", - "8%,\n", - ":“\n", - "1997\n", - "10%\n", - "3%\n", - "4%\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "———\n", - "23\n", - "85%\n", - "35\n", - "80%\n", - "85%\n", - ":“\n", - "1998\n", - "66%\n", - "……\n", - 
"……\n", - ":“\n", - "48\n", - "……(\n", - "26\n", - "200\n", - ":“Goodluck,China!\n", - "108\n", - "4.85\n", - "56\n", - "40\n", - "1·3\n", - "800\n", - "650\n", - "3.2\n", - "1987\n", - "”。\n", - "2∶2\n", - ",“\n", - "”。\n", - "1/8\n", - "”。\n", - "40\n", - "7∶3\n", - "”,\n", - "”。\n", - "1/8\n", - "”,\n", - "”,\n", - "17\n", - "1∶0\n", - "36\n", - ",“\n", - "”。\n", - "56\n", - "38\n", - "、12\n", - "、6\n", - "---\n", - "100\n", - "200\n", - "、5\n", - "1983\n", - "28\n", - "2002\n", - "25\n", - "1∶0\n", - "2∶0\n", - "2∶1\n", - "28\n", - "———\n", - "35\n", - ",10\n", - ",4\n", - ",32\n", - "1∶0\n", - "26\n", - "22℃/33℃\n", - "21℃/29℃\n", - "24℃/33℃\n", - "19℃/32℃\n", - "16℃/29℃\n", - "18℃/27℃\n", - "18℃/24℃\n", - "18℃/25℃\n", - "21℃/30℃\n", - "23℃/30℃\n", - "21℃/25℃\n", - "22℃/29℃\n", - "23℃/26℃\n", - "26℃/34℃\n", - "24℃/29℃\n", - "23℃/33℃\n", - "19℃/26℃\n", - "24℃/33℃\n", - "23℃/28℃\n", - "22℃/30℃\n", - "26℃/30℃\n", - "25℃/32℃\n", - "26℃/34℃\n", - "22℃/30℃\n", - "22℃/31℃\n", - "20℃/25℃\n", - "18℃/25℃\n", - "15℃/26℃\n", - "25℃/35℃\n", - "18℃/33℃\n", - "10℃/24℃\n", - "18℃/31℃\n", - "22℃/32℃\n", - "26℃/34℃\n", - "26℃/32℃\n", - "26℃/32℃\n", - "21℃/29℃\n", - "27℃/35℃\n", - "8℃/14℃\n", - "28℃/34℃\n", - "19℃/31℃\n", - "9℃/15℃\n", - "16℃/27℃\n", - "18℃/31℃\n", - "14℃/23℃\n", - "20℃/26℃\n", - "26\n", - "27\n", - "27\n", - "(6\n", - "26\n", - "—6\n", - "27\n", - "34—38\n", - "29\n", - ":26\n", - "—27\n", - "25\n", - ":“\n", - "350\n", - "60\n", - "500\n", - ",《\n", - "》、《\n", - "5000\n", - "5300\n", - "80\n", - "19\n", - "25\n", - "98\n", - "98\n", - "1.3\n", - "140\n", - ",6\n", - "25\n", - "25\n", - "”、“\n", - "”、“\n", - "170\n", - "”,\n", - "100\n", - "1984\n", - "”。\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "28\n", - "》、《\n", - "》、《\n", - "》、《\n", - "25\n", - "、79\n", - "47\n", - "25\n", - "70\n", - "———《\n", - "》,\n", - "25\n", - "25\n", - "25\n", - "25\n", - "25\n", - "1980\n", - "17\n", - "1996\n", - "25\n", - ",1990\n", - "22\n", - ",1984\n", - "35\n", - ":“\n", - ",1949\n", - "》(\n", - "),《\n", - "1988\n", - ",《\n", - "》,\n", - "90\n", - "1988\n", - "1982\n", - "1987\n", - ")》\n", - "1981\n", - "”,\n", - "1997\n", - "905804\n", - "3788041\n", - "1983\n", - "25\n", - "20℃/28℃\n", - "21℃/28℃\n", - "24℃/33℃\n", - "19℃/32℃\n", - "18℃/30℃\n", - "19℃/27℃\n", - "17℃/23℃\n", - "20℃/27℃\n", - "19℃/28℃\n", - "23℃/28℃\n", - "22℃/27℃\n", - "22℃/26℃\n", - "23℃/26℃\n", - "23℃/27℃\n", - "22℃/28℃\n", - "24℃/31℃\n", - "19℃/25℃\n", - "22℃/33℃\n", - "23℃/27℃\n", - "22℃/33℃\n", - "25℃/31℃\n", - "25℃/29℃\n", - "26℃/33℃\n", - "22℃/30℃\n", - "22℃/30℃\n", - "20℃/24℃\n", - "18℃/24℃\n", - "11℃/24℃\n", - "23℃/33℃\n", - "18℃/30℃\n", - "11℃/26℃\n", - "17℃/30℃\n", - "22℃/32℃\n", - "25℃/28℃\n", - "25℃/32℃\n", - "25℃/32℃\n", - "20℃/28℃\n", - "28℃/35℃\n", - "8℃/17℃\n", - "29℃/37℃\n", - "22℃/33℃\n", - "10℃/17℃\n", - "13℃/23℃\n", - "11℃/19℃\n", - "10℃/18℃\n", - "21℃/28℃\n", - "25\n", - "26\n", - "(6\n", - "25\n", - "—6\n", - "26\n", - "———\n", - "———\n", - "”,\n", - "1997\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "1999\n", - "2000\n", - "100\n", - ",6\n", - "22\n", - "31\n", - ",“\n", - "70\n", - ")、\n", - ")、\n", - ")、\n", - "300\n", - "400\n", - "1998\n", - "1.1\n", - "1992\n", - "100\n", - "———\n", - "23\n", - "23\n", - "23\n", - "16.91\n", - "0.4\n", - "23\n", - "29.84\n", - "0.48\n", - "23\n", - "17\n", - "33.08\n", - "0.37\n", - ",6\n", - "23\n", - "”,\n", - ",3\n", - "476\n", - "337\n", - ",“\n", - "500\n", - "78\n", - "670\n", - ":“\n", - "……”36\n", - 
"---\n", - "80\n", - "2010\n", - "2000\n", - "21\n", - "23\n", - "1997\n", - "926\n", - "7.64%;\n", - "7000\n", - "700\n", - "”、“\n", - "”、“\n", - "”、“\n", - "44.85\n", - "2600\n", - "5700\n", - "1997\n", - "2.985\n", - "134\n", - ";1996\n", - "1997\n", - "2.1\n", - "6.5\n", - ",“\n", - "1306\n", - ",“\n", - "4500\n", - ",20\n", - "19\n", - "20℃/27℃\n", - "18℃/28℃\n", - "22℃/31℃\n", - "19℃/32℃\n", - "16℃/29℃\n", - "18℃/27℃\n", - "17℃/22℃\n", - "19℃/31℃\n", - "18℃/29℃\n", - "23℃/27℃\n", - "22℃/26℃\n", - "22℃/29℃\n", - "22℃/27℃\n", - "26℃/32℃\n", - "22℃/26℃\n", - "22℃/30℃\n", - "18℃/23℃\n", - "22℃/29℃\n", - "22℃/27℃\n", - "22℃/26℃\n", - "25℃/31℃\n", - "25℃/29℃\n", - "26℃/34℃\n", - "21℃/29℃\n", - "22℃/29℃\n", - "20℃/25℃\n", - "19℃/23℃\n", - "13℃/27℃\n", - "23℃/35℃\n", - "17℃/29℃\n", - "12℃/25℃\n", - "20℃/30℃\n", - "21℃/32℃\n", - "27℃/33℃\n", - "24℃/29℃\n", - "24℃/29℃\n", - "18℃/26℃\n", - "27℃/35℃\n", - "6℃/12℃\n", - "29℃/36℃\n", - "20℃/32℃\n", - "12℃/18℃\n", - "15℃/25℃\n", - "14℃/26℃\n", - "12℃/19℃\n", - "20℃/28℃\n", - ",24\n", - "25\n", - "(6\n", - "—6\n", - "25\n", - "23\n", - "21\n", - "》,\n", - "5.6\n", - "23\n", - "130\n", - "160\n", - "300\n", - "500\n", - "23\n", - "23\n", - "》,\n", - "》;\n", - "1998\n", - "23\n", - "23\n", - "”、“\n", - "”。\n", - "100\n", - "”,6\n", - "23\n", - "23\n", - "、11\n", - "23\n", - "』,\n", - "』『\n", - "』,\n", - "』,\n", - ",『\n", - "』,\n", - "』,\n", - "』?\n", - ":『\n", - "』,\n", - "』。\n", - "』(\n", - "”、“\n", - "》,\n", - "21\n", - "”,\n", - "18℃/28℃\n", - "18℃/28℃\n", - "23℃/29℃\n", - "18℃/28℃\n", - "14℃/27℃\n", - "18℃/26℃\n", - "18℃/23℃\n", - "16℃/26℃\n", - "18℃/28℃\n", - "22℃/26℃\n", - "22℃/29℃\n", - "22℃/28℃\n", - "23℃/28℃\n", - "24℃/27℃\n", - "20℃/26℃\n", - "21℃/28℃\n", - "19℃/22℃\n", - "22℃/29℃\n", - "22℃/27℃\n", - "23℃/27℃\n", - "28℃/33℃\n", - "25℃/29℃\n", - "27℃/36℃\n", - "21℃/28℃\n", - "24℃/30℃\n", - "20℃/25℃\n", - "18℃/22℃\n", - "14℃/26℃\n", - "24℃/34℃\n", - "19℃/28℃\n", - "11℃/24℃\n", - "17℃/28℃\n", - "19℃/30℃\n", - "25℃/32℃\n", - "27℃/31℃\n", - "27℃/30℃\n", - "17℃/26℃\n", - "27℃/34℃\n", - "6℃/11℃\n", - "28℃/37℃\n", - "20℃/32℃\n", - "11℃/18℃\n", - "13℃/25℃\n", - "13℃/24℃\n", - "12℃/22℃\n", - "20℃/29℃\n", - "23\n", - "(6\n", - "23\n", - "—6\n", - "22\n", - "———“\n", - "”。\n", - "1000\n", - "826.7\n", - "1994\n", - "67\n", - "83\n", - "21\n", - ":《\n", - "》(\n", - ")、《“\n", - "》(\n", - ")、《“\n", - "》(\n", - ")、《\n", - "”》(\n", - ")、《“\n", - "》(\n", - ")。\n", - ":《\n", - "”》(\n", - ")、《\n", - "》(\n", - ")、《“\n", - ":《21200》(\n", - ")。\n", - "”、“\n", - "”。\n", - "22\n", - "22\n", - "31\n", - "17\n", - "307\n", - "67\n", - ",87\n", - ",123\n", - ",30\n", - "22\n", - "22\n", - "22\n", - ",12\n", - "4.\n", - "3.\n", - "2.\n", - ":1.\n", - ",1993\n", - "22\n", - "21\n", - ",“\n", - ",1991\n", - ":“\n", - "1996\n", - ":“\n", - "”。\n", - ":“\n", - "22\n", - ":“\n", - "……\n", - "23\n", - "1979\n", - "25℃/31℃\n", - "20℃/28℃\n", - "23℃/30℃\n", - "18℃/26℃\n", - "13℃/26℃\n", - "18℃/22℃\n", - "16℃/23℃\n", - "18℃/26℃\n", - "16℃/27℃\n", - "22℃/29℃\n", - "24℃/28℃\n", - "22℃/26℃\n", - "23℃/29℃\n", - "26℃/33℃\n", - "21℃/28℃\n", - "26℃/34℃\n", - "19℃/23℃\n", - "25℃/33℃\n", - "26℃/30℃\n", - "24℃/29℃\n", - "28℃/34℃\n", - "26℃/32℃\n", - "27℃/36℃\n", - "21℃/28℃\n", - "22℃/29℃\n", - "21℃/27℃\n", - "17℃/26℃\n", - "13℃/27℃\n", - "26℃/36℃\n", - "17℃/31℃\n", - "7℃/22℃\n", - "17℃/29℃\n", - "19℃/29℃\n", - "26℃/33℃\n", - "28℃/31℃\n", - "29℃/31℃\n", - "18℃/28℃\n", - "27℃/35℃\n", - "7℃/13℃\n", - "29℃/37℃\n", - "17℃/29℃\n", - "8℃/17℃\n", - "13℃/24℃\n", - "15℃/24℃\n", - "12℃/21℃\n", - 
"21℃/33℃\n", - ",22\n", - "23\n", - "(6\n", - "22\n", - "—6\n", - "23\n", - ",30\n", - "21\n", - "21\n", - "19\n", - "1995\n", - "1996\n", - "8799\n", - "4000\n", - ")、\n", - "1993\n", - "1991\n", - "1995\n", - "”。\n", - "、9\n", - "、9\n", - ":“\n", - "》。\n", - ",《\n", - "”、“\n", - "”。\n", - ":“\n", - "67\n", - "”,\n", - "17\n", - "”。\n", - "》,\n", - "》。\n", - "40\n", - "》,50\n", - "”、“\n", - "200\n", - "———\n", - "300\n", - "500\n", - "200\n", - "200\n", - "2000\n", - "1995\n", - ":“\n", - "”,\n", - "27\n", - ":“\n", - "1000\n", - ":“\n", - "”、\n", - "700\n", - "65\n", - "500\n", - "200\n", - "2.5\n", - "4000\n", - "2000\n", - "60\n", - "2000\n", - ",2.5\n", - "2.5\n", - "1990\n", - ":“\n", - "27\n", - "2.3\n", - "124\n", - "112\n", - "4800\n", - "———\n", - "』。\n", - "』。\n", - "』、『\n", - "』,\n", - "』(\n", - ")。\n", - "8000\n", - "2000\n", - "40\n", - "10082\n", - "1956\n", - "1958\n", - "1000\n", - "14278\n", - "1122\n", - "3572\n", - "40\n", - "2000\n", - "31\n", - "930\n", - "5000\n", - "、10\n", - "3000\n", - "8000\n", - "1998\n", - "1996\n", - "》,\n", - "400\n", - "32\n", - ",80%\n", - "9.6\n", - "61\n", - "2000\n", - "1996\n", - "14500\n", - "100\n", - ")、\n", - ")4\n", - "1993\n", - ":“\n", - "150\n", - ":5\n", - ",335\n", - "300\n", - "22\n", - "”。\n", - "19\n", - ",7\n", - ",“\n", - "”,\n", - "1517\n", - "785\n", - ",“\n", - ",5\n", - ",“\n", - "4986\n", - "12%。\n", - "”)\n", - "200\n", - "100\n", - "”,\n", - "”———\n", - "”。\n", - ":6\n", - "19\n", - ",10\n", - "”,\n", - "700\n", - "1.2\n", - "35\n", - "1700\n", - "100\n", - ",9\n", - "22\n", - "800\n", - "80\n", - ",90\n", - "1.5\n", - "8.6\n", - "860\n", - "1800\n", - "1/3\n", - "1/3。\n", - "1986\n", - "650\n", - "700\n", - "』,\n", - "』。\n", - ":『\n", - "○○○\n", - "1/3”\n", - "1/3,\n", - "1/3,\n", - "1/3。\n", - "60%\n", - "1994\n", - "60\n", - "3000\n", - "184\n", - "7%。\n", - "1997\n", - "41\n", - "、36\n", - "52\n", - "1993\n", - "154\n", - "、“\n", - "、“\n", - "90\n", - ",“\n", - "”,“\n", - "”,\n", - "1997\n", - "88%;\n", - "36\n", - "86%,\n", - "70\n", - "80\n", - "”,\n", - "90\n", - "”。\n", - "”。\n", - ")(\n", - "———\n", - "”、“\n", - "”。\n", - "”。\n", - ",“\n", - "”。\n", - "17\n", - "”。\n", - "”,\n", - "”。\n", - "”。\n", - "”。\n", - "”,\n", - "0.18\n", - "1954\n", - "27\n", - "1/4\n", - "』。\n", - "……(\n", - "19\n", - "》,\n", - "3000\n", - "100\n", - "———\n", - ",“\n", - "、7\n", - ",2\n", - ",4\n", - "1/8\n", - "4∶0\n", - "———\n", - "22\n", - "19\n", - "555\n", - "11.66\n", - "0∶4\n", - "1∶0\n", - "19\n", - "18℃/31℃\n", - "18℃/33℃\n", - "22℃/36℃\n", - "16℃/31℃\n", - "13℃/27℃\n", - "17℃/26℃\n", - "15℃/21℃\n", - "14℃/26℃\n", - "16℃/26℃\n", - "22℃/26℃\n", - "21℃/29℃\n", - "21℃/27℃\n", - "24℃/31℃\n", - "23℃/27℃\n", - "20℃/26℃\n", - "24℃/35℃\n", - "18℃/23℃\n", - "22℃/37℃\n", - "25℃/32℃\n", - "20℃/25℃\n", - "28℃/33℃\n", - "25℃/30℃\n", - "26℃/36℃\n", - "22℃/31℃\n", - "21℃/28℃\n", - "22℃/28℃\n", - "17℃/26℃\n", - "12℃/27℃\n", - "25℃/36℃\n", - "17℃/31℃\n", - "14℃/27℃\n", - "14℃/30℃\n", - "20℃/30℃\n", - "26℃/34℃\n", - "28℃/34℃\n", - "28℃/34℃\n", - "22℃/28℃\n", - "26℃/35℃\n", - "7℃/16℃\n", - "28℃/38℃\n", - "22℃/35℃\n", - "17℃/25℃\n", - "12℃/22℃\n", - "12℃/21℃\n", - "13℃/22℃\n", - "17℃/27℃\n", - "(6\n", - "19\n", - "—6\n", - "98\n", - "19\n", - ",1995\n", - ":“\n", - "”。\n", - ",5\n", - "”,648\n", - "1500\n", - "150\n", - ",1993\n", - "19—22\n", - "”,\n", - "》,\n", - "”。\n", - ",1930\n", - "80\n", - "1000\n", - ",“\n", - "1998\n", - "27\n", - "93\n", - "1997\n", - "19\n", - ":“\n", - "”,\n", - "70\n", - "80\n", - "”,\n", - "”,\n", - "”。\n", - 
"”,\n", - "”。\n", - "1978\n", - "1979\n", - "1972\n", - "”,\n", - "1963\n", - "1954\n", - "1949\n", - "96\n", - "46\n", - "1945\n", - "1944\n", - "1942\n", - "1937\n", - "1935\n", - "———\n", - "1930\n", - "1927\n", - "1926\n", - "1925\n", - "》,\n", - "”、“\n", - "”、“\n", - "”、“\n", - "1921\n", - "”,\n", - ",1920\n", - "”,\n", - ",1905\n", - "25\n", - "),\n", - "1998\n", - "27\n", - "93\n", - "……\n", - "“8·22”\n", - "6000\n", - "846\n", - "229\n", - "52\n", - "60\n", - "1400\n", - "1997\n", - "44.84\n", - "14.4\n", - "1.4\n", - ",1997\n", - "2981\n", - "316\n", - "2262\n", - "),\n", - "19℃/32℃\n", - "18℃/31℃\n", - "22℃/36℃\n", - "16℃/31℃\n", - "12℃/23℃\n", - "16℃/24℃\n", - "17℃/20℃\n", - "14℃/23℃\n", - "17℃/26℃\n", - "22℃/27℃\n", - "19℃/29℃\n", - "21℃/25℃\n", - "20℃/28℃\n", - "26℃/31℃\n", - "21℃/28℃\n", - "20℃/30℃\n", - "17℃/22℃\n", - "23℃/38℃\n", - "24℃/28℃\n", - "23℃/25℃\n", - "28℃/34℃\n", - "25℃/30℃\n", - "28℃/36℃\n", - "22℃/30℃\n", - "23℃/31℃\n", - "18℃/26℃\n", - "19℃/28℃\n", - "11℃/28℃\n", - "24℃/35℃\n", - "18℃/30℃\n", - "14℃/26℃\n", - "13℃/27℃\n", - "20℃/32℃\n", - "26℃/35℃\n", - "27℃/33℃\n", - "27℃/33℃\n", - "20℃/28℃\n", - "28℃/35℃\n", - "10℃/19℃\n", - "30℃/44℃\n", - "20℃/37℃\n", - "14℃/28℃\n", - "15℃/25℃\n", - "15℃/29℃\n", - "18℃/28℃\n", - "17℃/30℃\n", - "19\n", - "(6\n", - "—6\n", - "19\n", - ")3、\n", - "7500\n", - "4000\n", - "2、\n", - "1、\n", - "43\n", - "1986\n", - "500\n", - "130\n", - "148\n", - "300\n", - "”。\n", - ",1929\n", - ",1932\n", - "86\n", - "”。\n", - ",3000\n", - "130\n", - "———\n", - "5500\n", - "200\n", - "29\n", - "1.5\n", - "2.6\n", - "“2·13”\n", - ",1997\n", - "17\n", - "1997\n", - "17\n", - "……\n", - "”,\n", - "”———\n", - "”。\n", - "300\n", - "”。\n", - "1939\n", - ":“\n", - "1997\n", - "4200\n", - "3500\n", - ",“\n", - "”;\n", - "200\n", - "”。\n", - ",“\n", - "”,\n", - "25\n", - "”,\n", - "25\n", - ",1997\n", - "47\n", - "300\n", - "2500\n", - "3000\n", - ",(\n", - ",“\n", - "”、“\n", - "”、“\n", - "85%、98%\n", - "181\n", - "1976\n", - "200\n", - "”“\n", - "40\n", - "”———\n", - "”,\n", - "』---\n", - "”:\n", - "》,\n", - ",《\n", - "343\n", - "80\n", - "17\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》4\n", - "18℃/27℃\n", - "20℃/30℃\n", - "22℃/33℃\n", - "17℃/32℃\n", - "15℃/28℃\n", - "16℃/24℃\n", - "15℃/20℃\n", - "17℃/24℃\n", - "15℃/23℃\n", - "23℃/29℃\n", - "21℃/32℃\n", - "21℃/24℃\n", - "24℃/29℃\n", - "25℃/31℃\n", - "20℃/24℃\n", - "23℃/32℃\n", - "16℃/21℃\n", - "25℃/36℃\n", - "24℃/31℃\n", - "23℃/28℃\n", - "26℃/32℃\n", - "24℃/29℃\n", - "28℃/36℃\n", - "23℃/31℃\n", - "23℃/29℃\n", - "21℃/25℃\n", - "19℃/29℃\n", - "14℃/28℃\n", - "25℃/36℃\n", - "17℃/32℃\n", - "12℃/26℃\n", - "14℃/23℃\n", - "18℃/30℃\n", - "25℃/34℃\n", - "27℃/33℃\n", - "27℃/33℃\n", - "20℃/29℃\n", - "27℃/36℃\n", - "11℃/19℃\n", - "29℃/43℃\n", - "22℃/37℃\n", - "18℃/31℃\n", - "14℃/24℃\n", - "14℃/24℃\n", - "15℃/25℃\n", - "15℃/25℃\n", - ",17\n", - "(6\n", - "17\n", - "—6\n", - "》CD\n", - "、《\n", - "》,\n", - "60\n", - "97\n", - ":“\n", - "150\n", - "1982\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "》、《\n", - "25\n", - ",10\n", - "”,\n", - "1984\n", - "》、《\n", - "37\n", - "1994\n", - "19.319\n", - "73\n", - "1993\n", - "27\n", - ":1993\n", - "28\n", - ":“\n", - "”,\n", - "”。\n", - ",1985\n", - "1994\n", - ",5\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "17\n", - "28\n", - "———\n", - "』『\n", - "』『\n", - "』,\n", - "』。\n", - "』,\n", - "』,『\n", - "』,\n", - "』?\n", - "22℃/28℃\n", - "22℃/33℃\n", - "21℃/34℃\n", - "18℃/32℃\n", - "16℃/28℃\n", - "16℃/26℃\n", - "18℃/24℃\n", - "16℃/26℃\n", - 
"14℃/27℃\n", - "21℃/29℃\n", - "20℃/29℃\n", - "21℃/27℃\n", - "20℃/27℃\n", - "26℃/33℃\n", - "21℃/24℃\n", - "24℃/34℃\n", - "17℃/25℃\n", - "25℃/30℃\n", - "23℃/29℃\n", - "23℃/28℃\n", - "27℃/34℃\n", - "29℃/37℃\n", - "28℃/37℃\n", - "22℃/30℃\n", - "23℃/29℃\n", - "18℃/26℃\n", - "18℃/28℃\n", - "15℃/28℃\n", - "22℃/28℃\n", - "20℃/32℃\n", - "15℃/23℃\n", - "18℃/27℃\n", - "17℃/28℃\n", - "27℃/34℃\n", - "28℃/34℃\n", - "28℃/34℃\n", - "16℃/21℃\n", - "26℃/34℃\n", - "11℃/20℃\n", - "27℃/36℃\n", - "20℃/39℃\n", - "16℃/26℃\n", - "11℃/20℃\n", - "12℃/22℃\n", - "12℃/21℃\n", - "14℃/20℃\n", - "17\n", - "(6\n", - "—6\n", - "17\n", - ",5\n", - "1%。\n", - "30%、\n", - "30%\n", - "1—5\n", - "26.5\n", - "27.11%,\n", - "14.5\n", - "13.62\n", - ":5\n", - "”8\n", - ":“\n", - "300\n", - "200\n", - "100\n", - "200\n", - "”“\n", - "27\n", - "200\n", - "17\n", - "45\n", - "38\n", - ",3\n", - "11525\n", - ",33178\n", - "400\n", - "”,\n", - "”、“\n", - "———\n", - "》。\n", - "”,\n", - "1994\n", - "DTD\n", - "300\n", - ",50\n", - "1989\n", - "1977\n", - "———\n", - "”(\n", - ")(\n", - "1·5\n", - "1·2\n", - "3000\n", - "35\n", - "2、\n", - "700\n", - "1、\n", - "21\n", - "3045.69\n", - "7.9%,\n", - "37.6%,\n", - "4.1\n", - "1517.83\n", - "1%,\n", - "34.1%,\n", - "9.1\n", - "7.6\n", - "800\n", - "1000\n", - "8、\n", - "7、\n", - "6、\n", - "5、\n", - "4、\n", - "3、\n", - "2、\n", - "32\n", - "1、1996\n", - "26\n", - "、20\n", - "1996\n", - "7000\n", - ":“\n", - "21\n", - "32\n", - "150\n", - "20%\n", - "”、“\n", - "”、“\n", - "”、“\n", - "“21\n", - ",《\n", - "200\n", - "38—39\n", - "100\n", - "》(\n", - "110\n", - "1997\n", - ":6\n", - ",11\n", - "”。\n", - "4.2\n", - "1100\n", - "1000\n", - "”,\n", - "4.6\n", - ":6\n", - ",1996\n", - "7.5\n", - ",52.7\n", - "200\n", - "200\n", - "52.7\n", - "1992\n", - "263\n", - "268\n", - "》,\n", - "”,\n", - "》,\n", - "”,\n", - "”,\n", - "》、《\n", - "(1996)》、《1958\n", - "1993\n", - "99.8%,\n", - "66%\n", - "56.2%。\n", - "85%\n", - ",1997\n", - "99%\n", - "39%,\n", - "54%,\n", - "60%。\n", - "73%\n", - "1978—1996\n", - "70%\n", - "80%\n", - "90%\n", - "GDP\n", - ",1996\n", - "99.7%,1991—1996\n", - "76.6%。\n", - ":“\n", - "”,\n", - "”,\n", - "”,\n", - "”,\n", - ":“\n", - "”,\n", - "”。\n", - "』(\n", - "”,\n", - "”。\n", - "”。\n", - "53.222\n", - "52.903\n", - "A35.844\n", - "30.405\n", - "22.871\n", - "21.502\n", - "19.823\n", - "A12.644\n", - "A11.195\n", - "11.121\n", - "28.442\n", - "27.663\n", - "15.384\n", - "13.935\n", - "11.911ST\n", - "18.642ST\n", - "16.773\n", - "14.084\n", - "13.075\n", - "12.82\n", - "、25\n", - "1.9\n", - "1000—2000\n", - "),\n", - "”、“\n", - "”,\n", - "90\n", - "200%—400%,\n", - "1000\n", - "40%\n", - "500\n", - "”,\n", - "2000\n", - "130\n", - "”,\n", - "1998\n", - "29\n", - ",7\n", - "54700\n", - "1240\n", - "2.37\n", - "3.5\n", - "200\n", - "4000\n", - "5/6\n", - "、9/10\n", - "1.2\n", - "16.9\n", - "1/10,\n", - "1993\n", - "4500\n", - "3900\n", - "80\n", - "90\n", - "1000\n", - "90%;\n", - "4%\n", - "1%;\n", - "400\n", - "9000\n", - "2000\n", - "1200\n", - "500\n", - "125\n", - "1275\n", - ",5.6\n", - "、95\n", - "、22\n", - "、73\n", - "、248\n", - "、1340\n", - "25.6\n", - "”,\n", - "”,\n", - "”,\n", - "”。\n", - "———\n", - "”;\n", - "200\n", - "100\n", - "1000\n", - "(240\n", - "),\n", - "(120\n", - "),\n", - "(80\n", - "),\n", - "(30\n", - "),\n", - "(30\n", - "),\n", - "(60\n", - "),\n", - "(50\n", - "1.5\n", - ",18\n", - ",24\n", - "146.16\n", - "147.70\n", - "2.70\n", - "297\n", - "2381\n", - "1673\n", - "1800\n", - "301\n", - "300\n", - "2000\n", - "1.5\n", - "3000\n", - "360\n", - "90\n", 
- [... several thousand more deleted output lines elided: raw Chinese text fragments (MSRA corpus sentences echoed to stdout) that made up the tokenization cell's old recorded output, all removed by this patch ...]
- ]
- }
- ],
+ "outputs": [],
 "source": [
 "train_token_ids, train_input_mask, train_trailing_token_mask, train_label_ids = \\\n",
 " tokenizer.tokenize_ner(text=train_df[TEXT_COL],\n",
@@ -26118,7 +388,7 @@
 },
 {
 "cell_type": "code",
- "execution_count": null,
+ "execution_count": 10,
 "metadata": {
 "scrolled": true
 },
 [... diff of the training cell's recorded output elided: the old, truncated tqdm progress bar ("Iteration" lines at ~1.26-1.30 s/it, stopping near 18%, 504/2813) is removed, and the completed run's progress bar (~1.27-1.29 s/it) is added; entries up to 90% are omitted here ...]
+ "Iteration: 91%|█████████▏| 2568/2813 [55:06<05:15, 
1.29s/it]\u001b[A\n", + "Iteration: 91%|█████████▏| 2568/2813 [55:20<05:15, 1.29s/it]\u001b[A\n", + "Iteration: 92%|█████████▏| 2592/2813 [55:37<04:44, 1.29s/it]\u001b[A\n", + "Iteration: 92%|█████████▏| 2592/2813 [55:50<04:44, 1.29s/it]\u001b[A\n", + "Iteration: 93%|█████████▎| 2616/2813 [56:08<04:13, 1.29s/it]\u001b[A\n", + "Iteration: 93%|█████████▎| 2616/2813 [56:20<04:13, 1.29s/it]\u001b[A\n", + "Iteration: 94%|█████████▍| 2640/2813 [56:39<03:42, 1.29s/it]\u001b[A\n", + "Iteration: 94%|█████████▍| 2640/2813 [56:50<03:42, 1.29s/it]\u001b[A\n", + "Iteration: 95%|█████████▍| 2664/2813 [57:10<03:12, 1.29s/it]\u001b[A\n", + "Iteration: 95%|█████████▍| 2664/2813 [57:30<03:12, 1.29s/it]\u001b[A\n", + "Iteration: 96%|█████████▌| 2688/2813 [57:41<02:41, 1.29s/it]\u001b[A\n", + "Iteration: 96%|█████████▌| 2688/2813 [58:00<02:41, 1.29s/it]\u001b[A\n", + "Iteration: 96%|█████████▋| 2712/2813 [58:12<02:10, 1.29s/it]\u001b[A\n", + "Iteration: 96%|█████████▋| 2712/2813 [58:30<02:10, 1.29s/it]\u001b[A\n", + "Iteration: 97%|█████████▋| 2736/2813 [58:43<01:39, 1.29s/it]\u001b[A\n", + "Iteration: 97%|█████████▋| 2736/2813 [59:00<01:39, 1.29s/it]\u001b[A\n", + "Iteration: 98%|█████████▊| 2760/2813 [59:14<01:08, 1.29s/it]\u001b[A\n", + "Iteration: 98%|█████████▊| 2760/2813 [59:30<01:08, 1.29s/it]\u001b[A\n", + "Iteration: 99%|█████████▉| 2784/2813 [59:45<00:37, 1.29s/it]\u001b[A\n", + "Iteration: 99%|█████████▉| 2784/2813 [1:00:00<00:37, 1.29s/it]\u001b[A\n", + "Iteration: 100%|█████████▉| 2808/2813 [1:00:15<00:06, 1.29s/it]\u001b[A\n", + "Epoch: 100%|██████████| 1/1 [1:00:22<00:00, 3622.32s/it].29s/it]\u001b[A" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Train loss: 0.07014742273803971\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" ] } ], @@ -26207,11 +690,87 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": { "scrolled": false }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\r", + "Iteration: 0%| | 0/216 [00:00 Date: Tue, 25 Jun 2019 21:51:41 +0000 Subject: [PATCH 059/108] Minor description update. --- .../named_entity_recognition/ner_msra_bert_chinese.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb index 0f0a125e0..6718107cc 100644 --- a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb +++ b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb @@ -15,7 +15,7 @@ "\n", "\n", "\n", - "Named Entity Recognition on non-English text is not very differnt from that on English text. The only difference is the model used, which is configured by the `LANGUAGE` variable below. For non-English languages including Chinese, the *bert-base-multilingual-cased* model can be used by setting `LANGUAGE = Language.MULTILINGUAL`. For Chinese, the *bert-base-chinese* model can also be used by setting `LANGUAGE = Language.CHINESE`. On Chinese text, the performance of *bert-base-chinese* is usually better than *bert-base-multilingual-cased* because the *bert-base-chinese* model is pretrained on Chinese data only. " + "Named Entity Recognition on non-English text is not very differnt from that on English text. The only difference is the model used, which is configured by the `LANGUAGE` variable below. 
For non-English languages including Chinese, the *bert-base-multilingual-cased* model can be used by setting `LANGUAGE = Language.MULTILINGUAL`. For Chinese, the *bert-base-chinese* model can also be used by setting `LANGUAGE = Language.CHINESE`. On Chinese text, the performance of *bert-base-chinese* is usually better than *bert-base-multilingual-cased* because the *bert-base-chinese* model is pretrained on Chinese data only. On this particular dataset, the performances of the Chinese-only model and multilingual model are very similar" ] }, { @@ -103,7 +103,7 @@ "metadata": {}, "source": [ "### Get training and testing data\n", - "The dataset used in this notebook is the MSRA NER dataset. The dataset consists of 45000 training sentences and 3940 testing sentences. \n", + "The dataset used in this notebook is the MSRA NER dataset. The dataset consists of 45000 training sentences and 3442 testing sentences. \n", "\n", "The helper function `load_pandas_df` downloads the data files if they don't exist in `local_cache_path`. It returns the training or testing data frame based on `file_split`\n", "\n", From 5b5ee02422213ca0c7656fd89d7e143193b3f01d Mon Sep 17 00:00:00 2001 From: hlums Date: Tue, 25 Jun 2019 21:54:54 +0000 Subject: [PATCH 060/108] Minor description update. --- scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb index 6718107cc..611aa38cb 100644 --- a/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb +++ b/scenarios/named_entity_recognition/ner_msra_bert_chinese.ipynb @@ -116,7 +116,7 @@ "* 'B-ORG': beginning of organization entity\n", "* 'I-ORG': within organization entity\n", "\n", - "The maximum number of words in a sentence is 756. We set MAX_SEQ_LENGTH to 200 above to reduce the GPU memory needed to run this notebook. Less than 1% of testing data are longer than 200, so this should have negligible impact on the model performance evaluation." + "The maximum number of words in a sentence is 2427. We set MAX_SEQ_LENGTH to 200 above to reduce the GPU memory needed to run this notebook. Less than 1% of testing data are longer than 200, so this should have negligible impact on the model performance evaluation." 
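A minimal sketch of how the `LANGUAGE`, `MAX_SEQ_LENGTH`, and `load_pandas_df` pieces described in these notebook cells fit together; the MSRA loader's module path and the cache directory are assumptions for illustration, not taken from the patch:

```python
# Sketch only: the module path and CACHE_DIR below are assumptions.
from utils_nlp.bert.common import Language
from utils_nlp.dataset.msra_ner import load_pandas_df  # assumed module path

LANGUAGE = Language.CHINESE   # or Language.MULTILINGUAL for bert-base-multilingual-cased
MAX_SEQ_LENGTH = 200          # truncate long sentences to limit GPU memory
CACHE_DIR = "./temp"

# load_pandas_df downloads the data files on first use and returns the
# requested split as a pandas DataFrame.
train_df = load_pandas_df(local_cache_path=CACHE_DIR, file_split="train")
test_df = load_pandas_df(local_cache_path=CACHE_DIR, file_split="test")
```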
] }, { From 84d141be8c31f8cb19dd76fad08a668248b53fae Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Tue, 25 Jun 2019 23:16:16 -0400 Subject: [PATCH 061/108] bug fix in notebook --- .../text_classification/tc_mnli_bert.ipynb | 75 ++++++++++++------- 1 file changed, 46 insertions(+), 29 deletions(-) diff --git a/scenarios/text_classification/tc_mnli_bert.ipynb b/scenarios/text_classification/tc_mnli_bert.ipynb index c7c2b0344..d4e40d6c3 100644 --- a/scenarios/text_classification/tc_mnli_bert.ipynb +++ b/scenarios/text_classification/tc_mnli_bert.ipynb @@ -256,14 +256,23 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 15, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 78540/78540 [00:26<00:00, 2991.68it/s]\n", + "100%|██████████| 52360/52360 [00:17<00:00, 2981.71it/s]\n" + ] + } + ], "source": [ "tokenizer = Tokenizer(LANGUAGE, to_lower=TO_LOWER, cache_dir=BERT_CACHE_DIR)\n", "\n", - "tokens_train = tokenizer.tokenize(df_train[TEXT_COL])\n", - "tokens_test = tokenizer.tokenize(df_test[TEXT_COL])" + "tokens_train = tokenizer.tokenize(list(df_train[TEXT_COL]))\n", + "tokens_test = tokenizer.tokenize(list(df_test[TEXT_COL]))" ] }, { @@ -275,20 +284,21 @@ "- Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", "- Pad or truncate the token lists to the specified max length\n", "- Return mask lists that indicate paddings' positions\n", + "- Return token type id lists that indicate which sentence the tokens belong to (not needed for one-sequence classification)\n", "\n", "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ - "tokens_train, mask_train = tokenizer.preprocess_classification_tokens(\n", + "tokens_train, mask_train, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_train, MAX_LEN\n", ")\n", - "tokens_test, mask_test = tokenizer.preprocess_classification_tokens(\n", + "tokens_test, mask_test, _ = tokenizer.preprocess_classification_tokens(\n", " tokens_test, MAX_LEN\n", ")" ] @@ -303,7 +313,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -322,7 +332,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "metadata": { "scrolled": true }, @@ -338,17 +348,17 @@ "name": "stdout", "output_type": "stream", "text": [ - "epoch:1/1; batch:1->246/2454; loss:1.824086\n", - "epoch:1/1; batch:247->492/2454; loss:0.446337\n", - "epoch:1/1; batch:493->738/2454; loss:0.298814\n", - "epoch:1/1; batch:739->984/2454; loss:0.265785\n", - "epoch:1/1; batch:985->1230/2454; loss:0.101790\n", - "epoch:1/1; batch:1231->1476/2454; loss:0.251120\n", - "epoch:1/1; batch:1477->1722/2454; loss:0.040894\n", - "epoch:1/1; batch:1723->1968/2454; loss:0.038339\n", - "epoch:1/1; batch:1969->2214/2454; loss:0.021586\n", - "epoch:1/1; batch:2215->2454/2454; loss:0.130719\n", - "[Training time: 0.980 hrs]\n" + "epoch:1/1; batch:1->246/2454; loss:1.584357\n", + "epoch:1/1; batch:247->492/2454; loss:0.110689\n", + "epoch:1/1; batch:493->738/2454; loss:0.208907\n", + "epoch:1/1; batch:739->984/2454; loss:0.423804\n", + "epoch:1/1; batch:985->1230/2454; loss:0.035525\n", + "epoch:1/1; batch:1231->1476/2454; loss:0.189890\n", + "epoch:1/1; 
batch:1477->1722/2454; loss:0.216201\n", + "epoch:1/1; batch:1723->1968/2454; loss:0.245825\n", + "epoch:1/1; batch:1969->2214/2454; loss:0.138958\n", + "epoch:1/1; batch:2215->2454/2454; loss:0.066018\n", + "[Training time: 0.963 hrs]\n" ] } ], @@ -376,14 +386,14 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "52384it [11:54, 88.97it/s] \n" + "52384it [11:51, 88.76it/s] \n" ] } ], @@ -403,7 +413,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -412,15 +422,15 @@ "text": [ " precision recall f1-score support\n", "\n", - " fiction 0.90 0.94 0.92 10275\n", - " government 0.97 0.93 0.95 10292\n", - " slate 0.88 0.85 0.87 10277\n", + " fiction 0.88 0.96 0.91 10275\n", + " government 0.94 0.94 0.94 10292\n", + " slate 0.91 0.80 0.85 10277\n", " telephone 0.99 1.00 0.99 11205\n", " travel 0.95 0.97 0.96 10311\n", "\n", - " accuracy 0.94 52360\n", - " macro avg 0.94 0.94 0.94 52360\n", - "weighted avg 0.94 0.94 0.94 52360\n", + " accuracy 0.93 52360\n", + " macro avg 0.93 0.93 0.93 52360\n", + "weighted avg 0.93 0.93 0.93 52360\n", "\n" ] } @@ -428,6 +438,13 @@ "source": [ "print(classification_report(labels_test, preds, target_names=label_encoder.classes_))" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { From bd1bdf3fdf7b769aebb2e709a0cfe7b80011b4e9 Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Tue, 25 Jun 2019 23:36:19 -0400 Subject: [PATCH 062/108] bug fix --- scenarios/text_classification/tc_dac_bert_ar.ipynb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scenarios/text_classification/tc_dac_bert_ar.ipynb b/scenarios/text_classification/tc_dac_bert_ar.ipynb index f7a56b655..8cea7806f 100644 --- a/scenarios/text_classification/tc_dac_bert_ar.ipynb +++ b/scenarios/text_classification/tc_dac_bert_ar.ipynb @@ -327,8 +327,8 @@ "outputs": [], "source": [ "tokenizer = Tokenizer(LANGUAGE, cache_dir=BERT_CACHE_DIR)\n", - "tokens_train = tokenizer.tokenize(df_train[text_col].astype(str))\n", - "tokens_test = tokenizer.tokenize(df_test[text_col].astype(str))" + "tokens_train = tokenizer.tokenize(list(df_train[text_col].astype(str)))\n", + "tokens_test = tokenizer.tokenize(list(df_test[text_col].astype(str)))" ] }, { From 4a9633f70d2fc8d568bc116007561dc9b381891c Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Tue, 25 Jun 2019 23:58:44 -0400 Subject: [PATCH 063/108] old files --- README.md | 15 - scenarios/README.md | 36 ++ scenarios/data_prep/README.md | 4 +- scenarios/data_prep/stsbenchmark.ipynb | 417 ++----------- .../entailment_xnli_multilingual.ipynb | 581 +++++++++++++++++ scenarios/named_entity_recognition/README.md | 8 + .../ner_wikigold_bert.ipynb | 204 +++--- scenarios/sentence_similarity/README.md | 24 +- .../gensen_aml_deep_dive.ipynb | 219 ++++--- .../sentence_similarity/gensen_config.json | 16 +- scenarios/sentence_similarity/gensen_train.py | 586 +++++++++--------- scenarios/text_classification/README.md | 3 + tests/conftest.py | 12 +- .../unit/test_bert_sequence_classification.py | 40 ++ tests/unit/test_bert_token_classification.py | 9 + tests/unit/test_dataset.py | 22 +- tests/unit/test_word_embeddings.py | 9 +- tools/generate_conda_file.py | 5 +- utils_nlp/bert/sequence_classification.py | 85 ++- utils_nlp/bert/token_classification.py | 54 +- utils_nlp/dataset/preprocess.py | 68 +- 
utils_nlp/dataset/stsbenchmark.py | 125 ++-- utils_nlp/dataset/wikigold.py | 69 +-- utils_nlp/dataset/xnli.py | 89 ++- 24 files changed, 1624 insertions(+), 1076 deletions(-) create mode 100644 scenarios/README.md create mode 100644 scenarios/entailment/entailment_xnli_multilingual.ipynb create mode 100644 tests/unit/test_bert_sequence_classification.py diff --git a/README.md b/README.md index 7c736f652..4ad19b9a3 100755 --- a/README.md +++ b/README.md @@ -3,25 +3,10 @@ | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | master | [![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/unit-test-master?branchName=master)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=22&branchName=master) | | staging | [![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/unit-test-staging?branchName=staging)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=21&branchName=staging) | - # NLP Best Practices This repository contains examples and best practices for building NLP systems, provided as Jupyter notebooks and utility functions. The focus of the repository is on state-of-the-art methods and common scenarios that are popular among researchers and practitioners working on problems involving text and language. -The following section includes a list of the available scenarios. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities. - - -## Scenarios - - -| Scenario | Applications | Languages | Models | -|---| ------------------------ | -------------------------------------------- | ------------------- | -|[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| -|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| -|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance| -|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec
fastText
GloVe| - - ## Planning All feature planning is done via projects, milestones, and issues in this repository. diff --git a/scenarios/README.md b/scenarios/README.md new file mode 100644 index 000000000..b86aa9ff0 --- /dev/null +++ b/scenarios/README.md @@ -0,0 +1,36 @@ +# NLP Scenarios + +This folder contains examples and best practices, written in Jupyter notebooks, for building Natural Language Processing systems for different scenarios. + +## Summary + +The following summarizes each scenario of the best practice notebooks. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities. + +| Scenario | Applications | Languages | Models | +|---| ------------------------ | -------------------------------------------- | ------------------- | +|[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| +|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| +|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance| +|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec
fastText
GloVe| + +## Azure-enhanced notebooks + +Azure products and services are used in certain notebooks to enhance the efficiency of developing Natural Language systems at scale. + +To successfully run these notebooks, the users **need an Azure subscription** or can [use Azure for free](https://azure.microsoft.com/en-us/free/). + +The Azure products featured in the notebooks include: + +* [Azure Machine Learning service](https://azure.microsoft.com/en-us/services/machine-learning-service/) - Azure Machine Learning service is a cloud service used to train, deploy, automate, and manage machine learning models, all at the broad scale that the cloud provides. It is used across various notebooks for the AI model development related tasks like: + * Using Datastores + * Tracking and monitoring metrics to enhance the model creation process + * Distributed Training + * Hyperparameter tuning + * Scaling up and out on Azure Machine Learning Compute + * Deploying a web service to both Azure Container Instance and Azure Kubernetes Service + +* [Azure Kubernetes Service](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aks) - You can use Azure Machine Learning service to host your classification model in a web service deployment on Azure Kubernetes Service (AKS). AKS is good for high-scale production deployments and provides autoscaling, and fast response times. + +* [Azure Container Instance](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aci)- You can use Azure Machine Learning service to host your classification model in a web service deployment on Azure Container Instance (ACI). ACI is good for low scale, CPU-based workloads. + +There may be other Azure service or products used in the notebooks. Introduction and/or reference of those will be provided in the notebooks. diff --git a/scenarios/data_prep/README.md b/scenarios/data_prep/README.md index eaf84ad28..5e13abec5 100644 --- a/scenarios/data_prep/README.md +++ b/scenarios/data_prep/README.md @@ -25,7 +25,7 @@ STS Benchmark - sts_load.ipynb + stsbenchmark.ipynb Downloads and cleans the STS Benchmark dataset. Shows an example of tokenizing and removing stopwords using the popular spaCy library. @@ -34,7 +34,7 @@ MSR Paraphrase Corpus - msrpc_load.ipynb + msrpc.ipynb Download and clean the MSR Paraphrase corpus. diff --git a/scenarios/data_prep/stsbenchmark.ipynb b/scenarios/data_prep/stsbenchmark.ipynb index ddd649814..e76967a79 100644 --- a/scenarios/data_prep/stsbenchmark.ipynb +++ b/scenarios/data_prep/stsbenchmark.ipynb @@ -46,7 +46,7 @@ "source": [ "import sys\n", "\n", - "sys.path.append(\"../../../\") ## set the environment path\n", + "sys.path.append(\"../../\") ## set the environment path\n", "\n", "import os\n", "import azureml.dataprep as dp\n", @@ -67,7 +67,7 @@ "outputs": [], "source": [ "STS_URL = \"http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz\"\n", - "BASE_DATA_PATH = \"../../../data\"\n", + "BASE_DATA_PATH = \"../../data\"\n", "RAW_DATA_PATH = os.path.join(BASE_DATA_PATH, \"raw\")\n", "CLEAN_DATA_PATH = os.path.join(BASE_DATA_PATH, \"clean\")" ] @@ -76,14 +76,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 01 Data Download" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Make a directory for the data if it doesn't already exist, and then download." 
+ "### 01 Data Download\n", + "In this section we \n", + "* load raw data into a dataframe\n", + "* peek into the first 5 rows" ] }, { @@ -100,68 +96,21 @@ "cell_type": "code", "execution_count": 4, "metadata": {}, - "outputs": [], - "source": [ - "def download_sts(url, dirpath):\n", - " zipfile = maybe_download(url, work_directory=dirpath)\n", - " unzipped = stsbenchmark._extract_sts(zipfile, target_dirpath=dirpath, tmode=\"r:gz\")\n", - " return zipfile, unzipped" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "418kB [00:03, 138kB/s] " + "100%|██████████| 401/401 [00:01<00:00, 310KB/s] \n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Data downloaded to ../../../data/raw/stsbenchmark\n" + "Data downloaded to ../../data/raw/raw/stsbenchmark\n" ] }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], - "source": [ - "tarfile, datapath = download_sts(STS_URL, RAW_DATA_PATH)\n", - "print(\"Data downloaded to {}\".format(datapath))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 02 Data Understanding\n", - "In this section we \n", - "* load raw data into a dataframe\n", - "* peek into the first 10 rows" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can load the data using a `read` function that has built-in automatic filetype inference:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ { "data": { "text/html": [ @@ -183,13 +132,13 @@ " \n", " \n", " \n", - " Column1\n", - " Column2\n", - " Column3\n", - " Column4\n", - " Column5\n", - " Column6\n", - " Column7\n", + " column_0\n", + " column_1\n", + " column_2\n", + " column_3\n", + " column_4\n", + " column_5\n", + " column_6\n", " \n", " \n", " \n", @@ -198,7 +147,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 1\n", + " 0001\n", " 5.00\n", " A plane is taking off.\n", " An air plane is taking off.\n", @@ -208,7 +157,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 4\n", + " 0004\n", " 3.80\n", " A man is playing a large flute.\n", " A man is playing a flute.\n", @@ -218,7 +167,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 5\n", + " 0005\n", " 3.80\n", " A man is spreading shreded cheese on a pizza.\n", " A man is spreading shredded cheese on an uncoo...\n", @@ -228,7 +177,7 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 6\n", + " 0006\n", " 2.60\n", " Three men are playing chess.\n", " Two men are playing chess.\n", @@ -238,178 +187,59 @@ " main-captions\n", " MSRvid\n", " 2012test\n", - " 9\n", + " 0009\n", " 4.25\n", " A man is playing the cello.\n", " A man seated is playing the cello.\n", " \n", - " \n", - " 5\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 11\n", - " 4.25\n", - " Some men are fighting.\n", - " Two men are fighting.\n", - " \n", - " \n", - " 6\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 12\n", - " 0.50\n", - " A man is smoking.\n", - " A man is skating.\n", - " \n", - " \n", - " 7\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 13\n", - " 1.60\n", - " The man is playing the piano.\n", - " The man is playing the guitar.\n", - " \n", - " \n", - " 8\n", - " main-captions\n", - " MSRvid\n", - " 2012test\n", - " 14\n", - " 2.20\n", - " A man is playing on a guitar and singing.\n", - " A woman is playing an acoustic guitar and sing...\n", - " \n", - " \n", - " 9\n", - " main-captions\n", - " 
MSRvid\n", - " 2012test\n", - " 16\n", - " 5.00\n", - " A person is throwing a cat on to the ceiling.\n", - " A person throws a cat on the ceiling.\n", - " \n", " \n", "\n", "" ], "text/plain": [ - " Column1 Column2 Column3 Column4 Column5 \\\n", - "0 main-captions MSRvid 2012test 1 5.00 \n", - "1 main-captions MSRvid 2012test 4 3.80 \n", - "2 main-captions MSRvid 2012test 5 3.80 \n", - "3 main-captions MSRvid 2012test 6 2.60 \n", - "4 main-captions MSRvid 2012test 9 4.25 \n", - "5 main-captions MSRvid 2012test 11 4.25 \n", - "6 main-captions MSRvid 2012test 12 0.50 \n", - "7 main-captions MSRvid 2012test 13 1.60 \n", - "8 main-captions MSRvid 2012test 14 2.20 \n", - "9 main-captions MSRvid 2012test 16 5.00 \n", + " column_0 column_1 column_2 column_3 column_4 \\\n", + "0 main-captions MSRvid 2012test 0001 5.00 \n", + "1 main-captions MSRvid 2012test 0004 3.80 \n", + "2 main-captions MSRvid 2012test 0005 3.80 \n", + "3 main-captions MSRvid 2012test 0006 2.60 \n", + "4 main-captions MSRvid 2012test 0009 4.25 \n", "\n", - " Column6 \\\n", + " column_5 \\\n", "0 A plane is taking off. \n", "1 A man is playing a large flute. \n", "2 A man is spreading shreded cheese on a pizza. \n", "3 Three men are playing chess. \n", "4 A man is playing the cello. \n", - "5 Some men are fighting. \n", - "6 A man is smoking. \n", - "7 The man is playing the piano. \n", - "8 A man is playing on a guitar and singing. \n", - "9 A person is throwing a cat on to the ceiling. \n", "\n", - " Column7 \n", + " column_6 \n", "0 An air plane is taking off. \n", "1 A man is playing a flute. \n", "2 A man is spreading shredded cheese on an uncoo... \n", "3 Two men are playing chess. \n", - "4 A man seated is playing the cello. \n", - "5 Two men are fighting. \n", - "6 A man is skating. \n", - "7 The man is playing the guitar. \n", - "8 A woman is playing an acoustic guitar and sing... \n", - "9 A person throws a cat on the ceiling. " + "4 A man seated is playing the cello. " ] }, - "execution_count": 6, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "dflow = dp.auto_read_file(path=os.path.join(datapath, \"sts-train.csv\"))\n", - "dflow.head()" + "df = stsbenchmark.load_pandas_df(RAW_DATA_PATH, file_split=\"train\")\n", + "df.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `auto_read_file` function from the AzureML Data Prep module actually returns a `Dataflow` object, which you can read more about [here](https://docs.microsoft.com/en-us/python/api/azureml-dataprep/azureml.dataprep.dataflow?view=azure-dataprep-py). We can easily transfer the data into a Pandas DataFrame (as before) in a single line using the `to_pandas_dataframe` function, or we can continue manipulating the data as a Dataflow object using the AzureML Data Prep API. For the remainder of this notebook we will be doing the latter." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 03 Data Cleaning\n", + "### 02 Data Cleaning\n", "Now that we know about the general shape of the data, we can clean it so that it is ready for further preprocessing. The main operation we need for the STS Benchmark data is to drop all of columns except for the sentence pairs and scores." 
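A rough pandas sketch of the column selection this cleaning step performs; the `column_*` names follow the raw frame shown above, and the repository's `clean_sts` helper remains the authoritative implementation:

```python
import pandas as pd

def clean_sts_sketch(raw_df: pd.DataFrame) -> pd.DataFrame:
    # Keep only the similarity score and the two sentences, then rename the
    # generic column_* headers produced by the loader.
    df = raw_df[["column_4", "column_5", "column_6"]].copy()
    df.columns = ["score", "sentence1", "sentence2"]
    df["score"] = df["score"].astype(float)
    return df
```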
] }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "sentences = dflow.keep_columns([\"Column5\", \"Column6\", \"Column7\"]).rename_columns(\n", - " {\"Column5\": \"score\", \"Column6\": \"sentence1\", \"Column7\": \"sentence2\"}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 04 One-Shot Dataframe Loading\n", - "You can also use our STSBenchmark utils to automatically download, extract, and persist the data. You can then load the sanitized data as a pandas DataFrame in one line. " - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "418kB [00:02, 191kB/s] \n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../../data/raw/stsbenchmark\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-test.csv\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-dev.csv\n", - "Writing clean dataframe to ../../../data/clean/stsbenchmark/sts-train.csv\n" - ] - } - ], - "source": [ - "# Initializing this instance runs the downloader and extractor behind the scenes\n", - "sts_train = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -489,12 +319,13 @@ "4 A man seated is playing the cello. " ] }, - "execution_count": 9, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } ], "source": [ + "sts_train = stsbenchmark.clean_sts(df)\n", "sts_train.head()" ] }, @@ -502,13 +333,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 05 Make Lowercase\n", - "We start with simple standardization of the text by making all text lowercase." + "### 03 Make Lowercase\n", + "We do simple standardization of the text by making all text lowercase." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -588,7 +419,7 @@ "4 a man seated is playing the cello. " ] }, - "execution_count": 10, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -602,13 +433,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 06 Tokenize\n", + "### 04 Tokenize\n", "We tokenize the text using spaCy's non-destructive tokenizer." 
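A minimal sketch of the kind of non-destructive spaCy tokenization `to_spacy_tokens` applies to the two sentence columns; the spaCy model name is an assumption, and the helper shipped with the repository is the reference implementation:

```python
import spacy

# Assumption: a small English pipeline; only the tokenizer is needed here.
nlp = spacy.load("en_core_web_sm", disable=["tagger", "parser", "ner"])

def tokenize_pairs(df):
    # spaCy tokenization is non-destructive: the tokens, together with their
    # trailing whitespace, can reconstruct the original sentence exactly.
    df = df.copy()
    df["sentence1_tokens"] = [[t.text for t in nlp(s)] for s in df["sentence1"]]
    df["sentence2_tokens"] = [[t.text for t in nlp(s)] for s in df["sentence2"]]
    return df
```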
] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -680,46 +511,6 @@ " [a, man, is, playing, the, cello, .]\n", " [a, man, seated, is, playing, the, cello, .]\n", " \n", - " \n", - " 5\n", - " 4.25\n", - " some men are fighting.\n", - " two men are fighting.\n", - " [some, men, are, fighting, .]\n", - " [two, men, are, fighting, .]\n", - " \n", - " \n", - " 6\n", - " 0.50\n", - " a man is smoking.\n", - " a man is skating.\n", - " [a, man, is, smoking, .]\n", - " [a, man, is, skating, .]\n", - " \n", - " \n", - " 7\n", - " 1.60\n", - " the man is playing the piano.\n", - " the man is playing the guitar.\n", - " [the, man, is, playing, the, piano, .]\n", - " [the, man, is, playing, the, guitar, .]\n", - " \n", - " \n", - " 8\n", - " 2.20\n", - " a man is playing on a guitar and singing.\n", - " a woman is playing an acoustic guitar and sing...\n", - " [a, man, is, playing, on, a, guitar, and, sing...\n", - " [a, woman, is, playing, an, acoustic, guitar, ...\n", - " \n", - " \n", - " 9\n", - " 5.00\n", - " a person is throwing a cat on to the ceiling.\n", - " a person throws a cat on the ceiling.\n", - " [a, person, is, throwing, a, cat, on, to, the,...\n", - " [a, person, throws, a, cat, on, the, ceiling, .]\n", - " \n", " \n", "\n", "" @@ -731,11 +522,6 @@ "2 3.80 a man is spreading shreded cheese on a pizza. \n", "3 2.60 three men are playing chess. \n", "4 4.25 a man is playing the cello. \n", - "5 4.25 some men are fighting. \n", - "6 0.50 a man is smoking. \n", - "7 1.60 the man is playing the piano. \n", - "8 2.20 a man is playing on a guitar and singing. \n", - "9 5.00 a person is throwing a cat on to the ceiling. \n", "\n", " sentence2 \\\n", "0 an air plane is taking off. \n", @@ -743,11 +529,6 @@ "2 a man is spreading shredded cheese on an uncoo... \n", "3 two men are playing chess. \n", "4 a man seated is playing the cello. \n", - "5 two men are fighting. \n", - "6 a man is skating. \n", - "7 the man is playing the guitar. \n", - "8 a woman is playing an acoustic guitar and sing... \n", - "9 a person throws a cat on the ceiling. \n", "\n", " sentence1_tokens \\\n", "0 [a, plane, is, taking, off, .] \n", @@ -755,48 +536,36 @@ "2 [a, man, is, spreading, shreded, cheese, on, a... \n", "3 [three, men, are, playing, chess, .] \n", "4 [a, man, is, playing, the, cello, .] \n", - "5 [some, men, are, fighting, .] \n", - "6 [a, man, is, smoking, .] \n", - "7 [the, man, is, playing, the, piano, .] \n", - "8 [a, man, is, playing, on, a, guitar, and, sing... \n", - "9 [a, person, is, throwing, a, cat, on, to, the,... \n", "\n", " sentence2_tokens \n", "0 [an, air, plane, is, taking, off, .] \n", "1 [a, man, is, playing, a, flute, .] \n", "2 [a, man, is, spreading, shredded, cheese, on, ... \n", "3 [two, men, are, playing, chess, .] \n", - "4 [a, man, seated, is, playing, the, cello, .] \n", - "5 [two, men, are, fighting, .] \n", - "6 [a, man, is, skating, .] \n", - "7 [the, man, is, playing, the, guitar, .] \n", - "8 [a, woman, is, playing, an, acoustic, guitar, ... \n", - "9 [a, person, throws, a, cat, on, the, ceiling, .] " + "4 [a, man, seated, is, playing, the, cello, .] 
" ] }, - "execution_count": 11, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "sts_train_tok = to_spacy_tokens(\n", - " sts_train_low.head(10)\n", - ") # operating on a small slice of the data as an example\n", - "sts_train_tok.head(10)" + "sts_train_tok = to_spacy_tokens(sts_train_low.head())\n", + "sts_train_tok.head()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### 07 Optional: Remove Stop Words\n", + "### 05 Optional: Remove Stop Words\n", "Removing stop words is another common preprocessing step for NLP tasks. We use the `rm_spacy_stopwords` utility function to do this on the dataframe. This function makes use of the spaCy language model's default set of stop words. If we need to add our own set of stop words (for example, if we are doing an NLP task for a very specific domain of content), we can do this in-line by simply providing the list as the `custom_stopwords` parameter of `rm_spacy_stopwords`." ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -880,56 +649,6 @@ " [man, playing, cello, .]\n", " [man, seated, playing, cello, .]\n", " \n", - " \n", - " 5\n", - " 4.25\n", - " some men are fighting.\n", - " two men are fighting.\n", - " [some, men, are, fighting, .]\n", - " [two, men, are, fighting, .]\n", - " [men, fighting, .]\n", - " [men, fighting, .]\n", - " \n", - " \n", - " 6\n", - " 0.50\n", - " a man is smoking.\n", - " a man is skating.\n", - " [a, man, is, smoking, .]\n", - " [a, man, is, skating, .]\n", - " [man, smoking, .]\n", - " [man, skating, .]\n", - " \n", - " \n", - " 7\n", - " 1.60\n", - " the man is playing the piano.\n", - " the man is playing the guitar.\n", - " [the, man, is, playing, the, piano, .]\n", - " [the, man, is, playing, the, guitar, .]\n", - " [man, playing, piano, .]\n", - " [man, playing, guitar, .]\n", - " \n", - " \n", - " 8\n", - " 2.20\n", - " a man is playing on a guitar and singing.\n", - " a woman is playing an acoustic guitar and sing...\n", - " [a, man, is, playing, on, a, guitar, and, sing...\n", - " [a, woman, is, playing, an, acoustic, guitar, ...\n", - " [man, playing, guitar, singing, .]\n", - " [woman, playing, acoustic, guitar, singing, .]\n", - " \n", - " \n", - " 9\n", - " 5.00\n", - " a person is throwing a cat on to the ceiling.\n", - " a person throws a cat on the ceiling.\n", - " [a, person, is, throwing, a, cat, on, to, the,...\n", - " [a, person, throws, a, cat, on, the, ceiling, .]\n", - " [person, throwing, cat, ceiling, .]\n", - " [person, throws, cat, ceiling, .]\n", - " \n", " \n", "\n", "" @@ -941,11 +660,6 @@ "2 3.80 a man is spreading shreded cheese on a pizza. \n", "3 2.60 three men are playing chess. \n", "4 4.25 a man is playing the cello. \n", - "5 4.25 some men are fighting. \n", - "6 0.50 a man is smoking. \n", - "7 1.60 the man is playing the piano. \n", - "8 2.20 a man is playing on a guitar and singing. \n", - "9 5.00 a person is throwing a cat on to the ceiling. \n", "\n", " sentence2 \\\n", "0 an air plane is taking off. \n", @@ -953,11 +667,6 @@ "2 a man is spreading shredded cheese on an uncoo... \n", "3 two men are playing chess. \n", "4 a man seated is playing the cello. \n", - "5 two men are fighting. \n", - "6 a man is skating. \n", - "7 the man is playing the guitar. \n", - "8 a woman is playing an acoustic guitar and sing... \n", - "9 a person throws a cat on the ceiling. \n", "\n", " sentence1_tokens \\\n", "0 [a, plane, is, taking, off, .] 
\n", @@ -965,11 +674,6 @@ "2 [a, man, is, spreading, shreded, cheese, on, a... \n", "3 [three, men, are, playing, chess, .] \n", "4 [a, man, is, playing, the, cello, .] \n", - "5 [some, men, are, fighting, .] \n", - "6 [a, man, is, smoking, .] \n", - "7 [the, man, is, playing, the, piano, .] \n", - "8 [a, man, is, playing, on, a, guitar, and, sing... \n", - "9 [a, person, is, throwing, a, cat, on, to, the,... \n", "\n", " sentence2_tokens \\\n", "0 [an, air, plane, is, taking, off, .] \n", @@ -977,11 +681,6 @@ "2 [a, man, is, spreading, shredded, cheese, on, ... \n", "3 [two, men, are, playing, chess, .] \n", "4 [a, man, seated, is, playing, the, cello, .] \n", - "5 [two, men, are, fighting, .] \n", - "6 [a, man, is, skating, .] \n", - "7 [the, man, is, playing, the, guitar, .] \n", - "8 [a, woman, is, playing, an, acoustic, guitar, ... \n", - "9 [a, person, throws, a, cat, on, the, ceiling, .] \n", "\n", " sentence1_tokens_rm_stopwords \\\n", "0 [plane, taking, .] \n", @@ -989,34 +688,22 @@ "2 [man, spreading, shreded, cheese, pizza, .] \n", "3 [men, playing, chess, .] \n", "4 [man, playing, cello, .] \n", - "5 [men, fighting, .] \n", - "6 [man, smoking, .] \n", - "7 [man, playing, piano, .] \n", - "8 [man, playing, guitar, singing, .] \n", - "9 [person, throwing, cat, ceiling, .] \n", "\n", " sentence2_tokens_rm_stopwords \n", "0 [air, plane, taking, .] \n", "1 [man, playing, flute, .] \n", "2 [man, spreading, shredded, cheese, uncooked, p... \n", "3 [men, playing, chess, .] \n", - "4 [man, seated, playing, cello, .] \n", - "5 [men, fighting, .] \n", - "6 [man, skating, .] \n", - "7 [man, playing, guitar, .] \n", - "8 [woman, playing, acoustic, guitar, singing, .] \n", - "9 [person, throws, cat, ceiling, .] " + "4 [man, seated, playing, cello, .] " ] }, - "execution_count": 12, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "rm_spacy_stopwords(\n", - " sts_train_tok\n", - ") # operating on a small slice of the data as an example" + "rm_spacy_stopwords(sts_train_tok).head()" ] } ], @@ -1036,7 +723,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.5" + "version": "3.6.8" } }, "nbformat": 4, diff --git a/scenarios/entailment/entailment_xnli_multilingual.ipynb b/scenarios/entailment/entailment_xnli_multilingual.ipynb new file mode 100644 index 000000000..0816e8a47 --- /dev/null +++ b/scenarios/entailment/entailment_xnli_multilingual.ipynb @@ -0,0 +1,581 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Multi-lingual Inference on XNLI Dataset using BERT" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary\n", + "In this notebook, we demostrate using the [Multi-lingual BERT model](https://github.com/google-research/bert/blob/master/multilingual.md) to do language inference in Chinese and Hindi. We use the [XNLI](https://github.com/facebookresearch/XNLI) dataset and the task is to classify sentence pairs into three classes: contradiction, entailment, and neutral. \n", + "The figure below shows how [BERT](https://arxiv.org/abs/1810.04805) classifies sentence pairs. It concatenates the tokens in each sentence pairs and separates the sentences by the [SEP] token. 
A [CLS] token is prepended to the token list and used as the aggregate sequence representation for the classification task.\n", + "" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "import sys\n", + "import os\n", + "import random\n", + "import numpy as np\n", + "from sklearn.metrics import classification_report\n", + "from sklearn.preprocessing import LabelEncoder\n", + "\n", + "import torch\n", + "\n", + "nlp_path = os.path.abspath('../../')\n", + "if nlp_path not in sys.path:\n", + " sys.path.insert(0, nlp_path)\n", + "\n", + "from utils_nlp.bert.sequence_classification import BERTSequenceClassifier\n", + "from utils_nlp.bert.common import Language, Tokenizer\n", + "from utils_nlp.dataset.xnli import load_pandas_df\n", + "from utils_nlp.common.timer import Timer" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Configurations\n", + "Note that the running time shown in this notebook are on a Standard_NC12 Azure Deep Learning Virtual Machine with two NVIDIA Tesla K80 GPUs. If you want to run through the notebook quickly, you can change the `TRAIN_DATA_USED_PERCENT` to a small number, e.g. 0.01. " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "TRAIN_DATA_USED_PERCENT = 1.0\n", + "\n", + "# set random seeds\n", + "RANDOM_SEED = 42\n", + "random.seed(RANDOM_SEED)\n", + "np.random.seed(RANDOM_SEED)\n", + "torch.manual_seed(RANDOM_SEED)\n", + "num_cuda_devices = torch.cuda.device_count()\n", + "if num_cuda_devices > 1:\n", + " torch.cuda.manual_seed_all(RANDOM_SEED)\n", + "\n", + "# model configurations\n", + "LANGUAGE_CHINESE = Language.CHINESE\n", + "LANGUAGE_MULTI = Language.MULTILINGUAL\n", + "TO_LOWER = True\n", + "MAX_SEQ_LENGTH = 128\n", + "\n", + "# training configurations\n", + "NUM_GPUS = 2\n", + "BATCH_SIZE = 32\n", + "NUM_EPOCHS = 2\n", + "\n", + "# optimizer configurations\n", + "LEARNING_RATE= 5e-5\n", + "WARMUP_PROPORTION= 0.1\n", + "\n", + "# data configurations\n", + "TEXT_COL = \"text\"\n", + "LABEL_COL = \"label\"\n", + "\n", + "CACHE_DIR = \"./temp\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load Data\n", + "The XNLI dataset comes in two zip files: \n", + "* XNLI-1.0.zip: dev and test datasets in 15 languages. The original English data was translated into other languages by human translators. \n", + "* XNLI-MT-1.0.zip: training dataset in 15 languages. This dataset is machine translations of the [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) dataset. It also contains English translations of the dev and test datasets, but not used in this notebook. \n", + "\n", + "The `load_pandas_df` function downloads and extracts the zip files if they don't already exist in `local_cache_path` and returns the data subset specified by `file_split` and `language`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "train_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"train\", language=\"zh\")\n", + "dev_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"dev\", language=\"zh\")\n", + "test_df_chinese = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"test\", language=\"zh\")\n", + "\n", + "train_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"train\", language=\"hi\")\n", + "dev_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"dev\", language=\"hi\")\n", + "test_df_hindi = load_pandas_df(local_cache_path=CACHE_DIR, file_split=\"test\", language=\"hi\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Chinese training dataset size: 392702\n", + "Chinese dev dataset size: 2490\n", + "Chinese test dataset size: 5010\n", + "\n", + "Hindi training dataset size: 392702\n", + "Hindi dev dataset size: 2490\n", + "Hindi test dataset size: 5010\n", + "\n", + " text label\n", + "0 (从 概念 上 看 , 奶油 收入 有 两 个 基本 方面 产品 和 地理 ., 产品 和 ... neutral\n", + "1 (你 知道 在 这个 季节 , 我 猜 在 你 的 水平 你 把 他们 丢到 下 一个 水平... entailment\n", + "2 (我们 的 一个 号码 会 非常 详细 地 执行 你 的 指示, 我 团队 的 一个 成员 ... entailment\n", + "3 (你 怎么 知道 的 ? 所有 这些 都 是 他们 的 信息 ., 这些 信息 属于 他们 .) entailment\n", + "4 (是 啊 , 我 告诉 你 , 如果 你 去 买 一些 网球鞋 , 我 可以 看到 为什么 ... neutral\n", + " text label\n", + "0 (Conceptually क ् रीम एंजलिस में दो मूल आयाम ह... neutral\n", + "1 (आप मौसम के दौरान जानते हैं और मैं अपने स ् तर... entailment\n", + "2 (हमारे एक नंबर में से एक आपके निर ् देशों को म... entailment\n", + "3 (आप कैसे जानते हैं ? ये सब उनकी जानकारी फिर से... entailment\n", + "4 (हाँ मैं आपको बताता हूँ कि अगर आप उन टेनिस जूत... neutral\n" + ] + } + ], + "source": [ + "print(\"Chinese training dataset size: {}\".format(train_df_chinese.shape[0]))\n", + "print(\"Chinese dev dataset size: {}\".format(dev_df_chinese.shape[0]))\n", + "print(\"Chinese test dataset size: {}\".format(test_df_chinese.shape[0]))\n", + "print()\n", + "print(\"Hindi training dataset size: {}\".format(train_df_hindi.shape[0]))\n", + "print(\"Hindi dev dataset size: {}\".format(dev_df_hindi.shape[0]))\n", + "print(\"Hindi test dataset size: {}\".format(test_df_hindi.shape[0]))\n", + "print()\n", + "print(train_df_chinese.head())\n", + "print(train_df_hindi.head())" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "train_data_used_count = round(TRAIN_DATA_USED_PERCENT * train_df_chinese.shape[0])\n", + "train_df_chinese = train_df_chinese.loc[:train_data_used_count]\n", + "train_df_hindi = train_df_hindi.loc[:train_data_used_count]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Language Inference on Chinese\n", + "For Chinese dataset, we use the `bert-base-chinese` model which was pretrained on Chinese dataset only. The `bert-base-multilingual-cased` model can also be used on Chinese, but the accuracy is 3% lower." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tokenize and Preprocess\n", + "Before training, we tokenize the sentence texts and convert them to lists of tokens. The following steps instantiate a BERT tokenizer given the language, and tokenize the text of the training and testing sets." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 392702/392702 [02:26<00:00, 2682.67it/s]\n", + "100%|██████████| 5010/5010 [00:01<00:00, 3122.04it/s]\n" + ] + } + ], + "source": [ + "tokenizer_chinese = Tokenizer(LANGUAGE_CHINESE, to_lower=TO_LOWER, cache_dir=CACHE_DIR)\n", + "\n", + "train_tokens_chinese = tokenizer_chinese.tokenize(train_df_chinese[TEXT_COL])\n", + "test_tokens_chinese= tokenizer_chinese.tokenize(test_df_chinese[TEXT_COL])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In addition, we perform the following preprocessing steps in the cell below:\n", + "\n", + "* Convert the tokens into token indices corresponding to the BERT tokenizer's vocabulary\n", + "* Add the special tokens [CLS] and [SEP] to mark the beginning and end of a sentence\n", + "* Pad or truncate the token lists to the specified max length\n", + "* Return mask lists that indicate paddings' positions\n", + "* Return token type id lists that indicate which sentence the tokens belong to\n", + "\n", + "*See the original [implementation](https://github.com/google-research/bert/blob/master/run_classifier.py) for more information on BERT's input format.*" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "train_token_ids_chinese, train_input_mask_chinese, train_token_type_ids_chinese = \\\n", + " tokenizer_chinese.preprocess_classification_tokens(train_tokens_chinese, max_len=MAX_SEQ_LENGTH)\n", + "test_token_ids_chinese, test_input_mask_chinese, test_token_type_ids_chinese = \\\n", + " tokenizer_chinese.preprocess_classification_tokens(test_tokens_chinese, max_len=MAX_SEQ_LENGTH)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "label_encoder_chinese = LabelEncoder()\n", + "train_labels_chinese = label_encoder_chinese.fit_transform(train_df_chinese[LABEL_COL])\n", + "num_labels_chinese = len(np.unique(train_labels_chinese))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "classifier_chinese = BERTSequenceClassifier(language=LANGUAGE_CHINESE,\n", + " num_labels=num_labels_chinese,\n", + " cache_dir=CACHE_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Train Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch:1/2; batch:1->1228/12271; loss:1.194384\n", + "epoch:1/2; batch:1229->2456/12271; loss:0.863067\n", + "epoch:1/2; batch:2457->3684/12271; loss:0.781256\n", + "epoch:1/2; batch:3685->4912/12271; loss:1.067413\n", + "epoch:1/2; batch:4913->6140/12271; loss:0.599279\n", + "epoch:1/2; batch:6141->7368/12271; loss:0.471488\n", + "epoch:1/2; batch:7369->8596/12271; loss:0.572327\n", + "epoch:1/2; batch:8597->9824/12271; loss:0.689093\n", + "epoch:1/2; batch:9825->11052/12271; loss:0.651702\n", + "epoch:1/2; batch:11053->12271/12271; loss:0.431085\n", + "epoch:2/2; batch:1->1228/12271; loss:0.255859\n", + "epoch:2/2; batch:1229->2456/12271; loss:0.434052\n", + "epoch:2/2; batch:2457->3684/12271; loss:0.433569\n", + "epoch:2/2; batch:3685->4912/12271; loss:0.405915\n", + "epoch:2/2; batch:4913->6140/12271; 
loss:0.636128\n", + "epoch:2/2; batch:6141->7368/12271; loss:0.416685\n", + "epoch:2/2; batch:7369->8596/12271; loss:0.265789\n", + "epoch:2/2; batch:8597->9824/12271; loss:0.328964\n", + "epoch:2/2; batch:9825->11052/12271; loss:0.436310\n", + "epoch:2/2; batch:11053->12271/12271; loss:0.374193\n", + "Training time : 8.050 hrs\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " classifier_chinese.fit(token_ids=train_token_ids_chinese,\n", + " input_mask=train_input_mask_chinese,\n", + " token_type_ids=train_token_type_ids_chinese,\n", + " labels=train_labels_chinese,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)\n", + "print(\"Training time : {:.3f} hrs\".format(t.interval / 3600))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Predict on Test Data" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "5024it [00:54, 101.88it/s] " + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prediction time : 0.015 hrs\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " predictions_chinese = classifier_chinese.predict(token_ids=test_token_ids_chinese,\n", + " input_mask=test_input_mask_chinese,\n", + " token_type_ids=test_token_type_ids_chinese,\n", + " batch_size=BATCH_SIZE)\n", + "print(\"Prediction time : {:.3f} hrs\".format(t.interval / 3600))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Evaluate" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " precision recall f1-score support\n", + "\n", + "contradiction 0.81 0.84 0.82 1670\n", + " entailment 0.84 0.68 0.76 1670\n", + " neutral 0.70 0.80 0.74 1670\n", + "\n", + " accuracy 0.77 5010\n", + " macro avg 0.78 0.77 0.77 5010\n", + " weighted avg 0.78 0.77 0.77 5010\n", + "\n" + ] + } + ], + "source": [ + "predictions_chinese = label_encoder_chinese.inverse_transform(predictions_chinese)\n", + "print(classification_report(test_df_chinese[LABEL_COL], predictions_chinese))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Language Inference on Hindi\n", + "For Hindi and all other languages except Chinese, we use the `bert-base-multilingual-cased` model. 
\n", + "The preprocesing, model training, and prediction steps are the same as on Chinese data, except for the underlying tokenizer and BERT model used" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tokenize and Preprocess" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████| 392702/392702 [03:48<00:00, 1719.84it/s]\n", + "100%|██████████| 5010/5010 [00:02<00:00, 1916.46it/s]\n" + ] + } + ], + "source": [ + "tokenizer_multi = Tokenizer(LANGUAGE_MULTI, cache_dir=CACHE_DIR)\n", + "\n", + "train_tokens_hindi = tokenizer_multi.tokenize(train_df_hindi[TEXT_COL])\n", + "test_tokens_hindi= tokenizer_multi.tokenize(test_df_hindi[TEXT_COL])\n", + "\n", + "train_token_ids_hindi, train_input_mask_hindi, train_token_type_ids_hindi = \\\n", + " tokenizer_multi.preprocess_classification_tokens(train_tokens_hindi, max_len=MAX_SEQ_LENGTH)\n", + "test_token_ids_hindi, test_input_mask_hindi, test_token_type_ids_hindi = \\\n", + " tokenizer_multi.preprocess_classification_tokens(test_tokens_hindi, max_len=MAX_SEQ_LENGTH)\n", + "\n", + "label_encoder_hindi = LabelEncoder()\n", + "train_labels_hindi = label_encoder_hindi.fit_transform(train_df_hindi[LABEL_COL])\n", + "num_labels_hindi = len(np.unique(train_labels_hindi))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create and Train Classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch:1/2; batch:1->1228/12271; loss:1.091754\n", + "epoch:1/2; batch:1229->2456/12271; loss:0.992931\n", + "epoch:1/2; batch:2457->3684/12271; loss:1.045146\n", + "epoch:1/2; batch:3685->4912/12271; loss:0.799912\n", + "epoch:1/2; batch:4913->6140/12271; loss:0.815425\n", + "epoch:1/2; batch:6141->7368/12271; loss:0.564856\n", + "epoch:1/2; batch:7369->8596/12271; loss:0.726981\n", + "epoch:1/2; batch:8597->9824/12271; loss:0.764087\n", + "epoch:1/2; batch:9825->11052/12271; loss:0.964115\n", + "epoch:1/2; batch:11053->12271/12271; loss:0.502252\n", + "epoch:2/2; batch:1->1228/12271; loss:0.601600\n", + "epoch:2/2; batch:1229->2456/12271; loss:0.695099\n", + "epoch:2/2; batch:2457->3684/12271; loss:0.419610\n", + "epoch:2/2; batch:3685->4912/12271; loss:0.603106\n", + "epoch:2/2; batch:4913->6140/12271; loss:0.705180\n", + "epoch:2/2; batch:6141->7368/12271; loss:0.493404\n", + "epoch:2/2; batch:7369->8596/12271; loss:0.864921\n", + "epoch:2/2; batch:8597->9824/12271; loss:0.518601\n", + "epoch:2/2; batch:9825->11052/12271; loss:0.395920\n", + "epoch:2/2; batch:11053->12271/12271; loss:0.685858\n", + "Training time : 9.520 hrs\n" + ] + } + ], + "source": [ + "classifier_multi = BERTSequenceClassifier(language=LANGUAGE_MULTI,\n", + " num_labels=num_labels_hindi,\n", + " cache_dir=CACHE_DIR)\n", + "with Timer() as t:\n", + " classifier_multi.fit(token_ids=train_token_ids_hindi,\n", + " input_mask=train_input_mask_hindi,\n", + " token_type_ids=train_token_type_ids_hindi,\n", + " labels=train_labels_hindi,\n", + " num_gpus=NUM_GPUS,\n", + " num_epochs=NUM_EPOCHS,\n", + " batch_size=BATCH_SIZE,\n", + " lr=LEARNING_RATE,\n", + " warmup_proportion=WARMUP_PROPORTION)\n", + "print(\"Training time : {:.3f} hrs\".format(t.interval / 3600))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Predict and Evaluate" + ] + }, + { + "cell_type": "code", + 
"execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "5024it [01:02, 87.10it/s] " + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Prediction time : 0.017 hrs\n", + " precision recall f1-score support\n", + "\n", + "contradiction 0.69 0.72 0.70 1670\n", + " entailment 0.74 0.51 0.60 1670\n", + " neutral 0.58 0.74 0.65 1670\n", + "\n", + " accuracy 0.65 5010\n", + " macro avg 0.67 0.65 0.65 5010\n", + " weighted avg 0.67 0.65 0.65 5010\n", + "\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " predictions_hindi = classifier_multi.predict(token_ids=test_token_ids_hindi,\n", + " input_mask=test_input_mask_hindi,\n", + " token_type_ids=test_token_type_ids_hindi,\n", + " batch_size=BATCH_SIZE)\n", + "print(\"Prediction time : {:.3f} hrs\".format(t.interval / 3600))\n", + "predictions_hindi= label_encoder_hindi.inverse_transform(predictions_hindi)\n", + "print(classification_report(test_df_hindi[LABEL_COL], predictions_hindi))" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "pytorch", + "language": "python", + "name": "pytorch" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/scenarios/named_entity_recognition/README.md b/scenarios/named_entity_recognition/README.md index e69de29bb..cbcc3c07e 100644 --- a/scenarios/named_entity_recognition/README.md +++ b/scenarios/named_entity_recognition/README.md @@ -0,0 +1,8 @@ +# Named Entity Recognition (NER) +Named Entity Recognition (NER) is the task of detecting and classifying +real-world objects mentioned in text. Common named entities include person +names, locations, organizations, etc. The state-of-the art NER methods include +combining Long Short-Term Memory neural network with Conditional Random Field +(LSTM-CRF) and pretrained language models like BERT. NER can be used for +information extraction and filtering. It also plays an important role in other +NLP tasks like question answering and texts summarization. diff --git a/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb b/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb index 40928c806..565151a7b 100644 --- a/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb +++ b/scenarios/named_entity_recognition/ner_wikigold_bert.ipynb @@ -8,7 +8,7 @@ "*Licensed under the MIT License.*\n", "# Named Entity Recognition Using BERT\n", "## Summary\n", - "This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task. Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, and model evaluation. \n", + "This notebook demonstrates how to fine tune [pretrained BERT model](https://github.com/huggingface/pytorch-pretrained-BERT) for named entity recognition (NER) task. Utility functions and classes in the NLP Best Practices repo are used to facilitate data preprocessing, model training, model scoring, and model evaluation. 
\n", "\n", "[BERT (Bidirectional Transformers for Language Understanding)](https://arxiv.org/pdf/1810.04805.pdf) is a powerful pre-trained lanaguage model that can be used for multiple NLP tasks, including text classification, question answering, named entity recognition, etc. It's able to achieve state of the art performance with only a few epochs of fine tuning on task specific datasets. \n", "The figure below illustrates how BERT can be fine tuned for NER tasks. The input data is a list of tokens representing a sentence. In the training data, each token has an entity label. After fine tuning, the model predicts an entity label for each token in a given testing sentence. \n", @@ -29,7 +29,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 1, "metadata": { "scrolled": true }, @@ -38,18 +38,16 @@ "import sys\n", "import os\n", "import random\n", - "from seqeval.metrics import f1_score, classification_report\n", - "\n", + "from seqeval.metrics import classification_report\n", "import torch\n", - "from pytorch_pretrained_bert.tokenization import BertTokenizer\n", "\n", "nlp_path = os.path.abspath('../../')\n", "if nlp_path not in sys.path:\n", " sys.path.insert(0, nlp_path)\n", "\n", - "from utils_nlp.bert.token_classification import BERTTokenClassifier, postprocess_token_labels\n", + "from utils_nlp.bert.token_classification import BERTTokenClassifier, create_label_map, postprocess_token_labels\n", "from utils_nlp.bert.common import Language, Tokenizer\n", - "from utils_nlp.dataset.wikigold import download, read_data, get_train_test_data, get_unique_labels" + "from utils_nlp.dataset.wikigold import load_train_test_dfs, get_unique_labels" ] }, { @@ -61,16 +59,14 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 2, "metadata": { "scrolled": true }, "outputs": [], "source": [ - "# path configurations\n", - "DATA_DIR = \"./data\"\n", - "DATA_FILE = \"./data/wikigold.conll.txt\"\n", - "CACHE_DIR=\".\"\n", + "# path configuration\n", + "CACHE_DIR=\"./temp\"\n", "\n", "# set random seeds\n", "RANDOM_SEED = 100\n", @@ -82,12 +78,15 @@ "MAX_SEQ_LENGTH = 200\n", "\n", "# training configurations\n", - "DEVICE=\"gpu\"\n", "BATCH_SIZE = 16\n", "NUM_TRAIN_EPOCHS = 5\n", "\n", "# optimizer configuration\n", - "LEARNING_RATE = 3e-5" + "LEARNING_RATE = 3e-5\n", + "\n", + "# data configurations\n", + "TEXT_COL = \"sentence\"\n", + "LABELS_COL = \"labels\"" ] }, { @@ -102,18 +101,18 @@ "metadata": {}, "source": [ "### Get training and testing data\n", - "The dataset used in this notebook is the [wikigold dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). The `download` function downloads the data file to a user-specified directory. \n", + "The dataset used in this notebook is the [wikigold dataset](https://www.aclweb.org/anthology/W09-3302). The wikigold dataset consists of 145 mannually labelled Wikipedia articles, including 1841 sentences and 40k tokens in total. The dataset can be directly downloaded from [here](https://github.com/juand-r/entity-recognition-datasets/tree/master/data/wikigold). \n", "\n", - "The helper function `get_train_test_data` splits the dataset into training and testing sets according to `test_percentage`. 
Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", + "The helper function `load_train_test_dfs` downloads the data file if it doesn't exist in `local_cache_path`. It splits the dataset into training and testing sets according to `test_percentage`. Because this is a relatively small dataset, we set `test_percentage` to 0.5 in order to have enough data for model evaluation. Running this notebook multiple times with different random seeds produces similar results. \n", "\n", - "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). An 'X' label is added for the trailing word pieces generated by BERT, because BERT uses WordPiece tokenizer. \n", + "The helper function `get_unique_labels` returns the unique entity labels in the dataset. There are 5 unique labels in the original dataset: 'O' (non-entity), 'I-LOC' (location), 'I-MISC' (miscellaneous), 'I-PER' (person), and 'I-ORG' (organization). \n", "\n", "The maximum number of words in a sentence is 144, so we set MAX_SEQ_LENGTH to 200 above, because the number of tokens will grow after WordPiece tokenization." ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 3, "metadata": { "scrolled": true }, @@ -126,7 +125,7 @@ "Maximum sequence length in testing data is: 81\n", "\n", "Unique entity labels: \n", - "['O', 'I-LOC', 'I-MISC', 'I-PER', 'I-ORG', 'X']\n", + "['O', 'I-LOC', 'I-MISC', 'I-PER', 'I-ORG']\n", "\n", "Sample sentence: \n", "Two , Samsung based , electronic cash registers were reconstructed in order to expand their functions and adapt them for networking .\n", @@ -138,44 +137,37 @@ } ], "source": [ - "download(DATA_DIR)\n", - "wikigold_text = read_data(DATA_FILE)\n", - "train_text, train_labels, test_text, test_labels = get_train_test_data(wikigold_text, \n", - " test_percentage=0.5, \n", - " random_seed=RANDOM_SEED)\n", + "train_df, test_df = load_train_test_dfs(local_cache_path=CACHE_DIR, test_percentage=0.5,random_seed=RANDOM_SEED)\n", "label_list = get_unique_labels()\n", "print('\\nUnique entity labels: \\n{}\\n'.format(label_list))\n", - "print('Sample sentence: \\n{}\\n'.format(train_text[0]))\n", - "print('Sample sentence labels: \\n{}\\n'.format(train_labels[0]))" + "print('Sample sentence: \\n{}\\n'.format(train_df[TEXT_COL][0]))\n", + "print('Sample sentence labels: \\n{}\\n'.format(train_df[LABELS_COL][0]))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### Tokenization and Preprocessing\n", - "The `preprocess_ner_tokens` method of the `Tokenizer` class converts raw string data to numerical features, involving the following steps:\n", - "1. WordPiece tokenization.\n", - "2. Convert tokens and labels to numerical values, i.e. token ids and label ids.\n", - "3. Sequence padding or truncation according to the `max_seq_length` configuration." + "### Tokenization and Preprocessing\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Create a dictionary that maps labels to numerical values**" + "**Create a dictionary that maps labels to numerical values** \n", + "Note there is an argument called `trailing_piece_tag`. BERT uses a WordPiece tokenizer which breaks down some words into multiple tokens, e.g. 
\"criticize\" is tokenized into \"critic\" and \"##ize\". Since the input data only come with one token label for \"criticize\", within Tokenizer.prerocess_ner_tokens, the original token label is assigned to the first token \"critic\" and the second token \"##ize\" is labeled as \"X\". By default, `trailing_piece_tag` is set to \"X\". If \"X\" already exists in your data, you can set `trailing_piece_tag` to another value that doesn't exist in your data." ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "metadata": { "scrolled": true }, "outputs": [], "source": [ - "label_map = {label: i for i, label in enumerate(label_list)}" + "label_map = create_label_map(label_list, trailing_piece_tag=\"X\")" ] }, { @@ -187,7 +179,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 5, "metadata": { "scrolled": true }, @@ -202,46 +194,49 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Create numerical features** \n", - "Note there is an argument called `trailing_piece_tag`. BERT uses a WordPiece tokenizer which breaks down some words into multiple tokens, e.g. \"playing\" is tokenized into \"play\" and \"##ing\". Since the input data only come with one token label for \"playing\", within `prerocess_ner_tokens`, the original token label is assigned to the first token \"play\" and the second token \"##ing\" is labeled as \"X\". By default, `trailing_piece_tag` is set to \"X\". If \"X\" already exists in your data, you can set `trailing_piece_tag` to another value that doesn't exist in your data. " + "**Tokenize and preprocess text** \n", + "The `tokenize_ner` method of the `Tokenizer` class converts text and labels in strings to numerical features, involving the following steps:\n", + "1. WordPiece tokenization.\n", + "2. Convert tokens and labels to numerical values, i.e. token ids and label ids.\n", + "3. Sequence padding or truncation according to the `max_seq_length` configuration." ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 6, "metadata": { - "scrolled": false + "scrolled": true }, "outputs": [], "source": [ "train_token_ids, train_input_mask, train_trailing_token_mask, train_label_ids = \\\n", - " tokenizer.preprocess_ner_tokens(text=train_text,\n", - " label_map=label_map,\n", - " max_len=MAX_SEQ_LENGTH,\n", - " labels=train_labels,\n", - " trailing_piece_tag=\"X\")\n", + " tokenizer.tokenize_ner(text=train_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=train_df[LABELS_COL],\n", + " trailing_piece_tag=\"X\")\n", "test_token_ids, test_input_mask, test_trailing_token_mask, test_label_ids = \\\n", - " tokenizer.preprocess_ner_tokens(text=test_text,\n", - " label_map=label_map,\n", - " max_len=MAX_SEQ_LENGTH,\n", - " labels=test_labels,\n", - " trailing_piece_tag=\"X\")" + " tokenizer.tokenize_ner(text=test_df[TEXT_COL],\n", + " label_map=label_map,\n", + " max_len=MAX_SEQ_LENGTH,\n", + " labels=test_df[LABELS_COL],\n", + " trailing_piece_tag=\"X\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "`Tokenizer.preprocess_ner_tokens` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", + "`Tokenizer.tokenize_ner` outputs three or four lists of numerical features lists, each sublist contains features of an input sentence: \n", "1. token ids: list of numerical values each corresponds to a token.\n", "2. 
attention mask: list of 1s and 0s, 1 for input tokens and 0 for padded tokens, so that padded tokens are not attended to. \n", - "3. trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ing. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", + "3. trailing word piece mask: boolean list, `True` for the first word piece of each original word, `False` for the trailing word pieces, e.g. ##ize. This mask is useful for removing predictions on trailing word pieces, so that each original word in the input text has a unique predicted label. \n", "4. label ids: list of numerical values each corresponds to an entity label, if `labels` is provided." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 7, "metadata": { "scrolled": true }, @@ -285,19 +280,19 @@ "* Language.CHINESE: \"bert-base-chinese\"\n", "* Language.MULTILINGUAL: \"bert-base-multilingual-cased\"\n", "\n", - "Here we use the base, uncased pretrained model." + "Here we use the base, cased pretrained model." ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 8, "metadata": { "scrolled": true }, "outputs": [], "source": [ "token_classifier = BERTTokenClassifier(language=LANGUAGE,\n", - " num_labels=len(label_list),\n", + " num_labels=len(label_map),\n", " cache_dir=CACHE_DIR)" ] }, @@ -310,7 +305,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 9, "metadata": { "scrolled": false }, @@ -338,8 +333,8 @@ "\n", "Iteration: 40%|███▉ | 23/58 [00:30<00:45, 1.31s/it]\u001b[A\n", "Iteration: 40%|███▉ | 23/58 [00:49<00:45, 1.31s/it]\u001b[A\n", - "Iteration: 81%|████████ | 47/58 [01:01<00:14, 1.31s/it]\u001b[A\n", - "Epoch: 20%|██ | 1/5 [01:15<05:01, 75.44s/it]0s/it]\u001b[A\n", + "Iteration: 81%|████████ | 47/58 [01:00<00:14, 1.30s/it]\u001b[A\n", + "Epoch: 20%|██ | 1/5 [01:14<04:58, 74.52s/it]8s/it]\u001b[A\n", "Iteration: 0%| | 0/58 [00:00=1.0.43.1','numpy>=1.16.0']\n", " )" ] }, @@ -979,7 +979,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 9, "metadata": { "scrolled": true }, @@ -988,7 +988,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "Submitting C:\\Users\\lishao\\Project\\Rotation2\\NLP directory for run. The size of the directory >= 25 MB, so it can take a few minutes.\n" + "Submitting E:\\Projects\\NLP-BP\\temp\\nlp directory for run. The size of the directory >= 25 MB, so it can take a few minutes.\n" ] }, { @@ -996,9 +996,9 @@ "output_type": "stream", "text": [ "Run(Experiment: pytorch-gensen,\n", - "Id: pytorch-gensen_1560797674_e36e44f4,\n", + "Id: pytorch-gensen_1561150688_f84eab04,\n", "Type: azureml.scriptrun,\n", - "Status: Preparing)\n" + "Status: Queued)\n" ] } ], @@ -1019,7 +1019,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Horovod on AzureML**\n", + "#### Horovod on AzureML\n", "\n", "[Horovod](https://github.com/horovod/horovod) is a distributed training framework for TensorFlow, PyTorch etc. to make distributed Deep Learning fast and easy to use. We have created 2 nodes in the GPU cluster on AzureML. By using Horovod, we can use those two machines to train the model in parallel. In theory, the model trains faster on AzureML than on VM which uses single machine because it converges faster which we will get lower loss. However, by using more nodes, the model may take more time in communicating with each node. 
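The sketch below (a generic PyTorch model and optimizer, not the GenSen model itself; standard Horovod-for-PyTorch calls, launched with e.g. `horovodrun -np 2 python train.py`) illustrates what "using Horovod" amounts to; the `setup_horovod` helper invoked later in this patch presumably performs the same wiring for the multi-task model:

```python
# Minimal Horovod data-parallel sketch (generic model/optimizer; assumption:
# horovod is installed and the script is launched with horovodrun/mpirun).
import torch
import horovod.torch as hvd

hvd.init()                                    # one worker process per GPU/node
if torch.cuda.is_available():
    torch.cuda.set_device(hvd.local_rank())   # pin each worker to its own GPU

model = torch.nn.Linear(10, 2)                # stand-in for the real model
if torch.cuda.is_available():
    model = model.cuda()

# Scale the learning rate by the number of workers, wrap the optimizer so
# gradients are averaged across workers, and start every worker from the
# same initial weights.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4 * hvd.size())
optimizer = hvd.DistributedOptimizer(
    optimizer, named_parameters=model.named_parameters()
)
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
```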
The communication time could be ignored when the model is trained on the large datasets.\n", "\n", @@ -1031,7 +1031,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**Interpret the Training Results**\n", + "#### Interpret the Training Results\n", "\n", "The following chart shows the model validation loss (the less loss, the better performance) with different nodes with AmlCompute:\n", "\n", @@ -1042,28 +1042,29 @@ "From the chart, we can tell training with more nodes, the performance is getting better with lower loss." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The Azureml Widget allows an easy way to stream updates of the logged metrics right into your notebook. To use this feature install the widget by running the commands below. \n", + "\n", + "```\n", + "conda install ipywidgets\n", + "\n", + "jupyter nbextension install --py --user azureml.widgets\n", + "\n", + "jupyter nbextension enable azureml.widgets --user --py\n", + "\n", + "```" + ] + }, { "cell_type": "code", - "execution_count": 39, + "execution_count": null, "metadata": { - "scrolled": true + "scrolled": false }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "19d55fcc0871444da604b1d828d9eac4", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 's…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "RunDetails(run).show()" ] @@ -1085,7 +1086,9 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ "run.wait_for_completion(show_output=True) # this provides a verbose log" @@ -1114,6 +1117,35 @@ " ```" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.3.6 Clean up after training\n", + "\n", + "We finally delete the training script `gensen_train.py` and config file `gensen_config.json` from the project directory." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "gensen_train = os.path.join(project_folder,'utils_nlp/gensen/gensen_train.py')\n", + "gensen_config = os.path.join(project_folder,'utils_nlp/gensen/gensen_config.json')\n", + "\n", + "if os.path.isfile(gensen_train):\n", + " os.remove(gensen_train)\n", + "else:\n", + " print(\"Error: %s file not found\" % gensen_train)\n", + " \n", + "if os.path.isfile(gensen_config):\n", + " os.remove(gensen_config)\n", + "else:\n", + " print(\"Error: %s file not found\" % gensen_config)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -1135,7 +1167,7 @@ }, { "cell_type": "code", - "execution_count": 130, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1164,7 +1196,7 @@ }, { "cell_type": "code", - "execution_count": 131, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1194,54 +1226,11 @@ }, { "cell_type": "code", - "execution_count": 132, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "c61e610d4601486e9f41fd852320b47b", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_HyperDriveWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO',…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 's…" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "5c47f13e11c646cd865d4f286b70ab0c", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_UserRunWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 's…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "RunDetails(hyperdrive_run).show()" ] @@ -1331,9 +1320,9 @@ } ], "kernelspec": { - "display_name": "Python 3", + "display_name": "Python NLP CPU", "language": "python", - "name": "python3" + "name": "nlp_cpu" }, "language_info": { "codemirror_mode": { diff --git a/scenarios/sentence_similarity/gensen_config.json b/scenarios/sentence_similarity/gensen_config.json index f7e47a76c..54aff67b7 100644 --- a/scenarios/sentence_similarity/gensen_config.json +++ b/scenarios/sentence_similarity/gensen_config.json @@ -15,20 +15,20 @@ }, "data": {"paths": [ { - "train_src": "data/processed/snli_1.0_train.txt.s1.tok", - "train_trg": "data/processed/snli_1.0_train.txt.s2.tok", - "val_src": "data/processed/snli_1.0_dev.txt.s1.tok", - "val_trg": "data/processed/snli_1.0_dev.txt.s1.tok", + "train_src": "snli_1.0_train.txt.s1.tok", + "train_trg": "snli_1.0_train.txt.s2.tok", + "val_src": "snli_1.0_dev.txt.s1.tok", + "val_trg": "snli_1.0_dev.txt.s1.tok", "taskname": "snli" } ], "max_src_length": 90, "max_trg_length": 90, "task": "multi-seq2seq-nli", - "save_dir": "data/models/example", - "nli_train": "data/processed/snli_1.0_train.txt.clean.noblank", - "nli_dev": "data/processed/snli_1.0_dev.txt.clean.noblank", - "nli_test": "data/processed/snli_1.0_test.txt.clean.noblank" + "save_dir": "models/", + "nli_train": "snli_1.0_train.txt.clean.noblank", + "nli_dev": 
"snli_1.0_dev.txt.clean.noblank", + "nli_test": "snli_1.0_test.txt.clean.noblank" }, "model": { "dim_src": 2048, diff --git a/scenarios/sentence_similarity/gensen_train.py b/scenarios/sentence_similarity/gensen_train.py index 7b704a157..b29fb1dcb 100644 --- a/scenarios/sentence_similarity/gensen_train.py +++ b/scenarios/sentence_similarity/gensen_train.py @@ -15,20 +15,20 @@ This training process is based on GPU only. """ -import logging import argparse -import os import json +import logging +import os import time +import horovod.torch as hvd +import mlflow import numpy as np import torch import torch.backends.cudnn as cudnn import torch.nn as nn import torch.nn.functional as f import torch.optim as optim -from azureml.core.run import Run -import horovod.torch as hvd from utils_nlp.gensen.multi_task_model import MultitaskModel from utils_nlp.gensen.utils import ( @@ -37,10 +37,8 @@ compute_validation_loss, ) -# get the Azure ML run object -run = Run.get_context() - cudnn.benchmark = True +logger = logging.getLogger(__name__) hvd.init() if torch.cuda.is_available(): @@ -138,6 +136,7 @@ def evaluate( model_state, ): """ Function to validate the model. + Args: model_state(dict): Saved model weights. config(dict): Config object. @@ -146,9 +145,11 @@ def evaluate( loss_criterion(nn.CrossEntropyLoss): Cross entropy loss. monitor_epoch(int): Current epoch count. min_val_loss(float): Minimum validation loss - min_val_loss_epoch(int): Epoch where the minimum validation loss was seen. + min_val_loss_epoch(int): Epoch where the minimum validation + loss was seen. save_dir(str): Directory path to save the model dictionary. starting_time(time.Time): Starting time of the training. + Returns: bool: Whether to continue training or not. """ @@ -172,7 +173,9 @@ def evaluate( # Horovod: print output only on first rank. if hvd.rank() == 0: # log the best val accuracy to AML run - run.log("Best Validation Loss", np.float(validation_loss)) + logging.info( + "Best Validation Loss: {}".format(np.float(validation_loss)) + ) # If the validation loss is small enough, and it starts to go up. # Should stop training. @@ -182,8 +185,6 @@ def evaluate( min_val_loss_epoch = monitor_epoch model_state = model.state_dict() - run.log("Validation Loss", validation_loss) - print(monitor_epoch, min_val_loss_epoch, min_val_loss) logging.info( "Monitor epoch: %d Validation Loss: %.3f Min Validation Epoch: " "%d Loss : %.3f " @@ -275,312 +276,333 @@ def train(config, data_folder, learning_rate=0.0001): config(dict): Loaded json file as a python object. data_folder(str): Path to the folder containing the data. learning_rate(float): Learning rate for the model. 
- """ owd = os.getcwd() + os.chdir(data_folder) try: - save_dir = config["data"]["save_dir"] - - os.chdir(data_folder) - - if not os.path.exists("./log"): - os.makedirs("./log") - - os.makedirs(save_dir, exist_ok=True) - - setup_logging(config) - - batch_size = config["training"]["batch_size"] - src_vocab_size = config["model"]["n_words_src"] - trg_vocab_size = config["model"]["n_words_trg"] - max_len_src = config["data"]["max_src_length"] - max_len_trg = config["data"]["max_trg_length"] - model_state = {} - - train_src = [item["train_src"] for item in config["data"]["paths"]] - train_trg = [item["train_trg"] for item in config["data"]["paths"]] - tasknames = [item["taskname"] for item in config["data"]["paths"]] - - # Keep track of indicies to train forward and backward jointly - if ( - "skipthought_next" in tasknames - and "skipthought_previous" in tasknames - ): - skipthought_idx = tasknames.index("skipthought_next") - skipthought_backward_idx = tasknames.index("skipthought_previous") - paired_tasks = { - skipthought_idx: skipthought_backward_idx, - skipthought_backward_idx: skipthought_idx, - } - else: - paired_tasks = None - skipthought_idx = None - skipthought_backward_idx = None - - train_iterator = BufferedDataIterator( - train_src, - train_trg, - src_vocab_size, - trg_vocab_size, - tasknames, - save_dir, - buffer_size=1e6, - lowercase=True, - seed=(hvd.rank() + 1) * 12345, - ) + with mlflow.start_run(): + save_dir = config["data"]["save_dir"] + if not os.path.exists("./log"): + os.makedirs("./log") - nli_iterator = NLIIterator( - train=config["data"]["nli_train"], - dev=config["data"]["nli_dev"], - test=config["data"]["nli_test"], - vocab_size=-1, - vocab=os.path.join(save_dir, "src_vocab.pkl"), - seed=(hvd.rank() + 1) * 12345, - ) + os.makedirs(save_dir, exist_ok=True) - src_vocab_size = len(train_iterator.src[0]["word2id"]) - trg_vocab_size = len(train_iterator.trg[0]["word2id"]) + setup_logging(config) - # Logging set up. 
- logging.info("Finished creating iterator ...") - log_config(config) - logging.info( - "Found %d words in source : " - % (len(train_iterator.src[0]["id2word"])) - ) - for idx, taskname in enumerate(tasknames): - logging.info( - "Found %d target words in task %s " - % (len(train_iterator.trg[idx]["id2word"]), taskname) - ) - logging.info("Found %d words in src " % src_vocab_size) - logging.info("Found %d words in trg " % trg_vocab_size) - - weight_mask = torch.ones(trg_vocab_size).cuda() - weight_mask[train_iterator.trg[0]["word2id"][""]] = 0 - loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda() - nli_criterion = nn.CrossEntropyLoss().cuda() - - model = MultitaskModel( - src_emb_dim=config["model"]["dim_word_src"], - trg_emb_dim=config["model"]["dim_word_trg"], - src_vocab_size=src_vocab_size, - trg_vocab_size=trg_vocab_size, - src_hidden_dim=config["model"]["dim_src"], - trg_hidden_dim=config["model"]["dim_trg"], - bidirectional=config["model"]["bidirectional"], - pad_token_src=train_iterator.src[0]["word2id"][""], - pad_token_trg=train_iterator.trg[0]["word2id"][""], - nlayers_src=config["model"]["n_layers_src"], - dropout=config["model"]["dropout"], - num_tasks=len(train_iterator.src), - paired_tasks=paired_tasks, - ).cuda() - - optimizer = setup_horovod(model, learning_rate=learning_rate) - logging.info(model) - - n_gpus = config["training"]["n_gpus"] - model = torch.nn.DataParallel(model, device_ids=range(n_gpus)) - - task_losses = [[] for _ in tasknames] - task_idxs = [0 for _ in tasknames] - nli_losses = [] - updates = 0 - nli_ctr = 0 - nli_epoch = 0 - monitor_epoch = 0 - nli_mbatch_ctr = 0 - mbatch_times = [] - min_val_loss = 10000000 - min_val_loss_epoch = -1 - rng_num_tasks = len(tasknames) - 1 if paired_tasks else len(tasknames) - logging.info("Commencing Training ...") - start = time.time() - while True: - # Train NLI once every 10 minibatches of other tasks - if nli_ctr % 10 == 0: - minibatch = nli_iterator.get_parallel_minibatch( - nli_mbatch_ctr, batch_size * n_gpus - ) - optimizer.zero_grad() - class_logits = model( - minibatch, -1, return_hidden=False, paired_trg=None - ) + batch_size = config["training"]["batch_size"] + src_vocab_size = config["model"]["n_words_src"] + trg_vocab_size = config["model"]["n_words_trg"] + max_len_src = config["data"]["max_src_length"] + max_len_trg = config["data"]["max_trg_length"] + model_state = {} + + train_src = [item["train_src"] for item in config["data"]["paths"]] + train_trg = [item["train_trg"] for item in config["data"]["paths"]] + tasknames = [item["taskname"] for item in config["data"]["paths"]] - loss = nli_criterion( - class_logits.contiguous().view(-1, class_logits.size(1)), - minibatch["labels"].contiguous().view(-1), + # Keep track of indicies to train forward and backward jointly + if ( + "skipthought_next" in tasknames + and "skipthought_previous" in tasknames + ): + skipthought_idx = tasknames.index("skipthought_next") + skipthought_backward_idx = tasknames.index( + "skipthought_previous" ) + paired_tasks = { + skipthought_idx: skipthought_backward_idx, + skipthought_backward_idx: skipthought_idx, + } + else: + paired_tasks = None + skipthought_idx = None + skipthought_backward_idx = None + + train_iterator = BufferedDataIterator( + train_src, + train_trg, + src_vocab_size, + trg_vocab_size, + tasknames, + save_dir, + buffer_size=1e6, + lowercase=True, + seed=(hvd.rank() + 1) * 12345, + ) - # nli_losses.append(loss.data[0]) - nli_losses.append(loss.item()) - loss.backward() - 
torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) - optimizer.step() + nli_iterator = NLIIterator( + train=config["data"]["nli_train"], + dev=config["data"]["nli_dev"], + test=config["data"]["nli_test"], + vocab_size=-1, + vocab=os.path.join(save_dir, "src_vocab.pkl"), + seed=(hvd.rank() + 1) * 12345, + ) - # For AML. - run.log("loss", loss.item()) + src_vocab_size = len(train_iterator.src[0]["word2id"]) + trg_vocab_size = len(train_iterator.trg[0]["word2id"]) - nli_mbatch_ctr += batch_size * n_gpus - if nli_mbatch_ctr >= len(nli_iterator.train_lines): - nli_mbatch_ctr = 0 - nli_epoch += 1 - else: - # Sample a random task - task_idx = np.random.randint(low=0, high=rng_num_tasks) - - # Get a minibatch corresponding to the sampled task - minibatch = train_iterator.get_parallel_minibatch( - task_idx, - task_idxs[task_idx], - batch_size * n_gpus, - max_len_src, - max_len_trg, + # Logging set up. + logging.info("Finished creating iterator ...") + log_config(config) + logging.info( + "Found %d words in source : " + % (len(train_iterator.src[0]["id2word"])) + ) + for idx, taskname in enumerate(tasknames): + logging.info( + "Found %d target words in task %s " + % (len(train_iterator.trg[idx]["id2word"]), taskname) ) - - """Increment pointer into task and if current buffer is - exhausted, fetch new buffer. """ - task_idxs[task_idx] += batch_size * n_gpus - if task_idxs[task_idx] >= train_iterator.buffer_size: - train_iterator.fetch_buffer(task_idx) - task_idxs[task_idx] = 0 - - if task_idx == skipthought_idx: - minibatch_back = train_iterator.get_parallel_minibatch( - skipthought_backward_idx, - task_idxs[skipthought_backward_idx], - batch_size * n_gpus, - max_len_src, - max_len_trg, + logging.info("Found %d words in src " % src_vocab_size) + logging.info("Found %d words in trg " % trg_vocab_size) + + weight_mask = torch.ones(trg_vocab_size).cuda() + weight_mask[train_iterator.trg[0]["word2id"][""]] = 0 + loss_criterion = nn.CrossEntropyLoss(weight=weight_mask).cuda() + nli_criterion = nn.CrossEntropyLoss().cuda() + + model = MultitaskModel( + src_emb_dim=config["model"]["dim_word_src"], + trg_emb_dim=config["model"]["dim_word_trg"], + src_vocab_size=src_vocab_size, + trg_vocab_size=trg_vocab_size, + src_hidden_dim=config["model"]["dim_src"], + trg_hidden_dim=config["model"]["dim_trg"], + bidirectional=config["model"]["bidirectional"], + pad_token_src=train_iterator.src[0]["word2id"][""], + pad_token_trg=train_iterator.trg[0]["word2id"][""], + nlayers_src=config["model"]["n_layers_src"], + dropout=config["model"]["dropout"], + num_tasks=len(train_iterator.src), + paired_tasks=paired_tasks, + ).cuda() + + optimizer = setup_horovod(model, learning_rate=learning_rate) + logging.info(model) + + n_gpus = config["training"]["n_gpus"] + model = torch.nn.DataParallel(model, device_ids=range(n_gpus)) + + task_losses = [[] for _ in tasknames] + task_idxs = [0 for _ in tasknames] + nli_losses = [] + updates = 0 + nli_ctr = 0 + nli_epoch = 0 + monitor_epoch = 0 + nli_mbatch_ctr = 0 + mbatch_times = [] + min_val_loss = 10000000 + min_val_loss_epoch = -1 + rng_num_tasks = ( + len(tasknames) - 1 if paired_tasks else len(tasknames) + ) + logging.info("OS Environ: \n {} \n\n".format(os.environ)) + mlflow.log_param("learning_rate", learning_rate) + logging.info("Commencing Training ...") + start = time.time() + while True: + batch_start_time = time.time() + # Train NLI once every 10 minibatches of other tasks + if nli_ctr % 10 == 0: + minibatch = nli_iterator.get_parallel_minibatch( + nli_mbatch_ctr, batch_size 
* n_gpus ) - task_idxs[skipthought_backward_idx] += batch_size * n_gpus - if ( - task_idxs[skipthought_backward_idx] - >= train_iterator.buffer_size - ): - train_iterator.fetch_buffer(skipthought_backward_idx) - task_idxs[skipthought_backward_idx] = 0 - optimizer.zero_grad() - decoder_logit, decoder_logit_2 = model( - minibatch, - task_idx, - paired_trg=minibatch_back["input_trg"], - ) - - loss_f = loss_criterion( - decoder_logit.contiguous().view( - -1, decoder_logit.size(2) - ), - minibatch["output_trg"].contiguous().view(-1), + class_logits = model( + minibatch, -1, return_hidden=False, paired_trg=None ) - loss_b = loss_criterion( - decoder_logit_2.contiguous().view( - -1, decoder_logit_2.size(2) + loss = nli_criterion( + class_logits.contiguous().view( + -1, class_logits.size(1) ), - minibatch_back["output_trg"].contiguous().view(-1), + minibatch["labels"].contiguous().view(-1), ) - task_losses[task_idx].append(loss_f.data[0]) - task_losses[skipthought_backward_idx].append( - loss_b.data[0] - ) - loss = loss_f + loss_b + # nli_losses.append(loss.data[0]) + nli_losses.append(loss.item()) + loss.backward() + torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) + optimizer.step() + nli_mbatch_ctr += batch_size * n_gpus + if nli_mbatch_ctr >= len(nli_iterator.train_lines): + nli_mbatch_ctr = 0 + nli_epoch += 1 else: - optimizer.zero_grad() - decoder_logit = model(minibatch, task_idx) + # Sample a random task + task_idx = np.random.randint(low=0, high=rng_num_tasks) - loss = loss_criterion( - decoder_logit.contiguous().view( - -1, decoder_logit.size(2) - ), - minibatch["output_trg"].contiguous().view(-1), + # Get a minibatch corresponding to the sampled task + minibatch = train_iterator.get_parallel_minibatch( + task_idx, + task_idxs[task_idx], + batch_size * n_gpus, + max_len_src, + max_len_trg, ) - task_losses[task_idx].append(loss.item()) + """Increment pointer into task and if current buffer is + exhausted, fetch new buffer. """ + task_idxs[task_idx] += batch_size * n_gpus + if task_idxs[task_idx] >= train_iterator.buffer_size: + train_iterator.fetch_buffer(task_idx) + task_idxs[task_idx] = 0 + + if task_idx == skipthought_idx: + minibatch_back = train_iterator.get_parallel_minibatch( + skipthought_backward_idx, + task_idxs[skipthought_backward_idx], + batch_size * n_gpus, + max_len_src, + max_len_trg, + ) + task_idxs[skipthought_backward_idx] += ( + batch_size * n_gpus + ) + if ( + task_idxs[skipthought_backward_idx] + >= train_iterator.buffer_size + ): + train_iterator.fetch_buffer( + skipthought_backward_idx + ) + task_idxs[skipthought_backward_idx] = 0 + + optimizer.zero_grad() + decoder_logit, decoder_logit_2 = model( + minibatch, + task_idx, + paired_trg=minibatch_back["input_trg"], + ) + + loss_f = loss_criterion( + decoder_logit.contiguous().view( + -1, decoder_logit.size(2) + ), + minibatch["output_trg"].contiguous().view(-1), + ) + + loss_b = loss_criterion( + decoder_logit_2.contiguous().view( + -1, decoder_logit_2.size(2) + ), + minibatch_back["output_trg"].contiguous().view(-1), + ) - loss.backward() - # For distributed optimizer need to sync before gradient - # clipping. 
- optimizer.synchronize() + task_losses[task_idx].append(loss_f.data[0]) + task_losses[skipthought_backward_idx].append( + loss_b.data[0] + ) + loss = loss_f + loss_b - torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) - optimizer.step() + else: + optimizer.zero_grad() + decoder_logit = model(minibatch, task_idx) - end = time.time() - mbatch_times.append(end - start) + loss = loss_criterion( + decoder_logit.contiguous().view( + -1, decoder_logit.size(2) + ), + minibatch["output_trg"].contiguous().view(-1), + ) + + task_losses[task_idx].append(loss.item()) + + loss.backward() + # For distributed optimizer need to sync before gradient + # clipping. + optimizer.synchronize() + + torch.nn.utils.clip_grad_norm(model.parameters(), 1.0) + optimizer.step() + + end = time.time() + mbatch_times.append(end - batch_start_time) + + # Validations + if ( + updates % config["management"]["monitor_loss"] == 0 + and updates != 0 + ): + monitor_epoch += 1 + for idx, task in enumerate(tasknames): + logging.info( + "Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s " + "minibatches : %d" + % ( + updates, + task, + np.mean(task_losses[idx]), + task, + len(task_losses[idx]), + ) + ) + mlflow.log_metric( + "validation_loss", + np.mean(task_losses[idx]), + step=monitor_epoch, + ) - # Validations - if ( - updates % config["management"]["monitor_loss"] == 0 - and updates != 0 - ): - monitor_epoch += 1 - for idx, task in enumerate(tasknames): logging.info( - "Seq2Seq Examples Processed : %d %s Loss : %.5f Num %s " - "minibatches : %d" + "Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI " + "Loss : %.5f " % ( - updates, - task, - np.mean(task_losses[idx]), - task, - len(task_losses[idx]), + nli_ctr, + nli_epoch, + nli_mbatch_ctr, + np.mean(nli_losses), ) ) - run.log("Task Loss", np.mean(task_losses[idx])) + mlflow.log_metric( + "nli_loss", np.mean(nli_losses), step=nli_epoch + ) - logging.info( - "Round: %d NLI Epoch : %d NLI Examples Processed : %d NLI " - "Loss : %.5f " - % (nli_ctr, nli_epoch, nli_mbatch_ctr, np.mean(nli_losses)) - ) - run.log("NLI Loss", np.mean(nli_losses)) - logging.info( - "Average time per mininbatch : %.5f" - % (np.mean(mbatch_times)) - ) - run.log( - "Average time per mininbatch : ", np.mean(mbatch_times) - ) - task_losses = [[] for _ in tasknames] - mbatch_times = [] - nli_losses = [] - - # For validate and break if done. - logging.info("############################") - logging.info("##### Evaluating model #####") - logging.info("############################") - training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate( - config=config, - train_iterator=train_iterator, - model=model, - loss_criterion=loss_criterion, - monitor_epoch=monitor_epoch, - min_val_loss=min_val_loss, - min_val_loss_epoch=min_val_loss_epoch, - save_dir=save_dir, - starting_time=start, - model_state=model_state, - ) - if training_complete: - break - - logging.info("Evaluating on NLI") - evaluate_nli( - nli_iterator=nli_iterator, - model=model, - n_gpus=n_gpus, - batch_size=batch_size, - ) + logging.info( + "Average time per mininbatch : %.5f" + % (np.mean(mbatch_times)) + ) + mlflow.log_metric( + "minibatch_avg_duration", np.mean(mbatch_times) + ) + + task_losses = [[] for _ in tasknames] + mbatch_times = [] + nli_losses = [] + + # For validate and break if done. 
+ logging.info("############################") + logging.info("##### Evaluating model #####") + logging.info("############################") + training_complete, min_val_loss_epoch, min_val_loss, model_state = evaluate( + config=config, + train_iterator=train_iterator, + model=model, + loss_criterion=loss_criterion, + monitor_epoch=monitor_epoch, + min_val_loss=min_val_loss, + min_val_loss_epoch=min_val_loss_epoch, + save_dir=save_dir, + starting_time=start, + model_state=model_state, + ) + if training_complete: + break + + logging.info("Evaluating on NLI") + evaluate_nli( + nli_iterator=nli_iterator, + model=model, + n_gpus=n_gpus, + batch_size=batch_size, + ) - updates += batch_size * n_gpus - nli_ctr += 1 - logging.info("Updates: %d" % updates) + updates += batch_size * n_gpus + nli_ctr += 1 + logging.info("Updates: %d" % updates) finally: os.chdir(owd) diff --git a/scenarios/text_classification/README.md b/scenarios/text_classification/README.md index e69de29bb..5a8e46488 100644 --- a/scenarios/text_classification/README.md +++ b/scenarios/text_classification/README.md @@ -0,0 +1,3 @@ +# Text Classification + +Text classification is a supervised learning method of learning and predicting the category or the class of a document given its text content. The state-of-the-art methods are based on neural networks of different architectures as well as pretrained language models or word embeddings. Text classification is a core task in natural language Processing and has numerous applications such as sentiment analysis, document indexing in digital libraries, hate speech detection, and general-purpose categorization in medical, academic, legal, and many other domains. diff --git a/tests/conftest.py b/tests/conftest.py index 3746aed46..11007e2d4 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -9,11 +9,15 @@ # file. You don’t need to import the fixture you want to use in a test, it # automatically gets discovered by pytest." -import pytest import os -from tests.notebooks_common import path_notebooks from tempfile import TemporaryDirectory +import pytest +from tests.notebooks_common import path_notebooks + +from utils_nlp.bert.common import Language +from utils_nlp.bert.common import Tokenizer as BERTTokenizer + @pytest.fixture(scope="module") def notebooks(): @@ -116,5 +120,5 @@ def ner_test_data(): @pytest.fixture() -def english_tokenizer(): - return Tokenizer(language=Language.ENGLISHCASED, to_lower=False) +def bert_english_tokenizer(): + return BERTTokenizer(language=Language.ENGLISHCASED, to_lower=False) diff --git a/tests/unit/test_bert_sequence_classification.py b/tests/unit/test_bert_sequence_classification.py new file mode 100644 index 000000000..b40cb2cc3 --- /dev/null +++ b/tests/unit/test_bert_sequence_classification.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +import pytest + +from utils_nlp.bert.sequence_classification import BERTSequenceClassifier +from utils_nlp.bert.common import Language + + +@pytest.fixture() +def data(): + return ( + ["hi", "hello", "what's wrong with us", "can I leave?"], + [0, 0, 1, 2], + ) + + +def test_classifier(bert_english_tokenizer, data): + tokens = bert_english_tokenizer.tokenize(data[0]) + tokens, mask, _ = bert_english_tokenizer.preprocess_classification_tokens( + tokens, max_len=10 + ) + + classifier = BERTSequenceClassifier( + language=Language.ENGLISHCASED, num_labels=3 + ) + classifier.fit( + token_ids=tokens, + input_mask=mask, + labels=data[1], + num_gpus=0, + num_epochs=1, + batch_size=2, + verbose=True, + ) + + preds = classifier.predict( + token_ids=tokens, input_mask=mask, num_gpus=0, batch_size=2 + ) + assert len(preds) == len(data[1]) diff --git a/tests/unit/test_bert_token_classification.py b/tests/unit/test_bert_token_classification.py index 070c37739..ec4c4a961 100644 --- a/tests/unit/test_bert_token_classification.py +++ b/tests/unit/test_bert_token_classification.py @@ -44,6 +44,15 @@ def test_token_classifier_fit_predict(tmp_path, ner_test_data): labels=ner_test_data["INPUT_LABEL_IDS"], ) + # test output probabilities + predictions = token_classifier.predict( + token_ids=ner_test_data["INPUT_TOKEN_IDS"], + input_mask=ner_test_data["INPUT_MASK"], + labels=ner_test_data["INPUT_LABEL_IDS"], + probabilities=True, + ) + assert len(predictions.classes) == predictions.probabilities.shape[0] + def test_postprocess_token_labels(ner_test_data): labels_no_padding = postprocess_token_labels( diff --git a/tests/unit/test_dataset.py b/tests/unit/test_dataset.py index 2592fbbd3..91ca57a0b 100755 --- a/tests/unit/test_dataset.py +++ b/tests/unit/test_dataset.py @@ -30,27 +30,19 @@ def test_load_pandas_df_msrpc(): def test_wikigold(tmp_path): - wg_text_length = 318333 wg_sentence_count = 1841 wg_test_percentage = 0.5 wg_test_sentence_count = round(wg_sentence_count * wg_test_percentage) wg_train_sentence_count = wg_sentence_count - wg_test_sentence_count - # test download downloaded_file = os.path.join(tmp_path, "wikigold.conll.txt") assert not os.path.exists(downloaded_file) - wg.download(dir_path=tmp_path) - assert os.path.exists(downloaded_file) - - # test read_data - wg_text = wg.read_data(downloaded_file) - assert len(wg_text) == wg_text_length - # test get_train_test_data - train_text, train_labels, test_text, test_labels = wg.get_train_test_data( - wg_text, test_percentage=wg_test_percentage + train_df, test_df = wg.load_train_test_dfs( + tmp_path, test_percentage=wg_test_percentage ) - assert len(train_text) == wg_train_sentence_count - assert len(train_labels) == wg_train_sentence_count - assert len(test_text) == wg_test_sentence_count - assert len(test_labels) == wg_test_sentence_count + + assert os.path.exists(downloaded_file) + + assert train_df.shape == (wg_train_sentence_count, 2) + assert test_df.shape == (wg_test_sentence_count, 2) diff --git a/tests/unit/test_word_embeddings.py b/tests/unit/test_word_embeddings.py index c1eb0e32d..bd1cbb0c2 100644 --- a/tests/unit/test_word_embeddings.py +++ b/tests/unit/test_word_embeddings.py @@ -29,7 +29,7 @@ def test_load_pretrained_vectors_word2vec(): model = load_word2vec(dir_path, limit=500000) assert isinstance(model, Word2VecKeyedVectors) - assert (len(model.wv.vocab) == 500000) + assert len(model.wv.vocab) == 500000 file_path = Path(file_path) assert file_path.is_file() @@ -38,6 +38,7 @@ def test_load_pretrained_vectors_word2vec(): assert 
isinstance(load_word2vec(dir_path), Word2VecKeyedVectors) + def test_load_pretrained_vectors_glove(): dir_path = "temp_data/" file_path = os.path.join( @@ -48,7 +49,7 @@ def test_load_pretrained_vectors_glove(): model = load_glove(dir_path, limit=50000) assert isinstance(model, Word2VecKeyedVectors) - assert (len(model.wv.vocab) == 50000) + assert len(model.wv.vocab) == 50000 file_path = Path(file_path) assert file_path.is_file() @@ -58,7 +59,9 @@ def test_load_pretrained_vectors_glove(): def test_load_pretrained_vectors_fasttext(): dir_path = "temp_data/" - file_path = os.path.join(os.path.join(dir_path, "fastText"), "wiki.simple.bin") + file_path = os.path.join( + os.path.join(dir_path, "fastText"), "wiki.simple.bin" + ) assert isinstance(load_fasttext(dir_path), FastText) diff --git a/tools/generate_conda_file.py b/tools/generate_conda_file.py index 95befecc2..428790a01 100644 --- a/tools/generate_conda_file.py +++ b/tools/generate_conda_file.py @@ -54,9 +54,7 @@ } PIP_BASE = { - "azureml-sdk[notebooks,tensorboard]": ( - "azureml-sdk[notebooks,tensorboard]==1.0.33" - ), + "azureml-sdk[notebooks,tensorboard]": "azureml-sdk[notebooks,tensorboard]==1.0.43", "azureml-dataprep": "azureml-dataprep==1.1.4", "black": "black>=18.6b4", "dask": "dask[dataframe]==1.2.2", @@ -75,6 +73,7 @@ "nltk": "nltk>=3.4", "pytorch-pretrained-bert": "pytorch-pretrained-bert>=0.6", "seqeval": "seqeval>=0.0.12", + "azureml-mlflow": "azureml-mlflow>=1.0.43.1", } PIP_GPU = {"horovod": "horovod>=0.16.1"} diff --git a/utils_nlp/bert/sequence_classification.py b/utils_nlp/bert/sequence_classification.py index c5d4614f2..40e77b44b 100644 --- a/utils_nlp/bert/sequence_classification.py +++ b/utils_nlp/bert/sequence_classification.py @@ -5,6 +5,7 @@ # https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/run_classifier.py import random +from collections import namedtuple import numpy as np import torch @@ -22,6 +23,7 @@ class BERTSequenceClassifier: def __init__(self, language=Language.ENGLISH, num_labels=2, cache_dir="."): """Initializes the classifier and the underlying pretrained model. + Args: language (Language, optional): The pretrained model's language. Defaults to Language.ENGLISH. @@ -47,19 +49,24 @@ def fit( token_ids, input_mask, labels, + token_type_ids=None, num_gpus=None, num_epochs=1, batch_size=32, lr=2e-5, + warmup_proportion=None, verbose=True, ): """Fine-tunes the BERT classifier using the given training data. + Args: token_ids (list): List of training token id lists. input_mask (list): List of input mask lists. labels (list): List of training labels. - device (str, optional): Device used for training ("cpu" or "gpu"). - Defaults to "gpu". + token_type_ids (list, optional): List of lists. Each sublist + contains segment ids indicating if the token belongs to + the first sentence(0) or second sentence(1). Only needed + for two-sentence tasks. num_gpus (int, optional): The number of gpus to use. If None is specified, all available GPUs will be used. Defaults to None. @@ -67,6 +74,9 @@ def fit( Defaults to 1. batch_size (int, optional): Training batch size. Defaults to 32. lr (float): Learning rate of the Adam optimizer. Defaults to 2e-5. + warmup_proportion (float, optional): Proportion of training to + perform linear learning rate warmup for. E.g., 0.1 = 10% of + training. Defaults to None. verbose (bool, optional): If True, shows the training progress and loss values. Defaults to True. 
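A hedged usage sketch of the extended `fit` signature (toy sentence pairs on CPU, mirroring the style of the unit tests added in this patch; `token_type_ids` and `warmup_proportion` are the newly added arguments):

```python
# Sketch only: toy data, CPU (num_gpus=0), tiny batch size.
from utils_nlp.bert.common import Language, Tokenizer
from utils_nlp.bert.sequence_classification import BERTSequenceClassifier

tokenizer = Tokenizer(Language.ENGLISHCASED, to_lower=False)
pairs = [["hi there", "hello"], ["it is raining", "the sun is out"]]
labels = [0, 1]

tokens = tokenizer.tokenize(pairs)
token_ids, input_mask, token_type_ids = tokenizer.preprocess_classification_tokens(
    tokens, max_len=16
)

classifier = BERTSequenceClassifier(language=Language.ENGLISHCASED, num_labels=2)
classifier.fit(
    token_ids=token_ids,
    input_mask=input_mask,
    token_type_ids=token_type_ids,   # segment ids for the two-sentence input
    labels=labels,
    num_gpus=0,
    num_epochs=1,
    batch_size=2,
    warmup_proportion=0.1,           # 10% of optimization steps for LR warmup
)
```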
""" @@ -95,16 +105,27 @@ def fit( }, ] - opt = BertAdam(optimizer_grouped_parameters, lr=lr) + num_examples = len(token_ids) + num_batches = int(num_examples / batch_size) + num_train_optimization_steps = num_batches * num_epochs + + if warmup_proportion is None: + opt = BertAdam(optimizer_grouped_parameters, lr=lr) + else: + opt = BertAdam( + optimizer_grouped_parameters, + lr=lr, + t_total=num_train_optimization_steps, + warmup=warmup_proportion, + ) # define loss function loss_func = nn.CrossEntropyLoss().to(device) # train self.model.train() # training mode - num_examples = len(token_ids) - num_batches = int(num_examples / batch_size) + token_type_ids_batch = None for epoch in range(num_epochs): for i in range(num_batches): @@ -121,11 +142,18 @@ def fit( input_mask[start:end], dtype=torch.long, device=device ) + if token_type_ids is not None: + token_type_ids_batch = torch.tensor( + token_type_ids[start:end], + dtype=torch.long, + device=device, + ) + opt.zero_grad() y_h = self.model( input_ids=x_batch, - token_type_ids=None, + token_type_ids=token_type_ids_batch, attention_mask=mask_batch, labels=None, ) @@ -146,20 +174,37 @@ def fit( ) ) # empty cache - del [x_batch, y_batch, mask_batch] + del [x_batch, y_batch, mask_batch, token_type_ids_batch] torch.cuda.empty_cache() - def predict(self, token_ids, input_mask, num_gpus=None, batch_size=32): + def predict( + self, + token_ids, + input_mask, + token_type_ids=None, + num_gpus=None, + batch_size=32, + probabilities=False, + ): """Scores the given dataset and returns the predicted classes. + Args: token_ids (list): List of training token lists. input_mask (list): List of input mask lists. + token_type_ids (list, optional): List of lists. Each sublist + contains segment ids indicating if the token belongs to + the first sentence(0) or second sentence(1). Only needed + for two-sentence tasks. num_gpus (int, optional): The number of gpus to use. If None is specified, all available GPUs will be used. Defaults to None. batch_size (int, optional): Scoring batch size. Defaults to 32. + probabilities (bool, optional): + If True, the predicted probability distribution + is also returned. Defaults to False. Returns: - [ndarray]: Predicted classes. + 1darray, namedtuple(1darray, ndarray): Predicted classes or + (classes, probabilities) if probabilities is True. 
""" device = get_device("cpu" if num_gpus == 0 else "gpu") @@ -178,16 +223,30 @@ def predict(self, token_ids, input_mask, num_gpus=None, batch_size=32): mask_batch = torch.tensor( mask_batch, dtype=torch.long, device=device ) + token_type_ids_batch = None + if token_type_ids is not None: + token_type_ids_batch = torch.tensor( + token_type_ids[i : i + batch_size], + dtype=torch.long, + device=device, + ) with torch.no_grad(): p_batch = self.model( input_ids=x_batch, - token_type_ids=None, + token_type_ids=token_type_ids_batch, attention_mask=mask_batch, labels=None, ) - preds.append(p_batch.cpu().data.numpy()) + preds.append(p_batch.cpu()) if i % batch_size == 0: pbar.update(batch_size) - preds = [x.argmax(1) for x in preds] + preds = np.concatenate(preds) - return preds + + if probabilities: + return namedtuple("Predictions", "classes probabilities")( + preds.argmax(axis=1), + nn.Softmax(dim=1)(torch.Tensor(preds)).numpy(), + ) + else: + return preds.argmax(axis=1) diff --git a/utils_nlp/bert/token_classification.py b/utils_nlp/bert/token_classification.py index 3182684b4..f4c0f50bf 100644 --- a/utils_nlp/bert/token_classification.py +++ b/utils_nlp/bert/token_classification.py @@ -7,6 +7,7 @@ import numpy as np from tqdm import tqdm, trange +from collections import namedtuple import torch import torch.nn as nn @@ -14,7 +15,7 @@ from pytorch_pretrained_bert.optimization import BertAdam from pytorch_pretrained_bert.modeling import BertForTokenClassification -from .common import Language, create_data_loader +from utils_nlp.bert.common import Language, create_data_loader from utils_nlp.pytorch.device_utils import get_device, move_to_device @@ -192,8 +193,16 @@ def fit( train_loss = tr_loss / nb_tr_steps print("Train loss: {}".format(train_loss)) + torch.cuda.empty_cache() + def predict( - self, token_ids, input_mask, labels=None, batch_size=32, num_gpus=None + self, + token_ids, + input_mask, + labels=None, + batch_size=32, + num_gpus=None, + probabilities=False, ): """ Predict token labels on the testing data. @@ -215,7 +224,12 @@ def predict( If None, all available GPUs will be used. Defaults to None. Returns: - list: List of lists of predicted token labels. + list or namedtuple(list, ndarray): List of lists of predicted + token labels or ([token labels], probabilities) if + probabilities is True. The probabilities output is an n x m + array, where n is the size of the testing data and m is the + number of tokens in each input sublist. The probability + values are the softmax probability of the predicted class. 
""" test_dataloader = create_data_loader( input_ids=token_ids, @@ -228,7 +242,6 @@ def predict( self.model = move_to_device(self.model, device, num_gpus) self.model.eval() - predictions = [] eval_loss = 0 nb_eval_steps = 0 for step, batch in enumerate( @@ -255,16 +268,37 @@ def predict( eval_loss += tmp_eval_loss.mean().item() - logits = logits.detach().cpu().numpy() - predictions.extend([list(p) for p in np.argmax(logits, axis=2)]) + logits = logits.detach().cpu() + + if step == 0: + logits_all = logits.numpy() + else: + logits_all = np.append(logits_all, logits, axis=0) nb_eval_steps += 1 + predictions = [list(p) for p in np.argmax(logits_all, axis=2)] + if true_label_available: validation_loss = eval_loss / nb_eval_steps print("Evaluation loss: {}".format(validation_loss)) - return predictions + if probabilities: + return namedtuple("Predictions", "classes probabilities")( + predictions, + np.max(nn.Softmax(dim=2)(torch.Tensor(logits_all)).numpy(), 2), + ) + else: + return predictions + + +def create_label_map(label_list, trailing_piece_tag="X"): + label_map = {label: i for i, label in enumerate(label_list)} + + if trailing_piece_tag not in label_list: + label_map[trailing_piece_tag] = len(label_list) + + return label_map def postprocess_token_labels( @@ -294,13 +328,13 @@ def postprocess_token_labels( original labels. Default value is None. remove_trailing_word_pieces (bool, optional): Whether to remove predicted labels of trailing word pieces generated by WordPiece - tokenizer. For example, "playing" is broken into "play" and - "##ing". After removing predicted label for "##ing", + tokenizer. For example, "criticize" is broken into "critic" and + "##ize". After removing predicted label for "##ize", the predicted label for "play" is assigned to the original word "playing". Default value is False. trailing_token_mask (list, optional): list of boolean values, True for the first word piece of each original word, False for trailing - word pieces, e.g. ##ing. If remove_trailing_word_pieces is + word pieces, e.g. ##ize. If remove_trailing_word_pieces is True, this mask is used to remove the predicted labels on trailing word pieces, so that each original word in the input text has a unique predicted label. diff --git a/utils_nlp/dataset/preprocess.py b/utils_nlp/dataset/preprocess.py index 03f0e9062..2e51821f5 100644 --- a/utils_nlp/dataset/preprocess.py +++ b/utils_nlp/dataset/preprocess.py @@ -22,7 +22,8 @@ def to_lowercase_all(df): def to_lowercase(df, column_names=[]): """ - This function transforms strings of the column names in the dataframe passed to lowercase + This function transforms strings of the column names in the dataframe + passed to lowercase Args: df (pd.DataFrame): Raw dataframe with some text columns. @@ -46,18 +47,18 @@ def to_spacy_tokens( token_cols=["sentence1_tokens", "sentence2_tokens"], ): """ - This function tokenizes the sentence pairs using spaCy, defaulting to the - spaCy en_core_web_sm model - - Args: - df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. - sentence_cols (list, optional): Column names of the raw sentence pairs. - token_cols (list, optional): Column names for the tokenized sentences. - - Returns: - pd.DataFrame: Dataframe with new columns token_cols, each containing - a list of tokens for their respective sentences. - """ + This function tokenizes the sentence pairs using spaCy, defaulting to the + spaCy en_core_web_sm model + + Args: + df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. 
+ sentence_cols (list, optional): Column names of the raw sentence pairs. + token_cols (list, optional): Column names for the tokenized sentences. + + Returns: + pd.DataFrame: Dataframe with new columns token_cols, each containing + a list of tokens for their respective sentences. + """ nlp = spacy.load("en_core_web_sm") text_df = df[sentence_cols] nlp_df = text_df.applymap(lambda x: nlp(x)) @@ -77,21 +78,22 @@ def rm_spacy_stopwords( custom_stopwords=[], ): """ - This function tokenizes the sentence pairs using spaCy and remove stopwords, - defaulting to the spaCy en_core_web_sm model - - Args: - df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. - sentence_cols (list, optional): Column names for the raw sentence pairs. - stop_cols (list, optional): Column names for the tokenized sentences - without stop words. - custom_stopwords (list of str, optional): List of custom stopwords to - register with the spaCy model. - - Returns: - pd.DataFrame: Dataframe with new columns stop_cols, each containing a - list of tokens for their respective sentences. - """ + This function tokenizes the sentence pairs using spaCy and remove + stopwords, defaulting to the spaCy en_core_web_sm model + + Args: + df (pd.DataFrame): Dataframe with columns sentence_cols to tokenize. + sentence_cols (list, optional): Column names for the raw sentence + pairs. + stop_cols (list, optional): Column names for the tokenized sentences + without stop words. + custom_stopwords (list of str, optional): List of custom stopwords to + register with the spaCy model. + + Returns: + pd.DataFrame: Dataframe with new columns stop_cols, each containing a + list of tokens for their respective sentences. + """ nlp = spacy.load("en_core_web_sm") if len(custom_stopwords) > 0: for csw in custom_stopwords: @@ -160,3 +162,13 @@ def rm_nltk_stopwords( stop_df.columns = stop_cols return pd.concat([df, stop_df], axis=1) + + +def convert_to_unicode(input_text): + """Converts intput_text to Unicode. Input must be utf-8.""" + if isinstance(input_text, str): + return input_text + elif isinstance(input_text, bytes): + return input_text.decode("utf-8", "ignore") + else: + raise TypeError("Unsupported string type: %s" % (type(input_text))) diff --git a/utils_nlp/dataset/stsbenchmark.py b/utils_nlp/dataset/stsbenchmark.py index ed919ed57..31e05e637 100644 --- a/utils_nlp/dataset/stsbenchmark.py +++ b/utils_nlp/dataset/stsbenchmark.py @@ -4,7 +4,6 @@ import os import tarfile import pandas as pd -import azureml.dataprep as dp from utils_nlp.dataset.url_utils import maybe_download @@ -14,38 +13,33 @@ def load_pandas_df(data_path, file_split=DEFAULT_FILE_SPLIT): """Load the STS Benchmark dataset as a pd.DataFrame - + Args: data_path (str): Path to data directory - file_split (str, optional): File split to load. One of (train, dev, test). Defaults to train. - + file_split (str, optional): File split to load. + One of (train, dev, test). + Defaults to train. 
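
The behavior of the convert_to_unicode helper added above follows directly from its implementation; a small sketch:

    from utils_nlp.dataset.preprocess import convert_to_unicode

    assert convert_to_unicode("plain text") == "plain text"        # str passes through unchanged
    assert convert_to_unicode("café".encode("utf-8")) == "café"    # utf-8 bytes are decoded
    # any other input type raises TypeError("Unsupported string type: ...")
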
+ Returns: pd.DataFrame: STS Benchmark dataset """ - clean_file_path = os.path.join( - data_path, "clean/stsbenchmark", "sts-{}.csv".format(file_split) - ) - dflow = _maybe_download_and_extract(data_path, clean_file_path) - return dflow.to_pandas_dataframe() - - -def _maybe_download_and_extract(base_data_path, clean_file_path): - if not os.path.exists(clean_file_path): - raw_data_path = os.path.join(base_data_path, "raw") - if not os.path.exists(raw_data_path): - os.makedirs(raw_data_path) - sts_path = _download_sts(raw_data_path) - sts_files = [f for f in os.listdir(sts_path) if f.endswith(".csv")] - _clean_sts( - sts_files, - sts_path, - os.path.join(base_data_path, "clean", "stsbenchmark"), - ) - return dp.auto_read_file(clean_file_path).drop_columns("Column1") + file_name = "sts-{}.csv".format(file_split) + df = _maybe_download_and_extract(file_name, data_path) + return df + + +def _maybe_download_and_extract(sts_file, base_data_path): + raw_data_path = os.path.join(base_data_path, "raw") + if not os.path.exists(raw_data_path): + os.makedirs(raw_data_path) + sts_path = _download_sts(raw_data_path) + df = _load_sts(os.path.join(sts_path, sts_file)) + return df def _download_sts(dirpath): - """Download and extract data from http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz + """Download and extract data from + http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz Args: dirpath (str): Path to data directory. @@ -66,8 +60,11 @@ def _extract_sts(tarpath, target_dirpath=".", tmode="r"): Args: tarpath (str): Path to tarfile, to be deleted after extraction. - target_dirpath (str, optional): Directory in which to save the extracted files. - tmode (str, optional): The mode for reading, of the form "filemode[:compression]". Defaults to "r". + target_dirpath (str, optional): Directory in which to save + the extracted files. + tmode (str, optional): The mode for reading, + of the form "filemode[:compression]". + Defaults to "r". Returns: str: Path to extracted STS Benchmark data. @@ -79,31 +76,59 @@ def _extract_sts(tarpath, target_dirpath=".", tmode="r"): return os.path.join(target_dirpath, extracted) -def _clean_sts(filenames, src_dir, target_dir): - """Drop columns containing irrelevant metadata and save as new csv files in the target_dir +def _load_sts(src_file_path): + """Load datafile as dataframe Args: - filenames (list of str): List of filenames for the train/dev/test csv files. - src_dir (str): Directory for the raw csv files. - target_dir (str): Directory for the clean csv files to be written to. + src_file_path (str): filepath to train/dev/test csv files. 
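
Taken together, the helpers above reduce loading an STS Benchmark split to a single call; a sketch, with the data directory as a placeholder:

    from utils_nlp.dataset import stsbenchmark

    # downloads and extracts the archive if needed, then parses the requested split
    sts_dev_raw = stsbenchmark.load_pandas_df("../../data", file_split="dev")
    print(sts_dev_raw.shape)
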
""" - if not os.path.exists(target_dir): - os.makedirs(target_dir) - filepaths = [os.path.join(src_dir, f) for f in filenames] - for i, fp in enumerate(filepaths): - dat = dp.auto_read_file(path=fp) - s = dat.keep_columns(["Column5", "Column6", "Column7"]).rename_columns( - { - "Column5": "score", - "Column6": "sentence1", - "Column7": "sentence2", - } - ) - print( - "Writing clean dataframe to {}".format( - os.path.join(target_dir, filenames[i]) + with open(src_file_path, "r", encoding="utf-8") as f: + sent_pairs = [] + for line in f: + line = line.strip().split("\t") + sent_pairs.append( + [ + line[0].strip(), + line[1].strip(), + line[2].strip(), + line[3].strip(), + float(line[4]), + line[5].strip(), + line[6].strip(), + ] ) + + sdf = pd.DataFrame( + sent_pairs, + columns=[ + "column_0", + "column_1", + "column_2", + "column_3", + "column_4", + "column_5", + "column_6", + ], ) - sdf = s.to_pandas_dataframe().to_csv( - os.path.join(target_dir, filenames[i]), sep="\t" - ) + return sdf + + +def clean_sts(df): + """Drop columns containing irrelevant metadata and + save as new csv files in the target_dir. + + Args: + df (pandas.Dataframe): drop columns from train/test/dev files. + """ + clean_df = df.drop( + ["column_0", "column_1", "column_2", "column_3"], axis=1 + ) + clean_df = clean_df.rename( + index=str, + columns={ + "column_4": "score", + "column_5": "sentence1", + "column_6": "sentence2", + }, + ) + return clean_df diff --git a/utils_nlp/dataset/wikigold.py b/utils_nlp/dataset/wikigold.py index 8f32bad27..740440831 100644 --- a/utils_nlp/dataset/wikigold.py +++ b/utils_nlp/dataset/wikigold.py @@ -2,6 +2,9 @@ # Licensed under the MIT License. import random +import os +import pandas as pd + from utils_nlp.dataset.url_utils import maybe_download URL = ( @@ -10,48 +13,35 @@ ) -def download(dir_path="."): - """Download the wikigold data file to dir_path if it doesn't exist yet.""" - file_name = URL.split("/")[-1] - maybe_download(URL, file_name, dir_path) - - -def read_data(data_file): +def load_train_test_dfs( + local_cache_path="./", test_percentage=0.5, random_seed=None +): """ - Read the wikigold dataset into a string of text. + Get the training and testing data frames based on test_percentage. Args: - data_file (str): data file path, including the file name. - - Returns: - str: One string containing the wikigold dataset. - """ - with open(data_file, "r", encoding="utf8") as file: - text = file.read() - - return text - - -def get_train_test_data(text, test_percentage=0.5, random_seed=None): - """ - Get the training and testing data based on test_percentage. - - Args: - text (str): One string containing the wikigold dataset. + local_cache_path (str): Path to store the data. If the data file + doesn't exist in this path, it's downloaded. test_percentage (float, optional): Percentage of data ot use for testing. Since this is a small dataset, the default testing percentage is set to 0.5 random_seed (float, optional): Random seed used to shuffle the data. Returns: - tuple: A tuple containing four lists: - train_sentence_list: List of training sentence strings. - train_labels_list: List of lists. Each sublist contains the - entity labels of the words in the training sentence. - test_sentence_list: List of testing sentence strings. - test_labels_list: List of lists. Each sublist contains the - entity labels of the word in the testing sentence. + tuple: (train_pandas_df, test_pandas_df), each data frame contains + two columns + "sentence": sentences in strings. 
+ "labels": list of entity labels of the words in the sentence. + """ + file_name = URL.split("/")[-1] + maybe_download(URL, file_name, local_cache_path) + + data_file = os.path.join(local_cache_path, file_name) + + with open(data_file, "r", encoding="utf8") as file: + text = file.read() + # Input data are separated by empty lines text_split = text.split("\n\n") # Remove empty line at EOF @@ -94,14 +84,17 @@ def _get_sentence_and_labels(text_list, data_type): test_text_split, "testing" ) - return ( - train_sentence_list, - train_labels_list, - test_sentence_list, - test_labels_list, + train_df = pd.DataFrame( + {"sentence": train_sentence_list, "labels": train_labels_list} ) + test_df = pd.DataFrame( + {"sentence": test_sentence_list, "labels": test_labels_list} + ) + + return (train_df, test_df) + def get_unique_labels(): """Get the unique labels in the wikigold dataset.""" - return ["O", "I-LOC", "I-MISC", "I-PER", "I-ORG", "X"] + return ["O", "I-LOC", "I-MISC", "I-PER", "I-ORG"] diff --git a/utils_nlp/dataset/xnli.py b/utils_nlp/dataset/xnli.py index e7bbcf4cb..a233c9e7b 100644 --- a/utils_nlp/dataset/xnli.py +++ b/utils_nlp/dataset/xnli.py @@ -10,37 +10,86 @@ import pandas as pd from utils_nlp.dataset.url_utils import extract_zip, maybe_download +from utils_nlp.dataset.preprocess import convert_to_unicode -URL = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip" +URL_XNLI = "https://www.nyu.edu/projects/bowman/xnli/XNLI-1.0.zip" +URL_XNLI_MT = "https://www.nyu.edu/projects/bowman/xnli/XNLI-MT-1.0.zip" -DATA_FILES = { - "dev": "XNLI-1.0/xnli.dev.jsonl", - "test": "XNLI-1.0/xnli.test.jsonl", -} +def load_pandas_df(local_cache_path="./", file_split="dev", language="zh"): + """Downloads and extracts the dataset files. -def load_pandas_df(local_cache_path=None, file_split="dev"): - """Downloads and extracts the dataset files Args: - local_cache_path ([type], optional): [description]. - Defaults to None. + local_cache_path (str, optional): Path to store the data. + Defaults to "./". file_split (str, optional): The subset to load. - One of: {"dev", "test"} - Defaults to "train". + One of: {"train", "dev", "test"} + Defaults to "dev". + language (str, optional): language subset to read. + One of: {"en", "fr", "es", "de", "el", "bg", "ru", + "tr", "ar", "vi", "th", "zh", "hi", "sw", "ur"} + Defaults to "zh" (Chinese). Returns: pd.DataFrame: pandas DataFrame containing the specified XNLI subset. 
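
Sketches of the two dataset entry points introduced above (cache paths and the seed are arbitrary placeholders):

    from utils_nlp.dataset.wikigold import load_train_test_dfs, get_unique_labels
    from utils_nlp.dataset import xnli

    train_df, test_df = load_train_test_dfs(
        local_cache_path="./tmp", test_percentage=0.5, random_seed=42
    )
    print(train_df.columns.tolist())      # ['sentence', 'labels']
    print(get_unique_labels())            # ['O', 'I-LOC', 'I-MISC', 'I-PER', 'I-ORG']

    xnli_dev_zh = xnli.load_pandas_df(local_cache_path="./tmp", file_split="dev", language="zh")
    print(xnli_dev_zh.columns.tolist())   # ['text', 'label']
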
""" - file_name = URL.split("/")[-1] - maybe_download(URL, file_name, local_cache_path) + if file_split in ("dev", "test"): + url = URL_XNLI + sentence_1_index = 6 + sentence_2_index = 7 + label_index = 1 - if not os.path.exists( - os.path.join(local_cache_path, DATA_FILES[file_split]) - ): + zip_file_name = url.split("/")[-1] + folder_name = ".".join(zip_file_name.split(".")[:-1]) + file_name = folder_name + "/" + ".".join(["xnli", file_split, "tsv"]) + elif file_split == "train": + url = URL_XNLI_MT + sentence_1_index = 0 + sentence_2_index = 1 + label_index = 2 + + zip_file_name = url.split("/")[-1] + folder_name = ".".join(zip_file_name.split(".")[:-1]) + file_name = ( + folder_name + + "/multinli/" + + ".".join(["multinli", file_split, language, "tsv"]) + ) + + maybe_download(url, zip_file_name, local_cache_path) + + if not os.path.exists(os.path.join(local_cache_path, folder_name)): extract_zip( - os.path.join(local_cache_path, file_name), local_cache_path + os.path.join(local_cache_path, zip_file_name), local_cache_path + ) + + with open( + os.path.join(local_cache_path, file_name), "r", encoding="utf-8" + ) as f: + lines = f.read().splitlines() + + line_list = [line.split("\t") for line in lines] + # Remove the column name row + line_list.pop(0) + if file_split != "train": + line_list = [line for line in line_list if line[0] == language] + + label_list = [convert_to_unicode(line[label_index]) for line in line_list] + old_contradict_label = convert_to_unicode("contradictory") + new_contradict_label = convert_to_unicode("contradiction") + label_list = [ + new_contradict_label if label == old_contradict_label else label + for label in label_list + ] + text_list = [ + ( + convert_to_unicode(line[sentence_1_index]), + convert_to_unicode(line[sentence_2_index]), ) - return pd.read_json( - os.path.join(local_cache_path, DATA_FILES[file_split]), lines=True - ) + for line in line_list + ] + + df = pd.DataFrame({"text": text_list, "label": label_list}) + + return df From 8d13bc0d4990bb5b460486be96a509d93a8c93f5 Mon Sep 17 00:00:00 2001 From: hlums Date: Wed, 26 Jun 2019 14:22:15 +0000 Subject: [PATCH 064/108] Renamed conll preprocess function. --- utils_nlp/dataset/msra_ner.py | 6 ++---- utils_nlp/dataset/ner_utils.py | 8 ++------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/utils_nlp/dataset/msra_ner.py b/utils_nlp/dataset/msra_ner.py index 12728de17..b4a15b815 100644 --- a/utils_nlp/dataset/msra_ner.py +++ b/utils_nlp/dataset/msra_ner.py @@ -1,6 +1,6 @@ import os import pandas as pd -from utils_nlp.dataset.ner_utils import get_sentence_and_labels +from utils_nlp.dataset.ner_utils import preprocess_conll FILES = { @@ -22,9 +22,7 @@ def load_pandas_df(local_cache_path="./", file_split="test"): text = text.replace("? 0", "? 0\n") text = text.replace("! 0", "! 0\n") - sentence_list, labels_list = get_sentence_and_labels( - text, file_split - ) + sentence_list, labels_list = preprocess_conll(text, file_split) labels_list = [ ["O" if label == "0" else label for label in labels] diff --git a/utils_nlp/dataset/ner_utils.py b/utils_nlp/dataset/ner_utils.py index 46844be12..7ac60734e 100644 --- a/utils_nlp/dataset/ner_utils.py +++ b/utils_nlp/dataset/ner_utils.py @@ -1,4 +1,4 @@ -def get_sentence_and_labels(text, data_type="", join_characeter=" "): +def preprocess_conll(text, data_type=""): """ Helper function converting data in conll format to sentence and list of token labels. @@ -16,10 +16,6 @@ def get_sentence_and_labels(text, data_type="", join_characeter=" "): . 
O" data_type (str, optional): String that briefly describes the data, e.g. "train" - join_characeter (str, optional): String used to join input words. - Defaults to " ". For Chinese text, "" should be used because - Chinese characters/words don't have spaces between them as - English does. Returns: tuple: (list of sentences, list of token label lists) @@ -37,7 +33,7 @@ def get_sentence_and_labels(text, data_type="", join_characeter=" "): # split "word label" pairs s_split_split = [t.split() for t in s_split] sentence_list.append( - join_characeter.join([t[0] for t in s_split_split if len(t) > 1]) + " ".join([t[0] for t in s_split_split if len(t) > 1]) ) labels_list.append([t[1] for t in s_split_split if len(t) > 1]) if len(s_split_split) > max_seq_len: From 6c756830ee141811c4ae056572ffe09be9998b38 Mon Sep 17 00:00:00 2001 From: bethz <6098674+bethz@users.noreply.github.com> Date: Wed, 26 Jun 2019 10:41:39 -0400 Subject: [PATCH 065/108] create unit-tests.yml as a copy from .ci/azure-pipelines.yml and create in tests/ci instead of .ci --- tests/ci/unit-tests.yml | 46 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 tests/ci/unit-tests.yml diff --git a/tests/ci/unit-tests.yml b/tests/ci/unit-tests.yml new file mode 100644 index 000000000..3c83dadcc --- /dev/null +++ b/tests/ci/unit-tests.yml @@ -0,0 +1,46 @@ + +# Pull request against these branches will trigger this build +pr: +- master +- staging + +#Any commit to this branch will trigger the build. +trigger: +- staging +- master + +pool: + vmImage: 'ubuntu-16.04' + +steps: + +- bash: | + echo "##vso[task.prependpath]/usr/share/miniconda/bin" + displayName: Add Conda to PATH + +- bash: | + conda remove -q -n nlp --all -y + python tools/generate_conda_file.py --gpu + conda env create -n nlp_gpu -f nlp_gpu.yaml + conda env list + source activate nlp_gpu + displayName: 'Creating Conda Environment with dependencies' + +- bash: | + source activate nlp_gpu + python -m ipykernel install --user --name nlp_gpu --display-name "nlp_gpu" + # Commenting out pytest since it contains bunch of tests from other project which are not applicable. + # But keeping the line here to show how to run it once tests relevant to this project are added + # pytest --junitxml=junit/test-unitttest.xml #not running any tests for now + displayName: 'Run Unit tests' + +- task: PublishTestResults@2 + inputs: + testResultsFiles: '**/test-unitttest.xml' + testRunTitle: 'Test results for PyTest' + +- task: ComponentGovernanceComponentDetection@0 + inputs: + scanType: 'Register' + verbosity: 'Verbose' + alertWarningLevel: 'High' From 3f84802644360d7fa550b2d38ab5314e186d4959 Mon Sep 17 00:00:00 2001 From: bethz <6098674+bethz@users.noreply.github.com> Date: Wed, 26 Jun 2019 10:53:32 -0400 Subject: [PATCH 066/108] add comments to tests/readme --- tests/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/README.md b/tests/README.md index bdd57bd54..79abd7de2 100644 --- a/tests/README.md +++ b/tests/README.md @@ -2,6 +2,8 @@ This project uses unit, smoke and integration tests with Python files and notebooks. For more information, see a [quick introduction to unit, smoke and integration tests](https://miguelgfierro.com/blog/2018/a-beginners-guide-to-python-testing/). To manually execute the unit tests in the different environments, first **make sure you are in the correct environment as described in the [SETUP.md](/SETUP.md)**. +Tests are automatically run as part of a DevOps pipeline. 
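
As a reference for the rename above, a minimal sketch of preprocess_conll on a made-up two-sentence snippet (the expected output is inferred from the implementation and is illustrative only):

    from utils_nlp.dataset.ner_utils import preprocess_conll

    conll_text = "Hello O\nParis I-LOC\n\nBye O"
    sentences, labels = preprocess_conll(conll_text, data_type="demo")
    # sentences -> ['Hello Paris', 'Bye']
    # labels    -> [['O', 'I-LOC'], ['O']]
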
The pipelines are defined in .yml files in tests/ci with filenames that align with pipeline names. + ## Test execution Click on the following menus to see more details on how to execute the unit, smoke and integration tests: From d92695feecb4d12e138dee3b0e9f8d4d7ebe080d Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Wed, 26 Jun 2019 11:04:10 -0400 Subject: [PATCH 067/108] notebook edits --- scenarios/embeddings/embedding_trainer.ipynb | 218 ++++++++++-------- .../baseline_deep_dive.ipynb | 194 +++++++++++----- 2 files changed, 255 insertions(+), 157 deletions(-) diff --git a/scenarios/embeddings/embedding_trainer.ipynb b/scenarios/embeddings/embedding_trainer.ipynb index 7d656f04d..f5ed37567 100644 --- a/scenarios/embeddings/embedding_trainer.ipynb +++ b/scenarios/embeddings/embedding_trainer.ipynb @@ -28,7 +28,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -51,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -63,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -80,17 +80,35 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 11, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 309KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + } + ], "source": [ "# Produce a pandas dataframe for the training set\n", - "sts_train = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")" + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "\n", + "# Clean the sts dataset\n", + "sts_train = stsbenchmark.clean_sts(train_raw)" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -170,7 +188,7 @@ "4 A man seated is playing the cello. 
" ] }, - "execution_count": 5, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -181,7 +199,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -190,7 +208,7 @@ "(5749, 3)" ] }, - "execution_count": 6, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } @@ -209,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -223,7 +241,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 15, "metadata": { "scrolled": true }, @@ -237,16 +255,16 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 16, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "11492" + "11498" ] }, - "execution_count": 9, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -257,7 +275,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 17, "metadata": {}, "outputs": [ { @@ -279,7 +297,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 18, "metadata": {}, "outputs": [ { @@ -297,7 +315,7 @@ " ['man', 'seated', 'playing', 'cello', '.']]" ] }, - "execution_count": 11, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -334,7 +352,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ @@ -344,7 +362,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -358,14 +376,14 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 21, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Time elapsed: 0.3874\n" + "Time elapsed: 0.4556\n" ] } ], @@ -386,33 +404,41 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 22, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Embedding for apple: [ 0.1108162 0.24349137 -0.01440436 0.03533127 -0.06876028 0.07968962\n", - " 0.01578981 0.14264993 -0.06832716 0.00339077 0.07635406 0.06265593\n", - " 0.03414075 0.10075415 -0.05965225 0.00968812 0.16405381 -0.24480335\n", - " -0.06949984 -0.18414594 0.0465034 0.2028756 0.09074208 0.20703372\n", - " 0.1098601 -0.32350177 -0.10786435 0.08799383 -0.19245893 -0.09788057\n", - " 0.09563518 0.08567159 0.15692063 0.08486914 -0.10940372 0.10400604\n", - " 0.03643018 0.15096138 0.12341096 -0.06584675 -0.21533655 -0.01426107\n", - " -0.06800868 -0.03641699 -0.15752348 -0.01934456 0.0068708 -0.06268159\n", - " 0.04240354 -0.06285387 -0.0215644 -0.00047655 -0.0192252 -0.12477098\n", - " -0.08567388 0.08970863 0.07633136 0.21374965 0.19123942 0.01627954\n", - " 0.11209694 0.06009139 -0.03454148 0.0743629 0.03803044 0.059964\n", - " 0.08909379 -0.04600987 0.06926275 -0.09804282 0.02527839 0.16690746\n", - " -0.11900123 -0.0311705 -0.05939943 -0.14164011 0.22661647 0.08943615\n", - " -0.03721635 0.03887443 -0.15312009 0.06582782 0.13990967 0.08372186\n", - " -0.03915371 0.09002874 0.14046906 -0.04060138 0.11289847 0.0010503\n", - " -0.1014872 -0.08762068 -0.19562078 -0.03109288 -0.16293499 -0.00314896\n", - " -0.02791101 0.04398078 0.04605171 -0.08095105]\n", + "Embedding for apple: [ 0.05805362 0.06101197 -0.04139881 0.02149955 -0.09089632 0.08171839\n", + " 0.10880544 0.04739253 -0.18464622 0.18185261 -0.0273802 0.23335838\n", + " 0.02462817 0.19001065 0.042492 -0.03106086 
0.13986434 -0.08186961\n", + " -0.04803263 -0.03560257 -0.01290459 -0.05349363 -0.01384514 -0.19388926\n", + " -0.07060098 0.06136238 -0.08374732 -0.07936234 0.14275725 -0.17378892\n", + " -0.07579862 0.1358681 0.03124874 0.07999087 -0.10487169 0.03901242\n", + " -0.03545398 0.1413099 0.06107847 -0.06615571 0.03585797 -0.1804256\n", + " 0.23718679 0.0819917 -0.17114222 0.06501587 -0.03194249 -0.05697308\n", + " -0.16496892 -0.02637602 0.01153994 -0.10465483 0.16883366 0.03583959\n", + " -0.05584354 0.11883577 -0.01215279 -0.2250833 -0.07159518 0.08646166\n", + " 0.00850767 0.07679912 -0.13213757 -0.08736049 -0.09475534 -0.03855689\n", + " 0.01396248 -0.02864163 0.00354996 -0.01462657 -0.08833787 -0.11314301\n", + " -0.04131266 -0.09071928 -0.03713143 0.1178434 -0.12651944 -0.11256607\n", + " 0.13031591 -0.15850762 0.11350677 0.14365956 -0.02895318 0.09518009\n", + " -0.02517641 0.00678065 -0.01811527 -0.08079742 0.10072935 0.2130049\n", + " -0.10550384 -0.01195244 -0.0962322 0.05746774 0.05794769 0.22316577\n", + " -0.00290377 -0.11464126 0.01171946 -0.04879373]\n", "\n", "First 30 vocabulary words: ['plane', 'taking', '.', 'air', 'man', 'playing', 'large', 'flute', 'spreading', 'cheese', 'pizza', 'men', 'seated', 'fighting', 'smoking', 'piano', 'guitar', 'singing', 'woman', 'person']\n" ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\smart_open\\smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function\n", + " 'See the migration notes for details: %s' % _MIGRATION_NOTES_URL\n" + ] } ], "source": [ @@ -456,7 +482,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -466,7 +492,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -480,14 +506,14 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Time elapsed: 10.4061\n" + "Time elapsed: 10.3698\n" ] } ], @@ -504,30 +530,30 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Embedding for apple: [ 0.24594913 0.0478383 0.576843 -0.14472146 -0.13372016 0.3994271\n", - " -0.18761183 -0.10253572 -0.5489808 0.3115678 0.18665203 0.08805989\n", - " 0.565551 0.26285723 0.01494028 0.39692047 -0.39978772 -0.30473194\n", - " 0.05508447 0.10066988 0.20679028 0.30775183 0.0472638 -0.239493\n", - " 0.12949444 -0.20410636 -0.13940431 -0.03945793 0.4396631 -0.08924853\n", - " 0.08834386 -0.22228362 0.28431413 0.18899629 0.3427995 -0.2114068\n", - " -0.01075403 0.8549923 0.09068774 -0.04244559 -0.22046468 0.06916029\n", - " -0.31791446 0.11447909 -0.05693823 0.10290135 -0.09406947 -0.26463747\n", - " -0.17336299 0.07076416 -0.26909345 0.1761348 0.14077482 0.24621071\n", - " -0.0408617 -0.3031526 0.10244257 0.4772046 0.25927255 -0.02917116\n", - " 0.2211562 0.04355185 0.19956268 0.13878216 0.28868207 -0.5039835\n", - " 0.41010958 0.07107946 -0.09606131 -0.22969621 0.05883528 -0.01241339\n", - " 0.00676485 0.311163 0.08247512 -0.13799056 0.15181121 0.08045118\n", - " -0.06654785 0.04279696 0.532607 
0.2505259 0.10194286 0.05519621\n", - " -0.451315 -0.24121635 0.10120259 0.36105216 0.47429752 0.4230102\n", - " -0.07235575 -0.16397384 0.28193682 -0.21931437 -0.16088559 -0.03915804\n", - " 0.41476008 -0.03525754 0.34007013 -0.152273 ]\n", + "Embedding for apple: [-0.18175453 -0.14863092 0.01440668 0.41852772 0.4886491 -0.24110396\n", + " -0.26591563 -0.42659786 -0.04840926 -0.05654079 0.26051033 -0.02733019\n", + " 0.00937179 -0.07287153 0.21057971 0.21508346 0.06344912 0.10872953\n", + " -0.10214202 -0.54538804 -0.15845574 -0.05536952 -0.04718296 -0.46515992\n", + " -0.12252445 -0.09347973 0.11549287 0.14775406 -0.4141621 0.24835227\n", + " 0.08907127 -0.00180367 -0.02042806 0.13677692 0.19265138 0.1525672\n", + " 0.05339279 -0.18745865 -0.38480887 -0.26928213 0.2699537 0.38778877\n", + " 0.28482276 -0.17511593 0.11898511 -0.06478633 -0.39813048 0.30248052\n", + " 0.03833921 0.08309021 -0.06976178 -0.15951832 -0.6560336 -0.4534666\n", + " -0.18082033 0.09569218 0.10938869 -0.3292928 -0.4216524 0.24858503\n", + " -0.35272446 -0.30754313 0.06224228 0.23139575 -0.11154156 0.03544799\n", + " -0.09699723 0.13625555 0.3257419 -0.09298395 0.3291442 -0.03776973\n", + " -0.17104091 -0.19018205 0.13310616 0.22434781 -0.00192542 -0.22643566\n", + " -0.02940017 -0.3396929 0.09581995 -0.09487487 0.15184835 0.05633284\n", + " -0.13727354 0.28902617 -0.09076066 -0.15375414 0.11667106 0.1914239\n", + " 0.36700025 0.03567546 0.67464125 0.48771846 -0.40189445 -0.37667385\n", + " -0.50891036 -0.16170104 -0.40450782 0.07738833]\n", "\n", "First 30 vocabulary words: ['plane', 'taking', '.', 'air', 'man', 'playing', 'large', 'flute', 'spreading', 'cheese', 'pizza', 'men', 'seated', 'fighting', 'smoking', 'piano', 'guitar', 'singing', 'woman', 'person']\n" ] @@ -589,7 +615,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -601,7 +627,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 28, "metadata": {}, "outputs": [], "source": [ @@ -626,7 +652,7 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -634,7 +660,7 @@ "output_type": "stream", "text": [ "BUILDING VOCABULARY\n", - "Processed 0 tokens.Processed 84997 tokens.\n", + "Processed 0 tokens.Processed 85334 tokens.\n", "Counted 11716 unique words.\n", "Truncating vocabulary at min count 5.\n", "Using vocabulary of size 2943.\n", @@ -665,7 +691,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -679,9 +705,9 @@ "overflow length: 38028356\n", "Reading vocab from file \"../../data/trained_word_embeddings/vocab.txt\"...loaded 2943 words.\n", "Building lookup table...table contains 8661250 elements.\n", - "Processing token: 0Processed 84997 tokens.\n", + "Processing token: 0Processed 85334 tokens.\n", "Writing cooccurrences to disk......2 files in total.\n", - "Merging cooccurrence files: processed 0 lines.0 lines.100000 lines.Merging cooccurrence files: processed 187717 lines.\n", + "Merging cooccurrence files: processed 0 lines.0 lines.100000 lines.Merging cooccurrence files: processed 188154 lines.\n", "\n" ] } @@ -706,7 +732,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 31, "metadata": {}, "outputs": [ { @@ -715,9 +741,9 @@ "text": [ "SHUFFLING COOCCURRENCES\n", "array size: 255013683\n", - "Shuffling by chunks: processed 0 lines.processed 187717 lines.\n", + "Shuffling by chunks: processed 0 
lines.processed 188154 lines.\n", "Wrote 1 temporary file(s).\n", - "Merging temp files: processed 0 lines.187717 lines.Merging temp files: processed 187717 lines.\n", + "Merging temp files: processed 0 lines.188154 lines.Merging temp files: processed 188154 lines.\n", "\n" ] } @@ -747,7 +773,7 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 32, "metadata": {}, "outputs": [ { @@ -755,27 +781,27 @@ "output_type": "stream", "text": [ "TRAINING MODEL\n", - "Read 187717 lines.\n", + "Read 188154 lines.\n", "Initializing parameters...done.\n", "vector size: 50\n", "vocab size: 2943\n", "x_max: 10.000000\n", "alpha: 0.750000\n", - "05/09/19 - 03:10.13PM, iter: 001, cost: 0.078329\n", - "05/09/19 - 03:10.13PM, iter: 002, cost: 0.072090\n", - "05/09/19 - 03:10.13PM, iter: 003, cost: 0.070081\n", - "05/09/19 - 03:10.13PM, iter: 004, cost: 0.067171\n", - "05/09/19 - 03:10.13PM, iter: 005, cost: 0.063501\n", - "05/09/19 - 03:10.13PM, iter: 006, cost: 0.060700\n", - "05/09/19 - 03:10.13PM, iter: 007, cost: 0.058092\n", - "05/09/19 - 03:10.13PM, iter: 008, cost: 0.056080\n", - "05/09/19 - 03:10.13PM, iter: 009, cost: 0.054016\n", - "05/09/19 - 03:10.13PM, iter: 010, cost: 0.051806\n", - "05/09/19 - 03:10.13PM, iter: 011, cost: 0.049565\n", - "05/09/19 - 03:10.13PM, iter: 012, cost: 0.047378\n", - "05/09/19 - 03:10.13PM, iter: 013, cost: 0.045232\n", - "05/09/19 - 03:10.13PM, iter: 014, cost: 0.043136\n", - "05/09/19 - 03:10.13PM, iter: 015, cost: 0.041132\n" + "06/26/19 - 09:24.10AM, iter: 001, cost: 0.078565\n", + "06/26/19 - 09:24.10AM, iter: 002, cost: 0.072320\n", + "06/26/19 - 09:24.10AM, iter: 003, cost: 0.070274\n", + "06/26/19 - 09:24.10AM, iter: 004, cost: 0.067244\n", + "06/26/19 - 09:24.10AM, iter: 005, cost: 0.063690\n", + "06/26/19 - 09:24.10AM, iter: 006, cost: 0.060640\n", + "06/26/19 - 09:24.10AM, iter: 007, cost: 0.058201\n", + "06/26/19 - 09:24.10AM, iter: 008, cost: 0.056211\n", + "06/26/19 - 09:24.10AM, iter: 009, cost: 0.054148\n", + "06/26/19 - 09:24.10AM, iter: 010, cost: 0.051913\n", + "06/26/19 - 09:24.10AM, iter: 011, cost: 0.049649\n", + "06/26/19 - 09:24.10AM, iter: 012, cost: 0.047426\n", + "06/26/19 - 09:24.10AM, iter: 013, cost: 0.045255\n", + "06/26/19 - 09:24.10AM, iter: 014, cost: 0.043138\n", + "06/26/19 - 09:24.10AM, iter: 015, cost: 0.041108\n" ] } ], @@ -787,7 +813,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 33, "metadata": {}, "outputs": [], "source": [ @@ -796,14 +822,14 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 34, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Time elapsed: 8.1586\n" + "Time elapsed: 25.0459\n" ] } ], @@ -827,7 +853,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 35, "metadata": {}, "outputs": [], "source": [ @@ -841,16 +867,16 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 36, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Embedding for apple: [0.123773, -0.053006, 0.070493, 0.108794, 0.056317, -0.121031, 0.031882, 0.036723, -0.080099, 0.070415, -0.049969, 0.13519, 0.02835, 0.077195, 0.038348, -0.07014, 0.064163, -0.073477, 0.054575, 0.000798, 0.144856, 0.129294, 0.088421, 0.098318, -0.208831, 0.003972, 0.043487, 0.098745, -0.135213, -0.080192, 0.033854, -0.092947, -0.086098, 0.063487, -0.003857, -0.040265, 0.006533, -0.028026, -0.0315, -0.046298, 0.053757, -0.038117, 0.008664, -0.141584, 0.105524, 0.106604, 
-0.102875, 0.062868, -0.185542, -0.002386]\n", + "Embedding for apple: [0.062942, -0.097984, 0.037373, 0.111635, 0.086733, -0.071781, 0.043611, -0.01458, -0.012725, 0.076614, -0.13072, 0.129127, -0.00262, 0.015669, 0.06114, -0.044421, 0.004353, -0.066637, 0.049023, -0.00885, 0.138072, 0.165017, 0.047256, 0.122998, -0.247253, 0.01951, 0.007255, 0.070611, -0.130033, -0.05971, 0.056946, -0.085183, -0.118371, 0.033433, -0.035763, 0.021646, -0.005461, -0.03758, -0.048107, -0.075025, 0.012993, -0.07799, -0.030288, -0.137319, 0.121737, 0.054742, -0.013201, 0.055261, -0.146741, -0.041641]\n", "\n", - "First 30 vocabulary words: ['.', ',', 'man', '-', 'woman', \"'\", 'said', 'dog', '\"', 'playing', ':', 'white', 'black', '$', 'killed', 'percent', 'new', 'syria', 'people', 'china']\n" + "First 30 vocabulary words: ['.', ',', 'man', '-', '\"', 'woman', \"'\", 'said', 'dog', 'playing', ':', 'white', 'black', '$', 'killed', 'percent', 'new', 'syria', 'people', 'china']\n" ] } ], diff --git a/scenarios/sentence_similarity/baseline_deep_dive.ipynb b/scenarios/sentence_similarity/baseline_deep_dive.ipynb index e99de3109..e55cad900 100644 --- a/scenarios/sentence_similarity/baseline_deep_dive.ipynb +++ b/scenarios/sentence_similarity/baseline_deep_dive.ipynb @@ -83,12 +83,21 @@ "cell_type": "code", "execution_count": 1, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]\n", + "Gensim version: 3.7.3\n" + ] + } + ], "source": [ "#Import Packages\n", "import sys\n", "# Set the environment path\n", - "sys.path.append(\"../../../\") \n", + "sys.path.append(\"../../\") \n", "import os\n", "from collections import Counter\n", "import math\n", @@ -160,11 +169,44 @@ "metadata": { "scrolled": true }, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 160KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpzq_k_pn9\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 211KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpzq_k_pn9\\raw\\stsbenchmark\n" + ] + } + ], "source": [ "# Produce a pandas dataframe for the training and test sets\n", - "sts_train = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", - "sts_test = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")\n", + "\n", + "# Clean the sts dataset\n", + "sts_train = stsbenchmark.clean_sts(train_raw)\n", + "sts_test = stsbenchmark.clean_sts(test_raw)" ] }, { @@ -660,10 +702,12 @@ "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "Vector file already exists. 
No changes made.\n" + "100%|████████████████████████████████████████████████████████████████████████████| 1.61M/1.61M [04:45<00:00, 5.63kKB/s]\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\smart_open\\smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function\n", + " 'See the migration notes for details: %s' % _MIGRATION_NOTES_URL\n" ] } ], @@ -926,10 +970,12 @@ "metadata": {}, "outputs": [ { - "name": "stdout", + "name": "stderr", "output_type": "stream", "text": [ - "Vector file already exists. No changes made.\n" + "100%|████████████████████████████████████████████████████████████████████████████| 2.13M/2.13M [12:20<00:00, 2.87kKB/s]\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\smart_open\\smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function\n", + " 'See the migration notes for details: %s' % _MIGRATION_NOTES_URL\n" ] } ], @@ -1009,14 +1055,14 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 23, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "10.4GB [35:23, 4.88MB/s] \n" + "100%|████████████████████████████████████████████████████████████████████████████| 2.56M/2.56M [13:37<00:00, 3.13kKB/s]\n" ] } ], @@ -1026,16 +1072,16 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\azureml\\lib\\site-packages\\ipykernel_launcher.py:12: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\ipykernel_launcher.py:12: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n", " if sys.path[0] == '':\n", - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\azureml\\lib\\site-packages\\ipykernel_launcher.py:29: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).\n" + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\ipykernel_launcher.py:29: DeprecationWarning: Call to deprecated `__getitem__` (Method will be removed in 4.0.0, use self.wv.__getitem__() instead).\n" ] } ], @@ -1058,21 +1104,21 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\azureml\\lib\\site-packages\\ipykernel_launcher.py:13: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\ipykernel_launcher.py:13: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() 
instead).\n", " del sys.path[0]\n", - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\azureml\\lib\\site-packages\\ipykernel_launcher.py:14: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\ipykernel_launcher.py:14: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n", " \n", - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\azureml\\lib\\site-packages\\ipykernel_launcher.py:21: DeprecationWarning: Call to deprecated `wmdistance` (Method will be removed in 4.0.0, use self.wv.wmdistance() instead).\n", - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\azureml\\lib\\site-packages\\ipykernel_launcher.py:16: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\ipykernel_launcher.py:21: DeprecationWarning: Call to deprecated `wmdistance` (Method will be removed in 4.0.0, use self.wv.wmdistance() instead).\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\ipykernel_launcher.py:16: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n", " app.launch_new_instance()\n", - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\azureml\\lib\\site-packages\\ipykernel_launcher.py:17: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n" + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\ipykernel_launcher.py:17: DeprecationWarning: Call to deprecated `__contains__` (Method will be removed in 4.0.0, use self.wv.__contains__() instead).\n" ] } ], @@ -1109,7 +1155,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 26, "metadata": {}, "outputs": [], "source": [ @@ -1132,21 +1178,17 @@ " stop_words=stop_word_param,\n", " sublinear_tf=True,\n", " )\n", - "\n", " all_sentences = df[[\"sentence1\", \"sentence2\"]]\n", - " corpus = all_sentences.values.flatten().tolist()\n", + " corpus = np.concatenate([df['sentence1'].values, df['sentence2'].values])\n", " tfidf_matrix = np.array(tf.fit_transform(corpus).todense())\n", - " \n", - " df['sentence1_tfidf'] = df.apply(lambda x: tfidf_matrix[2*x.name,:], axis=1)\n", - " df['sentence2_tfidf'] = df.apply(lambda x: tfidf_matrix[2*x.name+1,:], axis=1)\n", - " df['predictions'] = df.apply(lambda x: calculate_cosine_similarity(x.sentence1_tfidf, x.sentence2_tfidf) if \n", - " (sum(x.sentence1_tfidf) != 0 and sum(x.sentence2_tfidf) != 0) else 0,axis=1)\n", + " df['predictions'] = df.apply(lambda x: calculate_cosine_similarity(tfidf_matrix[int(x.name),:], tfidf_matrix[len(df.index)+int(x.name),:]) if \n", + " (sum(tfidf_matrix[int(x.name),:]) != 0 and sum(tfidf_matrix[len(df.index)+int(x.name),:]) != 0) else 0,axis=1)\n", " return df['predictions'].tolist()" ] }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 27, "metadata": { "scrolled": true }, @@ -1185,7 +1227,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 28, "metadata": {}, "outputs": [], "source": [ @@ -1206,7 +1248,7 @@ 
}, { "cell_type": "code", - "execution_count": 30, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -1264,7 +1306,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 30, "metadata": { "scrolled": true }, @@ -1294,7 +1336,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 31, "metadata": {}, "outputs": [], "source": [ @@ -1314,30 +1356,33 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 32, "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Word2vec Cosine 0.6337760059182685\n", - "Word2vec Cosine with Stop Words 0.647674307797345\n", - "Word2vec WMD 0.6578256301323717\n", - "Word2vec WMD with Stop Words 0.5697910628727219\n", - "GLoVe Cosine 0.642064729899729\n", - "GLoVe Cosine with Stop Words 0.5639670964748242\n", - "GLoVe WMD 0.6272339050920003\n", - "GLoVe WMD with Stop Words 0.48560149551724\n", - "fastText Cosine 0.6288780924569854\n", - "fastText Cosine with Stop Words 0.5958470751204787\n", - "fastText WMD 0.5275208457920849\n", - "fastText WMD with Stop Words 0.44198752510004097\n", - "TF-IDF Cosine 0.6683811410442564\n", - "TF-IDF Cosine with Stop Words 0.7034695168223283\n", - "Doc2vec Cosine 0.4984144504392967\n", - "Doc2vec Cosine with Stop Words 0.4172218818503345\n" - ] + "data": { + "text/plain": [ + "{'Word2vec Cosine': 0.6476606845766778,\n", + " 'Word2vec Cosine with Stop Words': 0.6683808069062863,\n", + " 'Word2vec WMD': 0.6574175839579567,\n", + " 'Word2vec WMD with Stop Words': 0.5689438215886101,\n", + " 'GLoVe Cosine': 0.6688056947022161,\n", + " 'GLoVe Cosine with Stop Words': 0.6049380247374541,\n", + " 'GLoVe WMD': 0.6267300417407605,\n", + " 'GLoVe WMD with Stop Words': 0.48470008225931194,\n", + " 'fastText Cosine': 0.6707510007525627,\n", + " 'fastText Cosine with Stop Words': 0.6771300330824099,\n", + " 'fastText WMD': 0.6394958913339955,\n", + " 'fastText WMD with Stop Words': 0.5177829727556036,\n", + " 'TF-IDF Cosine': 0.6749213786510483,\n", + " 'TF-IDF Cosine with Stop Words': 0.7118087132257667,\n", + " 'Doc2vec Cosine': 0.5236274769065202,\n", + " 'Doc2vec Cosine with Stop Words': 0.45176043696294416}" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ @@ -1350,19 +1395,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We investigate our 8 models with and without stop words (16 different results total). The results show that TF-IDF bag-of-words document embeddings (without stop words) combined with the cosine similarity performs the best, with a Pearson correlation of 0.7034. " + "We investigate our 8 models with and without stop words (16 different results total). The results show that TF-IDF bag-of-words document embeddings combined with the cosine similarity performs the best." 
] }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 33, "metadata": { "scrolled": true }, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAhMAAAEWCAYAAADchhUKAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvOIA7rQAAIABJREFUeJzs3XecXVW5//HPl4C0UKQaVIh0gUCAECAGBMGGDQUFRCSgInKFCwrKT7yC7YIXFAsIl6vSO4oFUDokpJCQEJJQgpRIFektgCR5fn+s5zA7J2dmzuRMpiTf9+s1r+yzy9prrzOw16y99vMoIjAzMzNbWEv1dgXMzMysf3NnwszMzFrizoSZmZm1xJ0JMzMza4k7E2ZmZtYSdybMzMysJe5MmFm3kvQjSc9I+mdv16UvkPQ+SX+X9IqkPXu7PgCSRkm6rfL5FUnr92adukJSSNqwif12kfRYT9RpSefOhNkSTtIsSa/lDeUpSWdLGriQZb0b+CawWUS8o3tr2m/9ADgtIgZGxB/rN9a1//OSrs527DFZt4e6u1xJt+SNf6u69X/M9bt09zmtd7gzYWYAn4iIgcA2wHbAd7tagKSlgfWAZyPiXwt5/OJoPeDuTvaptf8g4CngV4u8Vj3nfuCLtQ+SVgd2AJ7utRpZt3NnwszeEhGPA38FtgCQtIqk30p6UtLj+QhjQG4bJWmspFMlPQfcAlwPrJN/ZZ+T+31S0t2SXsi/VN9bO1/+Vf5tSdOAVyUtneuOkTRN0qt5/rUl/VXSy5JukPT2ShmXS/qnpBcljZa0eWXbOZJOz7/2X5Z0u6QNKts3l3S9pOdyVOY7uX4pScdKelDSs5Iuk7Rae+0m6SuSHshy/ixpnVz/ILA+8Jdsk2U7af/XgSuAzSplf0zSnZJekvSopBMq25aTdEHW8QVJkySt3dl316D+bz02aKLNNq202UxJn+vomoALgX0q594PuBL4d6XMZSX9XNIT+fPzalvl78OTue3gurovK+kUSY/kd3impOXbuc5vZ1u8nHXfrZO6W5PcmTCzt+Tw+h7AnbnqXGAOsCGwNfAh4MuVQ7YHHgLWAj4IfBR4IofNR0naGLgYOBJYE7iGcmN9W6WM/YCPAatGxJxct1eWtzHwCUoH5zvAGpT/bx1ROf6vwEZZhymUm1fVfsD3gbcDDwA/zmtdCbgB+BuwTl7jjXnMEcCewPtz2/PA6e202QeAE4HPUUYW/gFcAhARGwCPkCMPEfFGozIqZa0A7ANMqKx+lfKX/arZTl9T29yLA4FVgHcDqwOHAq/lts6+u46012YrUjqMF1Haez/g19UOXANPAPfk+clrOa9un+MooxVDga2A4eTomKSPAEdTfh82AnavO/YnlN+ToXmt7wS+V18JSZsAXwe2i4iVgA8Dszqot3VFRPjHP/5Zgn8o/0N9BXiBciP8NbA8sDbwBrB8Zd/9gJtzeRTwSF1ZuwCPVT7/F3BZ5fNSwOPALpVzH9ygPvtXPv8eOKPy+XDgj+1cy6pAAKvk53OA31S27wHcV7mWO9sp515gt8rnQcCbwNIN9v0t8D+VzwNz38GV69m9yfafQ7n5Dulg/58Dp+bywcA4YMu6fZr57m6rbAtgwybabB9gTN25/hc4vp263kLpwHyB0qncBLg/tz1W+T14ENijctyHgVm5/DvgpMq2jWv1BUTpbG1Q2b4j8HD972Pu/y9KZ2SZ3v7vbnH7WVyfUZpZ1+wZETdUV0gaAiwDPCmptnop4NHKbtXlRtahdFAAiIh5kh6l/PXYURlPVZZfa/B5YNZxAOWv5s9SRj7m5T5rAC/mcvWtktm1Yyl/zT/YTr3XA66UNK+ybi7lJv143b7rUEZEAIiIVyQ9S7nGWe2UX2/PiLghr+dTwK2SNouIf0raHjiJ8ujpbcCywOV53Pl5HZdIWhW4gPJX/np0/t11pL02Ww/YXtILle1LZz068gfgp8Cz7ew73+9JLq9T2Ta5blvNmsAKwOTKdQpY4HFORDwg6UjgBGBzSdcC34iIJzqpuzXBjznMrD2PUv66XSMiVs2flSOiOqTdWdrhJyg3IABU/o//bua/IbeSuvjzlJvv7pTh/sG1UzVx7KPABh1s+2jluleNiOWizCmpV3+NK1IeOTTat0MRMTci/kDpuIzM1RcBfwbeHRGrAGeS1xcRb0bE9yNiM2AE8HHKY4RmvruF8Shwa127DIyIr3VyXbMpj6O+RuPOxHxtCKyb6wCepPzOVLfVPEPpXG5eqc8qUSazNqrHRRExMs8VlEck1g3cmTCzhiLiSeA64KeSVs5JiRtIen8XirkM+Jik3SQtQ3lt9A3K0Hx3WCnLe5byF+p/d+HYq4B3SDoyJ/GtlKMAUG7YP5a0HoCkNSV9qp1yLgIOkjQ0Jw3+N3B7RMzq6sWo+BRlrsK9uXol4LmIeF3ScEoHqrb/rpKG5IjGS5THK3O76btr5CpgY0kHSFomf7ZTZVJtB74DvL+ddrkY+G628xqUOQ8X5LbLgFGSNss5JcfXDoqIecD/AadKWgtA0jslfbj+BJI2kfSB/I5ep3RC5jZ74dYxdybMrCNfpAyt30OZhHgFZf5AUyJiJuV5+a8of0V+gjIZ8d8dHti88yjD3o9nHSd0vPt8dXuZMqnvE5Rh/b8Du+bmX1BGA66T9HKWu3075dxImRvye8pf0RsA+3bxOv4i6RVKh+DHwIERUXud9DDgB1mP71FurjXvoHwnL1E6H7fSdhNu6btrJNvsQ5Tre4LSbj+hPHrp7NgnIuK2djb/CLgDmAZMpzw2+lEe91fKPJGbKJNBb6o79tu5foKklyiTajdpcI5lKY+Lnsl6r0Xp4Fg3UEQrI4xmZma2pPPIhJmZmbXEnQkzMzNriTsTZmZm1hJ3JszMzKwlDlplS4Q11lgjBg8e3NvVMDPrVyZPnvxMRKzZ2X7uTNgSYfDgwdxxxx29XQ0zs35F0j8638uPOczMzKxF7kyYmZlZS9yZMDMzs5a4M2FmZmYtcWfCzMzMWuLOhJmZmbXEnQkzMzNriTsTZmZm1hIHrbIlwvTHX2TwsVf3djXMbAkz66SP9XYVeoRHJszMzKwl7kz0MEmnSjqy8vlaSb+pfP6ppG+0UP4Jko7O5ZMl3SdpmqQrJa3aWu07PffReb4Zku6S9MWFKOPQhTnOzMx6jzsTPW8cMAJA0lLAGsDmle0jgLHNFCRpQCe7XA9sERFbAvcD/6/LtW2SpEOBDwLDI2ILYGdAXS0nIs6MiPO6u35mZrbouDPR88aSnQlKJ2IG8LKkt0taFngvcKeKk/Ov/OmS9gGQtIukmyVdBEzPdcdJminpBmCT2oki4rqImJMfJwDvyv1vl/RWB0bSLZK2lbSipN
9JmiTpTkmfyu0DJJ2S9Zgm6fAG1/Ud4LCIeCnP/WJEnJvH75blTc/yl831J0m6J8s8JddVR1ZukfQTSRMl3S9pp0p9Ts56TpP01da+EjMza4UnYPawiHhC0hxJ61I6FeOBdwI7Ai8C0yLi35L2AoYCW1FGLyZJGp3FDKeMODwsaVtgX2Bryvc5BZjc4NQHA5fm8iXA54DjJQ0C1omIyZL+G7gpIg7ORyITs4PyReA9wNYRMUfSatWCJa0ErBQRD9afVNJywDnAbhFxv6TzgK/lv58GNo2I6OARzNIRMVzSHsDxwO7Al4AXI2K77JiMlXRdRDxcd+5DgEMABqzcaQZdMzNbSB6Z6B210YlaZ2J85fO43GckcHFEzI2Ip4Bbge1y28TKjXMn4MqImJ2jAn+uP5mk44A5wIW56jLgs7n8OeDyXP4QcKykqcAtwHLAupQb+Jm1UY6IeK7+FEC0c62bAA9HxP35+VzKI5CXgNeB30j6DDC7neP/kP9OBgZX6vnFrOftwOrARvUHRsRZETEsIoYNWGGVdoo3M7NWeWSid9TmTQyhPOZ4FPgm5Qb7u9yno/kGr9Z9bu9GjqQDgY9TRgYCICIel/SspC2BfYDaYwIBe0XEzLoyOuosEBEvSXpV0voR8VB9Fdo5Zo6k4cBulJGVrwMfaLDrG/nvXNp+XwUcHhHXtlcnMzPrOR6Z6B1jKTf453Lk4TlgVcqjjvG5z2hgn5wfsCblr/mJDcoaDXxa0vL5uOETtQ2SPgJ8G/hkRNT/5X8J8C1glYiYnuuuBQ7PzgOSts711wGHSlo616/Ggk4ETpe0cu6zcj5muA8YLGnD3O8A4FZJA/Pc1wBHUh7pNOtayqOSZfJcG0tasQvHm5lZN/LIRO+YTpkHcVHduoER8Ux+vpLSubiLMirwrYj4p6RNqwVFxBRJlwJTgX8AYyqbTwOWBa7P/sGEiDg0t10B/AL4YWX/HwI/B6Zlh2IWpdPzG2DjXP8m8H9ZdtUZwEDK3I43gTeBn0bE65IOAi7Pzsgk4ExgNeBPOadCwFGdtlqb31AeeUzJej4N7NmF483MrBspR77NFmvDhg2LO+64o7erYWbWr0iaHBHDOtvPjznMzMysJe5MmJmZWUs8Z8KWCE70ZWZ9yeKWAMwjE2ZmZtaSDjsTclKqzsrosaRUkq6RtGr+HFZZv4ukq5o4focMoz1V0r2STqgcP6KTw5ut45WS9qx8ninpu5XPv88AVQtb/jmS9m61nmZm1r06G5lwUqoO9GRSqojYIyJeoMSjOKyz/Rs4FzgkIoYCW1CiYALsQluukFZVf19WB16hvN5asyNtET47VItpYWZmfV9nnQknpeqBpFSSviXpiFw+VdJNlbpckMuzJK0BnARskCMMJ2cRAyVdkSMtF2bshXprAU/m9c6NiHskDQYOBY7K8naStJ6kG7OuN6rkEKmNCpwpaUxe38cbnKP6+zICuApYM38/3gO8lrEylpN0drbxnZJ2zXOMknS5pL8A1+Vxp2W7X53XUGuzBb4PMzPrHR3+9eekVD2WlGo0JZz2L4FhwLIq0R1HMn8QKoBjsz2HZp13yfbcHHiCckN/H3Bb3XGnAjMl3QL8DTg3ImZJOhN4JSJqHaS/AOdFxLmSDs461R5dDAbeD2wA3Cxpw4h4vXKOycAWkt5G+X25FVif0uncmrZRrP8AiIghKkG4rpO0cW7bEdgyIp5TeSSyCSXs+NrAPcDv8jvt9PuQE32ZmfWIZiZgOinVok9KNRnYNjs6b1DaeBilveo7E41MjIjHImIeJRLm4PodIuIHWeZ1wOcpHYpGdqQtMuf5lO+25rKImBcRfwceAuqjcb4B3A1sA+yQ19ve78v5ecx9lMidtc7E9ZXvbGfafq+eAG7K9U19H070ZWbWM5rpTNQnpZpAueFU50t0d1Kq/atJqYBqUqpLKufcKyKG5s+6EXEvHXcWyE7Mq5LWb1SFdo6ZQxlh+T3lr/T2bsQdJaWq1fM9EXFdXflvUkJXH0Rp7zHArpQRgHvbu5YG560/d/11PBgRZ1CSa22lMq+hM9HOcqPPUOq/M2X053nK70utM9Etvy9d+D7MzKwHNDsy4aRUiz4p1Wjg6Px3DGUuw9Rap6riZWClLpyfPO/HKnMpNqJ0Ol5oUN44yqMogP2Z/3HJZyUtJWkDyuOL+bKLprGULKR35edplFGKdSmjFlCucf+s18a5rVFZo4F98/dqEKWDRYvfh5mZdbNmZsw7KVXPJKUaAxwHjI+IVyW9ToNHHBHxrKSxkmYAfwWajcR0AHCqpNmUx0j7R8TcnCNxhcoE1sOBIyjzEo7Juh5UKWMm5RHW2sChdfMlasZROhonZn3nSPoX8Gg+hgH4NXCmpOlZl1ER8YYWnDd6JSUt+XTKGz635vqVWPjvw8zMupkTfVlTJJ0DXBURV/R2XRaGE32ZmXWdnOjLzMzMeoIDA1lTImJUb9ehFc7NYWZ9XX/O1+GRCTMzM2uJOxN9iBbDXCiStsoYG7XP+0maXXm7ZYikabl8i6RHKm+dIOmPkl7J5cGSXlOJmnmvSrTRAxdFvc3MrHnuTPQti2MulOnAevkqMJRruI8SEbP2uXpNL1AieJIdnEF15T0YEVtHxHspr7AelW/gmJlZL3Fnom9Z7HKh5Ougk4Dtc9W2wOnMn8OjmvzrEtriXHyGtqiiC4iIh4BvUF5nNTOzXuLORB+SIaPrc6HcTonhMYzMhUK5ydZyoewOnJxBnaBEhjwuIjbT/LlQPkNbiPN6B1NiVkBbLhRUyYVCiYFxU0RsRwkedXIG3zqEtlwoW9IWBr1qHDAi959HCX9e7UxURyZuBHbOkZV9acvR0p4p1IX1rpF0iKQ7JN0xd/aLnRRjZmYLy52Jvmdxy4VSvabhwKRMsrahSrTUgTnCUDOXEnVzH2D5iJjVflOVS2hvg3NzmJn1DL8a2vfU50J5lJJR9CXgd7lPd+dC2a2aC0VSNRdKLWV6LRfKzLoyOsyFkiZQOjsjaQvB/hhl5GFcg/0voUS/PKGTcqGMujSTv8TMzBYRj0z0PYtdLpSIeJnSKRpVuYbxlLwajToTYyjhuC9usO0tkgYDpwC/6mg/MzNbtNyZ6HtquVAm1K17sS4XyjRKLpSbyFwo9QVFxBTKnIOplAyb9blQVqLkQpkq6czKtisoowaXVdb9EFiGkvNkBm15Un4DPJLr76KkN29kLLBsRDyan8dTcngs0JmI4pTK9VZtUHs1NOv3q4g4u51zmplZD3BuDlsiODeHmVnXOTeHmZmZ9Qh3JszMzKwlfpvDlghO9GVm/TmRVl/nkQkzMzNryUJ3JpyUqueSUklaR9IVuTxU0h6VbW+1UydlHFwJeT2jEg57lKR1FqZedeVL0jOS3p6fB0kKSSMr+zwtafUWzjFL0hqt1tXMzLpXKyMTTkrVQ0mpIuKJiNg7Pw4F9uho/3qS3kUJhz0y23AHyqulUGI/tNyZyKBXtdDfUNrqTtp+RzYBnomIZ5ussx/BmZn1E610JpyUqpuSUkm6JiNOkvX9Xi7/U
[... base64-encoded PNG plot output from notebook cells omitted ...]
" ] @@ -1393,7 +1438,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 34, "metadata": {}, "outputs": [], "source": [ @@ -1403,9 +1448,36 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/papermill.record+json": { + "results": { + "Doc2vec Cosine": 0.5236274769065202, + "Doc2vec Cosine with Stop Words": 0.45176043696294416, + "GLoVe Cosine": 0.6688056947022161, + "GLoVe Cosine with Stop Words": 0.6049380247374541, + "GLoVe WMD": 0.6267300417407605, + "GLoVe WMD with Stop Words": 0.48470008225931194, + "TF-IDF Cosine": 0.6749213786510483, + "TF-IDF Cosine with Stop Words": 0.7118087132257667, + "Word2vec Cosine": 0.6476606845766778, + "Word2vec Cosine with Stop Words": 0.6683808069062863, + "Word2vec WMD": 0.6574175839579567, + "Word2vec WMD with Stop Words": 0.5689438215886101, + "fastText Cosine": 0.6707510007525627, + "fastText Cosine with Stop Words": 0.6771300330824099, + "fastText WMD": 0.6394958913339955, + "fastText WMD with Stop Words": 0.5177829727556036 + } + } + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Record results with papermill for tests\n", "pm.record(\"results\", results)" @@ -1428,7 +1500,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.5" + "version": "3.6.8" } }, "nbformat": 4, From 4202063d5bd366b42b9a226aa17b30a057f3c5cd Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Wed, 12 Jun 2019 12:18:09 -0400 Subject: [PATCH 068/108] autoML notebook with google universal sentence encoder features --- ...ml_google_universal_sentence_encoder.ipynb | 723 ++++++++++++++++++ 1 file changed, 723 insertions(+) create mode 100644 scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb diff --git a/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb b/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb new file mode 100644 index 000000000..658ce393f --- /dev/null +++ b/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb @@ -0,0 +1,723 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning#3-setup-a-new-conda-environment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrates how to extract features for a sentence similarity task using the pretrained models InferSent and Google Universal Sentence Encoder. Then we will demonstrate how the AutoML package can easily automate model selection and hyperparameter tuning" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Logging before flag parsing goes to stderr.\n", + "W0612 09:40:01.239300 40168 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Turning diagnostics collection on. 
\n", + "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", + "Azure ML SDK Version: 1.0.41\n", + "Pandas version: 0.23.4\n", + "Tensorflow Version: 1.13.1\n" + ] + } + ], + "source": [ + "# set the environment path to find NLP\n", + "import sys\n", + "sys.path.append(\"../../../\")\n", + "import time\n", + "import os\n", + "import pandas as pd\n", + "import shutil\n", + "import numpy as np\n", + "import torch\n", + "import sys\n", + "from scipy.stats import pearsonr\n", + "from scipy.spatial import distance\n", + "from utils_nlp.azureml import azureml_utils\n", + "\n", + "#tensorflow dependencies for Google Universal Sentence Encoder\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "\n", + "#AzureML packages\n", + "import azureml as aml\n", + "import logging\n", + "from azureml.telemetry import set_diagnostics_collection\n", + "set_diagnostics_collection(send_diagnostics=True)\n", + "from azureml.train.automl import AutoMLConfig\n", + "from azureml.core.experiment import Experiment\n", + "from azureml.widgets import RunDetails\n", + "\n", + "print(\"System version: {}\".format(sys.version))\n", + "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", + "print(\"Pandas version: {}\".format(pd.__version__))\n", + "print(\"Tensorflow Version:\", tf.VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "BASE_DATA_PATH = '../../../data'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Feature Engineering" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We'll collect the Google Sentence Encoder encodings" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "#replace all this with our util!\n", + "data = []\n", + "with open(\"sts-train.csv\", 'r', encoding=\"utf-8\") as f:\n", + " for line in f:\n", + " l = line.strip().split(\"\\t\")\n", + " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", + "train = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", + "\n", + "data = []\n", + "with open(\"sts-test.csv\", 'r', encoding=\"utf-8\") as f:\n", + " for line in f:\n", + " l = line.strip().split(\"\\t\")\n", + " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", + "test = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", + "\n", + "data = []\n", + "with open(\"sts-dev.csv\", 'r', encoding=\"utf-8\") as f:\n", + " for line in f:\n", + " l = line.strip().split(\"\\t\")\n", + " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", + "dev = pd.DataFrame(data, columns=['sentence1','sentence2','score'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Google Universal Sentence Encoder" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", + "\n", + "# Import the Universal Sentence Encoder's TF Hub module\n", + "google_USE_embed = hub.Module(module_url)\n", + "\n", + "# Reduce logging output.\n", + "tf.logging.set_verbosity(tf.logging.ERROR)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Embed Sentences" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def 
embed_google_universal_sentence_encoder(dataset, embedding_model):\n", + " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", + " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", + "\n", + " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", + " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", + " \n", + " with tf.Session() as session:\n", + " session.run(tf.global_variables_initializer())\n", + " session.run(tf.tables_initializer())\n", + " emb1, emb2 = session.run(\n", + " [sts_encode1, sts_encode2],\n", + " feed_dict={\n", + " sts_input1: dataset['sentence1'],\n", + " sts_input2: dataset['sentence2']\n", + " })\n", + " return emb1, emb2\n", + " \n", + "def feature_engineering(dataset, googleUSE_embedding_model):\n", + " google_USE_emb1, google_USE_emb2 = embed_google_universal_sentence_encoder(dataset, googleUSE_embedding_model)\n", + " n_google = google_USE_emb1.shape[1] \n", + " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", + " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", + " df = pd.DataFrame(df, columns=names)\n", + " df['score'] = dataset['score']\n", + " return df" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "training_data = feature_engineering(train, google_USE_embed)\n", + "validation_data = feature_engineering(dev, google_USE_embed)\n", + "testing_data = feature_engineering(test, google_USE_embed)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# Take out when notebook is ready for publishing\n", + "training_data.to_csv(\"Data/training_set.csv\", index=None)\n", + "testing_data.to_csv(\"Data/testing_set.csv\", index=None)\n", + "validation_data.to_csv(\"Data/validation_set.csv\", index=None)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def get_baseline_performance(data):\n", + " sent1_googleUSE = data[[i for i in data.columns if 'USEEmb1' in i]].values.tolist()\n", + " sent2_googleUSE = data[[i for i in data.columns if 'USEEmb2' in i]].values.tolist()\n", + " \n", + " predictions_googleUSE = [1-distance.cosine(sent1_googleUSE[i], sent2_googleUSE[i]) for i in range(len(sent1_googleUSE))]\n", + " print(\"Google Universal Sentence Encoder Pearson Correlation:\",pearsonr(predictions_googleUSE, data['score'].values.tolist())[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Google Universal Sentence Encoder Pearson Correlation: 0.7640280696312057\n" + ] + } + ], + "source": [ + "get_baseline_performance(testing_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# AutoML - no AmlCompute" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0612 09:49:07.823286 40168 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing 
interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0612 09:49:08.846869 29376 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "W0612 09:49:20.330181 40168 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n", + "Workspace name: MAIDAPTest\n", + "Azure region: eastus2\n", + "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", + "Resource group: nlprg\n" + ] + } + ], + "source": [ + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "#Take out when notebook is ready for publishing\n", + "training_data = pd.read_csv(\"Data/training_set.csv\")\n", + "testing_data = pd.read_csv(\"Data/testing_set.csv\")\n", + "validation_data = pd.read_csv(\"Data/validation_set.csv\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "train_y = training_data['score']\n", + "train_x = training_data[[i for i in training_data.columns if 'USE' in i]]\n", + "\n", + "validation_y = validation_data['score']\n", + "validation_x = validation_data[[i for i in validation_data.columns if 'USE' in i]]\n", + "\n", + "test_y = testing_data['score']\n", + "test_x = testing_data[[i for i in testing_data.columns if 'USE' in i]]" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(5749, 1024)" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train_x.shape" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "automl_settings = {\n", + " \"iteration_timeout_minutes\" : 15,\n", + " \"iterations\" : 50,\n", + " \"primary_metric\" : 'spearman_correlation',\n", + " \"preprocess\" : True,\n", + " \"verbosity\":logging.ERROR}" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "# local compute\n", + "automated_ml_config = AutoMLConfig(task = 'regression',\n", + " debug_log = 'automated_ml_errors.log',\n", + " path = './automated-ml-regression',\n", + " X = train_x.values,\n", + " y = train_y.values.flatten(),\n", + " X_valid = validation_x.values,\n", + " y_valid = validation_y.values.flatten(),\n", + " **automl_settings)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running on local machine\n", + "Parent Run ID: AutoML_089672db-2a8f-4d74-84ce-b3bee49733dd\n", + "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", + "Current status: DatasetEvaluation. Gathering dataset statistics.\n", + "Current status: FeaturesGeneration. 
Generating features for the dataset.\n", + "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", + "Current status: ModelSelection. Beginning model selection.\n", + "\n", + "****************************************************************************************************\n", + "ITERATION: The iteration being evaluated.\n", + "PIPELINE: A summary description of the pipeline being evaluated.\n", + "DURATION: Time taken for the current iteration.\n", + "METRIC: The result of computing score on the fitted pipeline.\n", + "BEST: The best observed score thus far.\n", + "****************************************************************************************************\n", + "\n", + " ITERATION PIPELINE DURATION METRIC BEST\n", + " 0 StandardScalerWrapper RandomForest 0:01:09 0.1834 0.1834\n", + " 1 MinMaxScaler RandomForest 0:01:59 0.4272 0.4272\n", + " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:19 0.2811 0.4272\n", + " 3 StandardScalerWrapper LightGBM 0:00:19 0.2845 0.4272\n", + " 4 RobustScaler DecisionTree 0:00:26 0.2544 0.4272\n", + " 5 StandardScalerWrapper LassoLars 0:00:16 0.1246 0.4272\n", + " 6 StandardScalerWrapper LightGBM 0:00:20 0.6568 0.6568\n", + " 7 StandardScalerWrapper RandomForest 0:00:20 0.2186 0.6568\n", + " 8 StandardScalerWrapper LassoLars 0:00:21 0.0838 0.6568\n", + " 9 MinMaxScaler ExtremeRandomTrees 0:00:21 0.3632 0.6568\n", + " 10 RobustScaler ExtremeRandomTrees 0:01:01 0.3490 0.6568\n", + " 11 StandardScalerWrapper ExtremeRandomTrees 0:01:07 0.2673 0.6568\n", + " 12 MinMaxScaler ExtremeRandomTrees 0:00:22 0.2580 0.6568\n", + " 13 RobustScaler RandomForest 0:00:29 0.3360 0.6568\n", + " 14 StandardScalerWrapper LassoLars 0:00:15 nan 0.6568\n", + " 15 StandardScalerWrapper ExtremeRandomTrees 0:00:15 0.2102 0.6568\n", + " 16 StandardScalerWrapper RandomForest 0:00:28 0.2170 0.6568\n", + " 17 MinMaxScaler SGD 0:00:15 0.0965 0.6568\n", + " 18 StandardScalerWrapper RandomForest 0:00:40 0.3519 0.6568\n", + " 19 MinMaxScaler RandomForest 0:00:18 0.1664 0.6568\n", + " 20 StandardScalerWrapper LightGBM 0:00:38 0.7423 0.7423\n", + " 21 StandardScalerWrapper XGBoostRegressor 0:04:14 0.6688 0.7423\n", + " 22 StandardScalerWrapper DecisionTree 0:05:39 0.2257 0.7423\n", + " 23 StandardScalerWrapper LightGBM 0:01:47 0.6779 0.7423\n", + " 24 StandardScalerWrapper XGBoostRegressor 0:06:50 0.7638 0.7638\n", + " 25 TruncatedSVDWrapper XGBoostRegressor 0:01:09 0.7484 0.7638\n", + " 26 StandardScalerWrapper XGBoostRegressor 0:11:05 0.6608 0.7638\n", + " 27 StandardScalerWrapper RandomForest 0:05:09 0.4328 0.7638\n", + " 28 MaxAbsScaler LightGBM 0:00:36 0.6966 0.7638\n", + " 29 0:15:27 nan 0.7638\n", + "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. 
Please consider increasing the iteration_timeout_minutes parameter.\n", + " 30 TruncatedSVDWrapper XGBoostRegressor 0:01:32 0.5744 0.7638\n", + " 31 StandardScalerWrapper LightGBM 0:01:19 0.6043 0.7638\n", + " 32 RobustScaler DecisionTree 0:01:12 0.2604 0.7638\n", + " 33 MaxAbsScaler LightGBM 0:00:33 0.7516 0.7638\n", + " 34 StandardScalerWrapper LightGBM 0:00:31 0.6979 0.7638\n", + " 35 TruncatedSVDWrapper XGBoostRegressor 0:00:34 0.6998 0.7638\n", + " 36 MaxAbsScaler LightGBM 0:00:38 0.7226 0.7638\n", + " 37 MaxAbsScaler LightGBM 0:01:03 0.7107 0.7638\n", + " 38 StandardScalerWrapper XGBoostRegressor 0:01:08 0.6529 0.7638\n", + " 39 StandardScalerWrapper XGBoostRegressor 0:02:09 0.6202 0.7638\n", + " 40 TruncatedSVDWrapper XGBoostRegressor 0:00:58 0.7196 0.7638\n", + " 41 MaxAbsScaler LightGBM 0:00:44 0.6948 0.7638\n", + " 42 MaxAbsScaler LightGBM 0:05:25 0.6742 0.7638\n", + " 43 StandardScalerWrapper LightGBM 0:01:25 0.5840 0.7638\n", + " 44 MaxAbsScaler LightGBM 0:00:49 0.7087 0.7638\n", + " 45 TruncatedSVDWrapper XGBoostRegressor 0:00:37 0.6802 0.7638\n", + " 46 StandardScalerWrapper XGBoostRegressor 0:01:31 0.6529 0.7638\n", + " 47 MaxAbsScaler LightGBM 0:00:39 0.6008 0.7638\n", + " 48 VotingEnsemble 0:01:40 0.8146 0.8146\n", + " 49 StackEnsemble 0:09:34 0.8142 0.8146\n" + ] + } + ], + "source": [ + "experiment=Experiment(ws, 'automated-ml-regression')\n", + "local_run = experiment.submit(automated_ml_config, show_output=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "15f441d10d3d42c9b950a7e3f2cbc7dc", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "RunDetails(local_run).show()" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "widget_data = RunDetails(local_run).get_widget_data()" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Registering model AutoML089672db2best\n", + "AutoML089672db2best\n" + ] + } + ], + "source": [ + "description = 'AutoML Sentence Similarity Model'\n", + "tags = None\n", + "model = local_run.register_model(description = description, tags = tags)\n", + "\n", + "print(local_run.model_id) # This will be written to the script file later in the notebook." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Run(Experiment: automated-ml-regression,\n", + "Id: AutoML_089672db-2a8f-4d74-84ce-b3bee49733dd_48,\n", + "Type: None,\n", + "Status: Completed)\n", + "RegressionPipeline(pipeline=Pipeline(memory=None,\n", + " steps=[('datatransformer', DataTransformer(enable_feature_sweeping=None, feature_sweeping_timeout=None,\n", + " is_onnx_compatible=None, logger=None, observer=None, task=None)), ('prefittedsoftvotingregressor', PreFittedSoftVotingRegressor(estimators=[('24', Pipeline(memory=None,\n", + " steps=[('stand...333333333333, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667]))]),\n", + " stddev=None)\n" + ] + } + ], + "source": [ + "lookup_metric = \"spearman_correlation\"\n", + "best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n", + "print(best_run)\n", + "print(fitted_model)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.7817644762555442\n" + ] + } + ], + "source": [ + "y_pred = fitted_model.predict(test_x.values)\n", + "print(pearsonr(y_pred, test_y)[0])" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "{'estimators': [('24', Pipeline(memory=None,\n", + " steps=[('standardscalerwrapper', ), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", + " ma...ale_pos_weight=1, seed=None,\n", + " silent=True, subsample=0.7, tree_method='auto', verbose=-10))])),\n", + " ('33', Pipeline(memory=None,\n", + " steps=[('maxabsscaler', MaxAbsScaler(copy=True)), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", + " colsample_bytree=0.7000000000000001, importance_type='split',\n", + " learning_rate=0.16842263157894738, max_bin=7, max_depth=3,\n", + " min_child_samples=14,...ue, subsample=0.5499999999999999,\n", + " subsample_for_bin=200000, subsample_freq=3, verbose=-1))])),\n", + " ('25', Pipeline(memory=None,\n", + " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", + " max_delta_step=0, max_dept...scale_pos_weight=1, seed=None,\n", + " silent=True, subsample=1, tree_method='auto', verbose=-10))])),\n", + " ('20', Pipeline(memory=None,\n", + " steps=[('standardscalerwrapper', ), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", + " importance_type='split', learning_rate=0.2, max_bin=63,\n", + " ...425, silent=True, subsample=0.85,\n", + " subsample_for_bin=200000, subsample_freq=2, verbose=-1))])),\n", + " ('40', Pipeline(memory=None,\n", + " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.9, eta=0.3, gamma=0, learning_rate=0.1,\n", + " max_delta_step=0, max_dep...ale_pos_weight=1, seed=None, silent=True,\n", + " subsample=0.9, tree_method='auto', verbose=-10))])),\n", + " ('37', 
Pipeline(memory=None,\n", + " steps=[('maxabsscaler', MaxAbsScaler(copy=True)), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", + " colsample_bytree=0.9, importance_type='split',\n", + " learning_rate=0.11579368421052631, max_bin=63, max_depth=10,\n", + " min_child_samples=16, min_child_we...e, subsample=0.44999999999999996,\n", + " subsample_for_bin=200000, subsample_freq=3, verbose=-1))])),\n", + " ('44', Pipeline(memory=None,\n", + " steps=[('maxabsscaler', MaxAbsScaler(copy=True)), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", + " colsample_bytree=0.7000000000000001, importance_type='split',\n", + " learning_rate=0.16842263157894738, max_bin=7, max_depth=9,\n", + " min_child_samples=60,... subsample=0.7999999999999999, subsample_for_bin=200000,\n", + " subsample_freq=7, verbose=-1))])),\n", + " ('45', Pipeline(memory=None,\n", + " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.10894736842105263, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.7, eta=0.05, gamma=0.1, learning_rate=0.1,\n", + " max_delta_step=0, max...ale_pos_weight=1, seed=None,\n", + " silent=True, subsample=0.7, tree_method='auto', verbose=-10))]))],\n", + " 'weights': [0.2,\n", + " 0.2,\n", + " 0.2,\n", + " 0.13333333333333333,\n", + " 0.06666666666666667,\n", + " 0.06666666666666667,\n", + " 0.06666666666666667,\n", + " 0.06666666666666667],\n", + " 'flatten_transform': None}" + ] + }, + "execution_count": 52, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "fitted_model.pipeline.steps[1][1].get_params()" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['sentence_similarity_regressor.pkl']" + ] + }, + "execution_count": 44, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from sklearn.externals import joblib\n", + "\n", + "model_path = 'sentence_similarity_regressor.pkl'\n", + "\n", + "joblib.dump(fitted_model, model_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [], + "source": [ + "m2 = joblib.load('sentence_similarity_regressor.pkl')" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "RegressionPipeline(pipeline=Pipeline(memory=None,\n", + " steps=[('datatransformer', DataTransformer(enable_feature_sweeping=None, feature_sweeping_timeout=None,\n", + " is_onnx_compatible=None, logger=None, observer=None, task=None)), ('prefittedsoftvotingregressor', PreFittedSoftVotingRegressor(estimators=[('24', Pipeline(memory=None,\n", + " steps=[('stand...333333333333, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667]))]),\n", + " stddev=None)" + ] + }, + "execution_count": 54, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "m2" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 7b06d67dbbf497dd2724b801a87c34446b3f12cb Mon Sep 17 00:00:00 
2001 From: Courtney Cochrane Date: Wed, 12 Jun 2019 14:45:10 -0400 Subject: [PATCH 069/108] Fixed notebooks based on new sts benchmark data loader functions --- ...ml_google_universal_sentence_encoder.ipynb | 324 ++++++++++-------- .../baseline_deep_dive.ipynb | 10 +- 2 files changed, 195 insertions(+), 139 deletions(-) diff --git a/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb b/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb index 658ce393f..0ebc171aa 100644 --- a/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb +++ b/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb @@ -24,7 +24,7 @@ "output_type": "stream", "text": [ "WARNING: Logging before flag parsing goes to stderr.\n", - "W0612 09:40:01.239300 40168 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" + "W0612 14:28:37.904844 30000 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" ] }, { @@ -42,7 +42,7 @@ "source": [ "# set the environment path to find NLP\n", "import sys\n", - "sys.path.append(\"../../../\")\n", + "sys.path.append(\"../../\")\n", "import time\n", "import os\n", "import pandas as pd\n", @@ -52,7 +52,15 @@ "import sys\n", "from scipy.stats import pearsonr\n", "from scipy.spatial import distance\n", + "\n", + "# import utils\n", "from utils_nlp.azureml import azureml_utils\n", + "from utils_nlp.dataset import stsbenchmark\n", + "from utils_nlp.dataset.preprocess import (\n", + " to_lowercase,\n", + " to_spacy_tokens,\n", + " rm_spacy_stopwords,\n", + ")\n", "\n", "#tensorflow dependencies for Google Universal Sentence Encoder\n", "import tensorflow as tf\n", @@ -79,7 +87,7 @@ "metadata": {}, "outputs": [], "source": [ - "BASE_DATA_PATH = '../../../data'" + "BASE_DATA_PATH = '../../data'" ] }, { @@ -100,6 +108,146 @@ "cell_type": "code", "execution_count": 3, "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████| 401/401 [00:01<00:00, 252KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████| 401/401 [00:01<00:00, 307KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████| 401/401 [00:01<00:00, 277KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + } + ], + "source": [ + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "train = stsbenchmark.clean_sts(train_raw)\n", + "dev = stsbenchmark.clean_sts(dev_raw)\n", + "test = stsbenchmark.clean_sts(test_raw)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
scoresentence1sentence2
05.00A plane is taking off.An air plane is taking off.
13.80A man is playing a large flute.A man is playing a flute.
23.80A man is spreading shreded cheese on a pizza.A man is spreading shredded cheese on an uncoo...
\n", + "
" + ], + "text/plain": [ + " score sentence1 \\\n", + "0 5.00 A plane is taking off. \n", + "1 3.80 A man is playing a large flute. \n", + "2 3.80 A man is spreading shreded cheese on a pizza. \n", + "\n", + " sentence2 \n", + "0 An air plane is taking off. \n", + "1 A man is playing a flute. \n", + "2 A man is spreading shredded cheese on an uncoo... " + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train.head(3)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, "outputs": [], "source": [ "#replace all this with our util!\n", @@ -108,21 +256,21 @@ " for line in f:\n", " l = line.strip().split(\"\\t\")\n", " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", - "train = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", + "train_old = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", "\n", "data = []\n", "with open(\"sts-test.csv\", 'r', encoding=\"utf-8\") as f:\n", " for line in f:\n", " l = line.strip().split(\"\\t\")\n", " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", - "test = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", + "test_old = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", "\n", "data = []\n", "with open(\"sts-dev.csv\", 'r', encoding=\"utf-8\") as f:\n", " for line in f:\n", " l = line.strip().split(\"\\t\")\n", " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", - "dev = pd.DataFrame(data, columns=['sentence1','sentence2','score'])" + "dev_old = pd.DataFrame(data, columns=['sentence1','sentence2','score'])" ] }, { @@ -134,7 +282,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -156,7 +304,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -184,13 +332,13 @@ " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", " df = pd.DataFrame(df, columns=names)\n", - " df['score'] = dataset['score']\n", + " df['score'] = dataset['score'].tolist()\n", " return df" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -201,19 +349,19 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "# Take out when notebook is ready for publishing\n", - "training_data.to_csv(\"Data/training_set.csv\", index=None)\n", - "testing_data.to_csv(\"Data/testing_set.csv\", index=None)\n", - "validation_data.to_csv(\"Data/validation_set.csv\", index=None)" + "training_data.to_csv(\"training_set.csv\", index=None)\n", + "testing_data.to_csv(\"testing_set.csv\", index=None)\n", + "validation_data.to_csv(\"validation_set.csv\", index=None)" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -227,14 +375,14 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Google Universal Sentence Encoder Pearson Correlation: 0.7640280696312057\n" + "Google Universal Sentence Encoder Pearson Correlation: 0.7640271333273213\n" ] } ], @@ -251,18 +399,9 @@ }, { "cell_type": "code", - "execution_count": 10, + 
"execution_count": 13, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0612 09:49:07.823286 40168 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", - "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", - "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -274,8 +413,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "W0612 09:49:08.846869 29376 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "W0612 09:49:20.330181 40168 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" + "W0612 14:38:35.410748 27824 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "W0612 14:38:47.482707 30000 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" ] }, { @@ -305,19 +444,19 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ "#Take out when notebook is ready for publishing\n", - "training_data = pd.read_csv(\"Data/training_set.csv\")\n", - "testing_data = pd.read_csv(\"Data/testing_set.csv\")\n", - "validation_data = pd.read_csv(\"Data/validation_set.csv\")" + "training_data = pd.read_csv(\"training_set.csv\")\n", + "testing_data = pd.read_csv(\"testing_set.csv\")\n", + "validation_data = pd.read_csv(\"validation_set.csv\")" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -333,7 +472,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -342,7 +481,7 @@ "(5749, 1024)" ] }, - "execution_count": 12, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -353,7 +492,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -367,7 +506,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ @@ -384,7 +523,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [ { @@ -392,7 +531,7 @@ "output_type": "stream", "text": [ "Running on local machine\n", - "Parent Run ID: AutoML_089672db-2a8f-4d74-84ce-b3bee49733dd\n", + "Parent Run ID: AutoML_be89b4c3-aaa6-4b11-aaff-65d63d4d73be\n", "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", "Current status: DatasetEvaluation. Gathering dataset statistics.\n", "Current status: FeaturesGeneration. 
Generating features for the dataset.\n", @@ -408,57 +547,9 @@ "****************************************************************************************************\n", "\n", " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:01:09 0.1834 0.1834\n", - " 1 MinMaxScaler RandomForest 0:01:59 0.4272 0.4272\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:19 0.2811 0.4272\n", - " 3 StandardScalerWrapper LightGBM 0:00:19 0.2845 0.4272\n", - " 4 RobustScaler DecisionTree 0:00:26 0.2544 0.4272\n", - " 5 StandardScalerWrapper LassoLars 0:00:16 0.1246 0.4272\n", - " 6 StandardScalerWrapper LightGBM 0:00:20 0.6568 0.6568\n", - " 7 StandardScalerWrapper RandomForest 0:00:20 0.2186 0.6568\n", - " 8 StandardScalerWrapper LassoLars 0:00:21 0.0838 0.6568\n", - " 9 MinMaxScaler ExtremeRandomTrees 0:00:21 0.3632 0.6568\n", - " 10 RobustScaler ExtremeRandomTrees 0:01:01 0.3490 0.6568\n", - " 11 StandardScalerWrapper ExtremeRandomTrees 0:01:07 0.2673 0.6568\n", - " 12 MinMaxScaler ExtremeRandomTrees 0:00:22 0.2580 0.6568\n", - " 13 RobustScaler RandomForest 0:00:29 0.3360 0.6568\n", - " 14 StandardScalerWrapper LassoLars 0:00:15 nan 0.6568\n", - " 15 StandardScalerWrapper ExtremeRandomTrees 0:00:15 0.2102 0.6568\n", - " 16 StandardScalerWrapper RandomForest 0:00:28 0.2170 0.6568\n", - " 17 MinMaxScaler SGD 0:00:15 0.0965 0.6568\n", - " 18 StandardScalerWrapper RandomForest 0:00:40 0.3519 0.6568\n", - " 19 MinMaxScaler RandomForest 0:00:18 0.1664 0.6568\n", - " 20 StandardScalerWrapper LightGBM 0:00:38 0.7423 0.7423\n", - " 21 StandardScalerWrapper XGBoostRegressor 0:04:14 0.6688 0.7423\n", - " 22 StandardScalerWrapper DecisionTree 0:05:39 0.2257 0.7423\n", - " 23 StandardScalerWrapper LightGBM 0:01:47 0.6779 0.7423\n", - " 24 StandardScalerWrapper XGBoostRegressor 0:06:50 0.7638 0.7638\n", - " 25 TruncatedSVDWrapper XGBoostRegressor 0:01:09 0.7484 0.7638\n", - " 26 StandardScalerWrapper XGBoostRegressor 0:11:05 0.6608 0.7638\n", - " 27 StandardScalerWrapper RandomForest 0:05:09 0.4328 0.7638\n", - " 28 MaxAbsScaler LightGBM 0:00:36 0.6966 0.7638\n", - " 29 0:15:27 nan 0.7638\n", - "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. 
Please consider increasing the iteration_timeout_minutes parameter.\n", - " 30 TruncatedSVDWrapper XGBoostRegressor 0:01:32 0.5744 0.7638\n", - " 31 StandardScalerWrapper LightGBM 0:01:19 0.6043 0.7638\n", - " 32 RobustScaler DecisionTree 0:01:12 0.2604 0.7638\n", - " 33 MaxAbsScaler LightGBM 0:00:33 0.7516 0.7638\n", - " 34 StandardScalerWrapper LightGBM 0:00:31 0.6979 0.7638\n", - " 35 TruncatedSVDWrapper XGBoostRegressor 0:00:34 0.6998 0.7638\n", - " 36 MaxAbsScaler LightGBM 0:00:38 0.7226 0.7638\n", - " 37 MaxAbsScaler LightGBM 0:01:03 0.7107 0.7638\n", - " 38 StandardScalerWrapper XGBoostRegressor 0:01:08 0.6529 0.7638\n", - " 39 StandardScalerWrapper XGBoostRegressor 0:02:09 0.6202 0.7638\n", - " 40 TruncatedSVDWrapper XGBoostRegressor 0:00:58 0.7196 0.7638\n", - " 41 MaxAbsScaler LightGBM 0:00:44 0.6948 0.7638\n", - " 42 MaxAbsScaler LightGBM 0:05:25 0.6742 0.7638\n", - " 43 StandardScalerWrapper LightGBM 0:01:25 0.5840 0.7638\n", - " 44 MaxAbsScaler LightGBM 0:00:49 0.7087 0.7638\n", - " 45 TruncatedSVDWrapper XGBoostRegressor 0:00:37 0.6802 0.7638\n", - " 46 StandardScalerWrapper XGBoostRegressor 0:01:31 0.6529 0.7638\n", - " 47 MaxAbsScaler LightGBM 0:00:39 0.6008 0.7638\n", - " 48 VotingEnsemble 0:01:40 0.8146 0.8146\n", - " 49 StackEnsemble 0:09:34 0.8142 0.8146\n" + " 0 StandardScalerWrapper RandomForest 0:00:38 0.1752 0.1752\n", + " 1 MinMaxScaler RandomForest 0:00:58 0.4233 0.4233\n", + " 2 " ] } ], @@ -469,24 +560,9 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "15f441d10d3d42c9b950a7e3f2cbc7dc", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "RunDetails(local_run).show()" ] @@ -524,25 +600,9 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Run(Experiment: automated-ml-regression,\n", - "Id: AutoML_089672db-2a8f-4d74-84ce-b3bee49733dd_48,\n", - "Type: None,\n", - "Status: Completed)\n", - "RegressionPipeline(pipeline=Pipeline(memory=None,\n", - " steps=[('datatransformer', DataTransformer(enable_feature_sweeping=None, feature_sweeping_timeout=None,\n", - " is_onnx_compatible=None, logger=None, observer=None, task=None)), ('prefittedsoftvotingregressor', PreFittedSoftVotingRegressor(estimators=[('24', Pipeline(memory=None,\n", - " steps=[('stand...333333333333, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667]))]),\n", - " stddev=None)\n" - ] - } - ], + "outputs": [], "source": [ "lookup_metric = \"spearman_correlation\"\n", "best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n", @@ -552,17 +612,9 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.7817644762555442\n" - ] - } - ], + "outputs": [], "source": [ "y_pred = fitted_model.predict(test_x.values)\n", "print(pearsonr(y_pred, test_y)[0])" diff --git a/scenarios/sentence_similarity/baseline_deep_dive.ipynb b/scenarios/sentence_similarity/baseline_deep_dive.ipynb index e99de3109..bfeb82a08 100644 --- 
a/scenarios/sentence_similarity/baseline_deep_dive.ipynb +++ b/scenarios/sentence_similarity/baseline_deep_dive.ipynb @@ -163,8 +163,12 @@ "outputs": [], "source": [ "# Produce a pandas dataframe for the training and test sets\n", - "sts_train = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", - "sts_test = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")\n", + "\n", + "# Clean the sts dataset\n", + "sts_train = stsbenchmark.clean_sts(train_raw)\n", + "sts_test = stsbenchmark.clean_sts(test_raw)" ] }, { @@ -1428,7 +1432,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.5" + "version": "3.6.7" } }, "nbformat": 4, From 183f25b73cb99dd241b1a06c7bf46e8fe558cd1f Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Fri, 14 Jun 2019 15:31:47 -0400 Subject: [PATCH 070/108] AutoML notebook with google USE embeddings- added descriptions --- ...ml_google_universal_sentence_encoder.ipynb | 6050 ++++++++++++++++- 1 file changed, 5821 insertions(+), 229 deletions(-) diff --git a/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb b/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb index 0ebc171aa..53150b96e 100644 --- a/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb +++ b/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb @@ -4,14 +4,59 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/automated-machine-learning#3-setup-a-new-conda-environment" + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using AutoML for Predicting Sentence Similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrates how to use Azure AutoML to automate machine learning model selection and tuning. It also demonstrates how to use a popular sentence embedding model from Google, Universal Sentence Encoder. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### What is Azure AutoML?\n", + "\n", + "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. 
The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", + "\n", + "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](automl.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in Python notebooks through the use of widgets. AutoML quickly selects a high quality machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook demonstrates how to extract features for a sentence similarity task using the pretrained models InferSent and Google Universal Sentence Encoder. Then we will demonstrate how the AutoML package can easily automate model selection and hyperparameter tuning" + "### Modeling Problem\n", + "\n", + "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence).\n", + "\n", + "For each sentence in the sentence pair, we will use Google's pretrained Universal Sentence Encoder (details provided below) to generate a $512$-dimensional embedding. Both embeddings in the sentence pair will be concatenated and the resulting $1024$-dimensional vector will be used as features in our regression problem. Our target variable is the sentence similarity score."
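To make the feature layout concrete, here is a minimal sketch of the shapes involved, using random vectors as stand-ins for the two Universal Sentence Encoder embeddings (the notebook's own feature_engineering helper, defined further down, builds the real feature dataframe):

```python
import numpy as np

# Stand-ins for the two 512-dimensional Universal Sentence Encoder
# embeddings of one sentence pair (illustration only).
emb1 = np.random.rand(512)
emb2 = np.random.rand(512)

# Concatenating the pair yields the 1024-dimensional feature vector
# fed to the regression model; the human similarity score is the target.
features = np.concatenate([emb1, emb2])
assert features.shape == (1024,)
```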
] }, { @@ -24,7 +69,7 @@ "output_type": "stream", "text": [ "WARNING: Logging before flag parsing goes to stderr.\n", - "W0612 14:28:37.904844 30000 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" + "W0614 12:29:04.807609 38572 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" ] }, { @@ -40,7 +85,7 @@ } ], "source": [ - "# set the environment path to find NLP\n", + "# Set the environment path to find NLP\n", "import sys\n", "sys.path.append(\"../../\")\n", "import time\n", @@ -52,8 +97,9 @@ "import sys\n", "from scipy.stats import pearsonr\n", "from scipy.spatial import distance\n", + "from sklearn.externals import joblib\n", "\n", - "# import utils\n", + "# Import utils\n", "from utils_nlp.azureml import azureml_utils\n", "from utils_nlp.dataset import stsbenchmark\n", "from utils_nlp.dataset.preprocess import (\n", @@ -62,11 +108,12 @@ " rm_spacy_stopwords,\n", ")\n", "\n", - "#tensorflow dependencies for Google Universal Sentence Encoder\n", + "# Tensorflow dependencies for Google Universal Sentence Encoder\n", "import tensorflow as tf\n", "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", "\n", - "#AzureML packages\n", + "# AzureML packages\n", "import azureml as aml\n", "import logging\n", "from azureml.telemetry import set_diagnostics_collection\n", @@ -94,14 +141,21 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Feature Engineering" + "# Data Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## STS Benchmark Dataset" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We'll collect the Google Sentence Encoder encodings" + "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." 
] }, { @@ -113,7 +167,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████| 401/401 [00:01<00:00, 252KB/s]\n" + "100%|███████████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 271KB/s]\n" ] }, { @@ -127,7 +181,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████| 401/401 [00:01<00:00, 307KB/s]\n" + "100%|███████████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 273KB/s]\n" ] }, { @@ -141,7 +195,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████| 401/401 [00:01<00:00, 277KB/s]\n" + "100%|███████████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 288KB/s]\n" ] }, { @@ -153,6 +207,7 @@ } ], "source": [ + "# Load in the raw datasets as pandas dataframes\n", "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" @@ -164,6 +219,8 @@ "metadata": {}, "outputs": [], "source": [ + "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", + "# and renaming the remaining columns\n", "train = stsbenchmark.clean_sts(train_raw)\n", "dev = stsbenchmark.clean_sts(dev_raw)\n", "test = stsbenchmark.clean_sts(test_raw)" @@ -173,6 +230,27 @@ "cell_type": "code", "execution_count": 5, "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training set has 5749 sentences\n", + "Development set has 1500 sentences\n", + "Testing set has 1379 sentences\n" + ] + } + ], + "source": [ + "print(\"Training set has {} sentences\".format(len(train)))\n", + "print(\"Development set has {} sentences\".format(len(dev)))\n", + "print(\"Testing set has {} sentences\".format(len(test)))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, "outputs": [ { "data": { @@ -219,6 +297,18 @@ " A man is spreading shreded cheese on a pizza.\n", " A man is spreading shredded cheese on an uncoo...\n", " \n", + " \n", + " 3\n", + " 2.60\n", + " Three men are playing chess.\n", + " Two men are playing chess.\n", + " \n", + " \n", + " 4\n", + " 4.25\n", + " A man is playing the cello.\n", + " A man seated is playing the cello.\n", + " \n", " \n", "\n", "" @@ -228,78 +318,54 @@ "0 5.00 A plane is taking off. \n", "1 3.80 A man is playing a large flute. \n", "2 3.80 A man is spreading shreded cheese on a pizza. \n", + "3 2.60 Three men are playing chess. \n", + "4 4.25 A man is playing the cello. \n", "\n", " sentence2 \n", "0 An air plane is taking off. \n", "1 A man is playing a flute. \n", - "2 A man is spreading shredded cheese on an uncoo... " + "2 A man is spreading shredded cheese on an uncoo... \n", + "3 Two men are playing chess. \n", + "4 A man seated is playing the cello. 
" ] }, - "execution_count": 5, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "train.head(3)" + "train.head(5)" ] }, { - "cell_type": "code", - "execution_count": 21, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "#replace all this with our util!\n", - "data = []\n", - "with open(\"sts-train.csv\", 'r', encoding=\"utf-8\") as f:\n", - " for line in f:\n", - " l = line.strip().split(\"\\t\")\n", - " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", - "train_old = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", - "\n", - "data = []\n", - "with open(\"sts-test.csv\", 'r', encoding=\"utf-8\") as f:\n", - " for line in f:\n", - " l = line.strip().split(\"\\t\")\n", - " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", - "test_old = pd.DataFrame(data, columns=['sentence1','sentence2','score'])\n", - "\n", - "data = []\n", - "with open(\"sts-dev.csv\", 'r', encoding=\"utf-8\") as f:\n", - " for line in f:\n", - " l = line.strip().split(\"\\t\")\n", - " data.append([l[5].strip().lower(),l[6].strip().lower(), float(l[4])])\n", - "dev_old = pd.DataFrame(data, columns=['sentence1','sentence2','score'])" + "## Feature Engineering: Universal Sentence Encoder" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Google Universal Sentence Encoder" + "Now that we have our sentence pairs loaded, we will convert these sentences into a numerical representation in order to use them in our machine learning model. To do this, we'll use a popular sentence encoder called Google Universal Sentence Encoder (see [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). Google provides two pretrained models based on different design goals: a Transformer model (targets high accuracy even if this reduces model complexity) and a Deep Averaging Network model (DAN; targets efficient inference). Both models are trained on a variety of web sources (Wikipedia, news, question-answers pages, and discussion forums) and produced 512-dimensional embeddings. This notebook utilizes the Transformer-based encoding model which can be downloaded [here](https://tfhub.dev/google/universal-sentence-encoder-large/3) because of its better performance relative to the DAN model on the STS Benchmark dataset (see Table 2 in Google Research's [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). " ] }, { - "cell_type": "code", - "execution_count": 6, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", - "\n", - "# Import the Universal Sentence Encoder's TF Hub module\n", - "google_USE_embed = hub.Module(module_url)\n", - "\n", - "# Reduce logging output.\n", - "tf.logging.set_verbosity(tf.logging.ERROR)" + "**Google Universal Sentence Encoder: Transformer Model** The Transformer model produces sentence embeddings using the \"encoding sub-graph of the transformer architecture\" (original architecture introduced [here](https://arxiv.org/abs/1706.03762)). \"This sub-graph uses attention to compute context aware representations of words in a sentence that take into account both the ordering and identity of all the other workds. 
The context aware word representations are converted to a fixed length sentence encoding vector by computing the element-wise sum of the representations at each word position.\" The input to the model is lowercase PTB-tokenized strings and the model is designed to be useful for multiple different tasks by using multi-task learning. More details about the model can be found in the [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf) by Google Research." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Embed Sentences" + "**Using the Pretrained Model**\n", + "\n", + "Tensorflow-hub provides the pretrained model for use by the public. We import the model from its url and then feed the model our sentences for it to encode." ] }, { @@ -308,10 +374,35 @@ "metadata": {}, "outputs": [], "source": [ - "def embed_google_universal_sentence_encoder(dataset, embedding_model):\n", + "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", + "\n", + "# Import the Universal Sentence Encoder's TF Hub module\n", + "embedding_model = hub.Module(module_url)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def google_encoder(dataset):\n", + " \"\"\" Function that embeds sentences using the Google Universal\n", + " Sentence Encoder pretrained model\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " emb1: 512-dimensional representation of sentence1\n", + " emb2: 512-dimensional representation of sentence2\n", + " \"\"\"\n", " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", "\n", + " # Apply embedding model and normalize the input\n", " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", " \n", @@ -324,11 +415,36 @@ " sts_input1: dataset['sentence1'],\n", " sts_input2: dataset['sentence2']\n", " })\n", - " return emb1, emb2\n", + " return emb1, emb2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As features, we will embed both sentences using the Google Universal Sentence Encoder and concatenate their representations into a $1024$-dimensional vector. The resulting data will be saved in a dataframe for consumption by our AutoML model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def feature_engineering(dataset):\n", + " \"\"\"Extracts embedding features from the dataset and returns\n", + " features and target in a dataframe\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", " \n", - "def feature_engineering(dataset, googleUSE_embedding_model):\n", - " google_USE_emb1, google_USE_emb2 = embed_google_universal_sentence_encoder(dataset, googleUSE_embedding_model)\n", - " n_google = google_USE_emb1.shape[1] \n", + " Returns:\n", + " -------\n", + " df: pandas dataframe with embedding features and target variable\n", + " \"\"\"\n", + " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", + " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", " df = pd.DataFrame(df, columns=names)\n", @@ -338,51 +454,87 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ - "training_data = feature_engineering(train, google_USE_embed)\n", - "validation_data = feature_engineering(dev, google_USE_embed)\n", - "testing_data = feature_engineering(test, google_USE_embed)" + "training_data = feature_engineering(train)\n", + "validation_data = feature_engineering(dev)\n", + "testing_data = feature_engineering(test)" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 60, "metadata": {}, "outputs": [], "source": [ - "# Take out when notebook is ready for publishing\n", - "training_data.to_csv(\"training_set.csv\", index=None)\n", - "testing_data.to_csv(\"testing_set.csv\", index=None)\n", - "validation_data.to_csv(\"validation_set.csv\", index=None)" + "#Take this out later\n", + "training_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_train.csv\"), index=None)\n", + "testing_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_test.csv\"), index=None)\n", + "validation_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_dev.csv\"), index=None)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Calculate Baseline Performance" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before using AutoML we will calculate a baseline to compare the AutoML results to. For the baseline we will take the Google Universal Sentence Encoder embeddings of each sentence, calculate the cosine similarity between the two sentence embeddings, then compare the predicted values with the true scores using pearson correlation. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### What is Pearson Correlation?\n", + "\n", + "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", + "\n", + "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", + "\n", + "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. 
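As a quick numeric check of the formula above, the sample Pearson correlation can be computed by hand and compared with scipy.stats.pearsonr, the same function the notebook uses for evaluation (the values below are made up for illustration):

```python
import numpy as np
from scipy.stats import pearsonr

# Toy predicted similarities and gold scores, for illustration only.
pred = np.array([0.9, 0.4, 0.7, 0.1, 0.5])
gold = np.array([4.8, 2.0, 3.5, 0.5, 2.7])

# Sample form of the formula: covariance divided by the product of the
# standard deviations.
manual = np.cov(pred, gold, ddof=1)[0, 1] / (pred.std(ddof=1) * gold.std(ddof=1))

print(round(manual, 4), round(pearsonr(pred, gold)[0], 4))  # the two values agree
```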
We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "def get_baseline_performance(data):\n", - " sent1_googleUSE = data[[i for i in data.columns if 'USEEmb1' in i]].values.tolist()\n", - " sent2_googleUSE = data[[i for i in data.columns if 'USEEmb2' in i]].values.tolist()\n", + " \"\"\" Get baseline performance by calculating the cosine similarity between\n", + " the embeddings in the sentence pair and then evaluating the pearson \n", + " correlation between the predicted and true similarity scores\n", " \n", - " predictions_googleUSE = [1-distance.cosine(sent1_googleUSE[i], sent2_googleUSE[i]) for i in range(len(sent1_googleUSE))]\n", - " print(\"Google Universal Sentence Encoder Pearson Correlation:\",pearsonr(predictions_googleUSE, data['score'].values.tolist())[0])" + " Parameters:\n", + " ----------\n", + " data: dataframe containing embeddings and similarity scores\n", + " \"\"\"\n", + " emb1 = data[[i for i in data.columns if 'USEEmb1' in i]].values.tolist()\n", + " emb2 = data[[i for i in data.columns if 'USEEmb2' in i]].values.tolist()\n", + " scores = data['score'].values.tolist()\n", + " \n", + " predictions = [1-distance.cosine(emb1[i], emb2[i]) for i in range(len(emb1))]\n", + " print(\"Google Universal Sentence Encoder Pearson Correlation:\", round(pearsonr(predictions, scores)[0],3))" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 12, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Google Universal Sentence Encoder Pearson Correlation: 0.7640271333273213\n" + "Google Universal Sentence Encoder Pearson Correlation: 0.764\n" ] } ], @@ -394,7 +546,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# AutoML - no AmlCompute" + "# AutoML" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "AutoML can be used for classification, regression or timeseries experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist." ] }, { @@ -402,6 +561,15 @@ "execution_count": 13, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0614 12:37:29.502033 38572 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -413,8 +581,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "W0612 14:38:35.410748 27824 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "W0612 14:38:47.482707 30000 _profile.py:774] You have logged in. 
Now let us find all the subscriptions to which you have access...\n" + "W0614 12:37:29.827009 11124 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "W0614 12:37:37.015321 38572 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" ] }, { @@ -443,87 +611,94 @@ ] }, { - "cell_type": "code", - "execution_count": 14, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "#Take out when notebook is ready for publishing\n", - "training_data = pd.read_csv(\"training_set.csv\")\n", - "testing_data = pd.read_csv(\"testing_set.csv\")\n", - "validation_data = pd.read_csv(\"validation_set.csv\")" + "## AutoMLConfig Parameters\n", + "Next, we specify the parameters for the AutoMLConfig class. " ] }, { - "cell_type": "code", - "execution_count": 15, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "train_y = training_data['score']\n", - "train_x = training_data[[i for i in training_data.columns if 'USE' in i]]\n", - "\n", - "validation_y = validation_data['score']\n", - "validation_x = validation_data[[i for i in validation_data.columns if 'USE' in i]]\n", - "\n", - "test_y = testing_data['score']\n", - "test_x = testing_data[[i for i in testing_data.columns if 'USE' in i]]" + "**task** \n", + "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)" ] }, { - "cell_type": "code", - "execution_count": 16, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(5749, 1024)" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ - "train_x.shape" + "**preprocess** \n", + "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. 
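To make the two ensemble options mentioned under **task** concrete, here is a small hand-rolled sketch (not AutoML's internal implementation, and the base-learner predictions are invented): voting takes a weighted average of the base learners' predictions, while stacking fits a second-stage meta-learner on those predictions.

```python
import numpy as np
from sklearn.linear_model import LinearRegression

# Invented predictions from three already-fitted base regressors on the
# same five validation examples (rows: base learners, columns: examples).
base_preds = np.array([
    [3.9, 1.8, 3.1, 0.7, 2.4],
    [4.2, 2.1, 3.4, 0.4, 2.9],
    [3.5, 1.5, 2.8, 1.0, 2.2],
])
y_true = np.array([4.0, 2.0, 3.2, 0.5, 2.6])

# Voting ensemble: weighted average of the base predictions.
weights = np.array([0.5, 0.3, 0.2])
voting_pred = weights @ base_preds

# Stacking ensemble: a meta-learner is fit on the base predictions
# (in practice the meta-learner is trained on cross-validated predictions).
meta = LinearRegression().fit(base_preds.T, y_true)
stacking_pred = meta.predict(base_preds.T)
```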
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**primary_metric** \n", + "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Constraints:** \n", + "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after acheiving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ "automl_settings = {\n", - " \"iteration_timeout_minutes\" : 15,\n", - " \"iterations\" : 50,\n", - " \"primary_metric\" : 'spearman_correlation',\n", - " \"preprocess\" : True,\n", + " \"task\": 'regression', #type of task: classification, regression or forecasting\n", + " \"debug_log\": 'automated_ml_errors.log',\n", + " \"path\": './automated-ml-regression',\n", + " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", + " \"iterations\" : 50, #Number of algorithm options to try\n", + " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", + " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", " \"verbosity\":logging.ERROR}" ] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ + "X_train = training_data.drop(\"score\", axis=1).values\n", + "y_train = training_data['score'].values.flatten()\n", + "X_validation = validation_data.drop(\"score\", axis=1).values\n", + "y_validation = validation_data['score'].values.flatten()\n", + "\n", "# local compute\n", - "automated_ml_config = AutoMLConfig(task = 'regression',\n", - " debug_log = 'automated_ml_errors.log',\n", - " path = './automated-ml-regression',\n", - " X = train_x.values,\n", - " y = train_y.values.flatten(),\n", - " X_valid = validation_x.values,\n", - " y_valid = validation_y.values.flatten(),\n", - " **automl_settings)" + "automated_ml_config = AutoMLConfig(\n", + " X = X_train,\n", + " y = y_train,\n", + " X_valid = X_validation,\n", + " y_valid = y_validation,\n", + " **automl_settings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run the Experiment\n", + "\n", + "Run the experiment locally and inspect the results using a widget" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -531,7 +706,7 @@ "output_type": "stream", "text": [ "Running on local machine\n", - "Parent Run ID: AutoML_be89b4c3-aaa6-4b11-aaff-65d63d4d73be\n", + "Parent Run ID: AutoML_91012708-fd8f-405f-afed-4c42f8fe3fc6\n", "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", "Current status: DatasetEvaluation. Gathering dataset statistics.\n", "Current status: FeaturesGeneration. 
Generating features for the dataset.\n", @@ -547,9 +722,57 @@ "****************************************************************************************************\n", "\n", " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:00:38 0.1752 0.1752\n", - " 1 MinMaxScaler RandomForest 0:00:58 0.4233 0.4233\n", - " 2 " + " 0 StandardScalerWrapper RandomForest 0:00:35 0.1791 0.1791\n", + " 1 MinMaxScaler RandomForest 0:01:04 0.4340 0.4340\n", + " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:13 0.2467 0.4340\n", + " 3 StandardScalerWrapper LightGBM 0:00:10 0.2708 0.4340\n", + " 4 RobustScaler DecisionTree 0:00:14 0.2435 0.4340\n", + " 5 StandardScalerWrapper LassoLars 0:00:08 0.1246 0.4340\n", + " 6 StandardScalerWrapper LightGBM 0:00:11 0.6567 0.6567\n", + " 7 StandardScalerWrapper RandomForest 0:00:11 0.2160 0.6567\n", + " 8 StandardScalerWrapper LassoLars 0:00:10 0.0836 0.6567\n", + " 9 MinMaxScaler ExtremeRandomTrees 0:00:12 0.3599 0.6567\n", + " 10 RobustScaler ExtremeRandomTrees 0:00:35 0.3491 0.6567\n", + " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:11 0.3217 0.6567\n", + " 12 MinMaxScaler ExtremeRandomTrees 0:00:13 0.2308 0.6567\n", + " 13 RobustScaler RandomForest 0:00:15 0.3675 0.6567\n", + " 14 StandardScalerWrapper LassoLars 0:00:08 nan 0.6567\n", + " 15 StandardScalerWrapper ExtremeRandomTrees 0:00:08 0.1977 0.6567\n", + " 16 StandardScalerWrapper RandomForest 0:00:09 0.2468 0.6567\n", + " 17 MinMaxScaler SGD 0:00:08 0.0797 0.6567\n", + " 18 StandardScalerWrapper RandomForest 0:00:22 0.3277 0.6567\n", + " 19 MinMaxScaler RandomForest 0:00:09 0.1681 0.6567\n", + " 20 StandardScalerWrapper LightGBM 0:00:47 0.7412 0.7412\n", + " 21 StandardScalerWrapper XGBoostRegressor 0:02:18 0.6772 0.7412\n", + " 22 StandardScalerWrapper LightGBM 0:00:16 0.6983 0.7412\n", + " 23 StandardScalerWrapper LightGBM 0:00:17 0.6864 0.7412\n", + " 24 StandardScalerWrapper DecisionTree 0:02:32 0.2330 0.7412\n", + " 25 MaxAbsScaler LightGBM 0:00:13 0.3161 0.7412\n", + " 26 StandardScalerWrapper LightGBM 0:00:39 0.5771 0.7412\n", + " 27 StandardScalerWrapper XGBoostRegressor 0:01:03 0.6196 0.7412\n", + " 28 StandardScalerWrapper XGBoostRegressor 0:03:07 0.7688 0.7688\n", + " 29 StandardScalerWrapper XGBoostRegressor 0:04:49 0.7275 0.7688\n", + " 30 TruncatedSVDWrapper XGBoostRegressor 0:00:59 0.7438 0.7688\n", + " 31 StandardScalerWrapper XGBoostRegressor 0:00:33 0.6567 0.7688\n", + " 32 StandardScalerWrapper RandomForest 0:01:50 0.4262 0.7688\n", + " 33 StandardScalerWrapper XGBoostRegressor 0:00:54 0.6762 0.7688\n", + " 34 StandardScalerWrapper LightGBM 0:00:20 0.4265 0.7688\n", + " 35 TruncatedSVDWrapper XGBoostRegressor 0:01:14 0.7097 0.7688\n", + " 36 0:15:13 nan 0.7688\n", + "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. 
Please consider increasing the iteration_timeout_minutes parameter.\n", + " 37 MinMaxScaler DecisionTree 0:00:42 0.1453 0.7688\n", + " 38 StandardScalerWrapper XGBoostRegressor 0:00:57 0.6063 0.7688\n", + " 39 StandardScalerWrapper XGBoostRegressor 0:05:10 0.6577 0.7688\n", + " 40 TruncatedSVDWrapper XGBoostRegressor 0:00:40 0.7293 0.7688\n", + " 41 MaxAbsScaler LightGBM 0:00:29 0.6281 0.7688\n", + " 42 StandardScalerWrapper XGBoostRegressor 0:11:03 0.5731 0.7688\n", + " 43 StandardScalerWrapper XGBoostRegressor 0:01:22 0.6788 0.7688\n", + " 44 SparseNormalizer XGBoostRegressor 0:00:53 0.7598 0.7688\n", + " 45 TruncatedSVDWrapper XGBoostRegressor 0:00:25 0.7171 0.7688\n", + " 46 SparseNormalizer XGBoostRegressor 0:03:20 0.6843 0.7688\n", + " 47 SparseNormalizer LightGBM 0:11:34 0.6405 0.7688\n", + " 48 VotingEnsemble 0:01:04 0.8130 0.8130\n", + " 49 StackEnsemble 0:08:57 0.8130 0.8130\n" ] } ], @@ -559,195 +782,5564 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "RunDetails(local_run).show()" + "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, my accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." ] }, { "cell_type": "code", "execution_count": 17, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "fb411f9a99e946958f9dd8717a6cada1", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ - "widget_data = RunDetails(local_run).get_widget_data()" + "# Inspect the run details using the provided widget\n", + "RunDetails(local_run).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inspect the Best Model\n", + "\n", + "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case). The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). Our best model is a Pipeline with two steps: a DataTransformer step and a PreFittedSoftVotingRegressor step. We demonstrate how to extract additional information about what data transformations were used and which models make up the ensemble." 
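Before printing the full pipeline, a quick way to see the chained steps described above is to walk the fitted pipeline's steps list, the same attribute the notebook queries further down with get_params(); this is a minimal sketch that assumes fitted_model has already been retrieved via local_run.get_output() as in the next cell:

```python
# List the (name, estimator) pairs chained inside the best pipeline.
for name, step in fitted_model.pipeline.steps:
    print(name, "->", type(step).__name__)
# Expected: a data transformation step followed by the ensemble step.
```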
] }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Registering model AutoML089672db2best\n", - "AutoML089672db2best\n" + "RegressionPipeline(pipeline=Pipeline(memory=None,\n", + " steps=[('datatransformer', DataTransformer(enable_feature_sweeping=None, feature_sweeping_timeout=None,\n", + " is_onnx_compatible=None, logger=None, observer=None, task=None)), ('stackensembleregressor', StackEnsembleRegressor(base_learners=[('28', Pipeline(memory=None,\n", + " steps=[('standardscaler... random_state=None, selection='cyclic', tol=0.0001, verbose=0),\n", + " training_cv_folds=5))]),\n", + " stddev=None)\n" ] } ], - "source": [ - "description = 'AutoML Sentence Similarity Model'\n", - "tags = None\n", - "model = local_run.register_model(description = description, tags = tags)\n", - "\n", - "print(local_run.model_id) # This will be written to the script file later in the notebook." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], "source": [ "lookup_metric = \"spearman_correlation\"\n", "best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n", - "print(best_run)\n", "print(fitted_model)" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "y_pred = fitted_model.predict(test_x.values)\n", - "print(pearsonr(y_pred, test_y)[0])" + "We can look at the different models that are used to produce the stack ensemble model" ] }, { "cell_type": "code", - "execution_count": 52, - "metadata": { - "scrolled": true - }, + "execution_count": 29, + "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "{'estimators': [('24', Pipeline(memory=None,\n", - " steps=[('standardscalerwrapper', ), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + "{'base_learners': None,\n", + " 'meta_learner': None,\n", + " 'training_cv_folds': None,\n", + " '28': Pipeline(memory=None,\n", + " steps=[('standardscalerwrapper', ), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", + " ma...ale_pos_weight=1, seed=None,\n", + " silent=True, subsample=0.7, tree_method='auto', verbose=-10))]),\n", + " '44': Pipeline(memory=None,\n", + " steps=[('sparsenormalizer', ), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.1, gamma=0, grow_policy='lossguide',\n", + " learnin...scale_pos_weight=1, seed=None, silent=True, subsample=1,\n", + " tree_method='hist', verbose=-10))]),\n", + " '30': Pipeline(memory=None,\n", + " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", + " max_delta_step=0, max_dept...scale_pos_weight=1, seed=None,\n", + " silent=True, subsample=1, tree_method='auto', verbose=-10))]),\n", + " '20': Pipeline(memory=None,\n", + " steps=[('standardscalerwrapper', ), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", + " importance_type='split', learning_rate=0.2, max_bin=63,\n", + " ...425, silent=True, subsample=0.85,\n", + " subsample_for_bin=200000, subsample_freq=2, 
verbose=-1))]),\n", + " '40': Pipeline(memory=None,\n", + " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.40578947368421053, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.9, eta=0.3, gamma=0.01, learning_rate=0.1,\n", + " max_delta_step=0, max...ale_pos_weight=1, seed=None, silent=True, subsample=0.6,\n", + " tree_method='auto', verbose=-10))]),\n", + " '45': Pipeline(memory=None,\n", + " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.5, eta=0.2, gamma=0, grow_policy='lossguide',\n", + " learning_rate=0.1, ...eight=1,\n", + " seed=None, silent=True, subsample=0.9, tree_method='hist',\n", + " verbose=-10))]),\n", + " '22': Pipeline(memory=None,\n", + " steps=[('standardscalerwrapper', ), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", + " colsample_bytree=0.9, importance_type='split', learning_rate=0.2,\n", + " max_bi... silent=True, subsample=0.6, subsample_for_bin=200000,\n", + " subsample_freq=2, verbose=-1))]),\n", + " '28__memory': None,\n", + " '28__steps': [('standardscalerwrapper',\n", + " ),\n", + " ('xgboostregressor',\n", + " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " ma...ale_pos_weight=1, seed=None,\n", - " silent=True, subsample=0.7, tree_method='auto', verbose=-10))])),\n", - " ('33', Pipeline(memory=None,\n", - " steps=[('maxabsscaler', MaxAbsScaler(copy=True)), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", - " colsample_bytree=0.7000000000000001, importance_type='split',\n", - " learning_rate=0.16842263157894738, max_bin=7, max_depth=3,\n", - " min_child_samples=14,...ue, subsample=0.5499999999999999,\n", - " subsample_for_bin=200000, subsample_freq=3, verbose=-1))])),\n", - " ('25', Pipeline(memory=None,\n", - " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " max_delta_step=0, max_depth=4, max_leaves=0, min_child_weight=1,\n", + " missing=nan, n_estimators=600, n_jobs=1, nthread=None,\n", + " objective='reg:linear', random_state=0, reg_alpha=0,\n", + " reg_lambda=1.1458333333333335, scale_pos_weight=1, seed=None,\n", + " silent=True, subsample=0.7, tree_method='auto', verbose=-10))],\n", + " '28__standardscalerwrapper': ,\n", + " '28__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", + " max_delta_step=0, max_depth=4, max_leaves=0, min_child_weight=1,\n", + " missing=nan, n_estimators=600, n_jobs=1, nthread=None,\n", + " objective='reg:linear', random_state=0, reg_alpha=0,\n", + " reg_lambda=1.1458333333333335, scale_pos_weight=1, seed=None,\n", + " silent=True, subsample=0.7, tree_method='auto', verbose=-10),\n", + " '28__standardscalerwrapper__module_name': 'sklearn.preprocessing.data',\n", + " '28__standardscalerwrapper__class_name': 'StandardScaler',\n", + " '28__standardscalerwrapper__copy': True,\n", + " '28__standardscalerwrapper__with_mean': False,\n", + " '28__standardscalerwrapper__with_std': False,\n", + " 
'28__xgboostregressor__base_score': 0.5,\n", + " '28__xgboostregressor__booster': 'gbtree',\n", + " '28__xgboostregressor__colsample_bylevel': 1,\n", + " '28__xgboostregressor__colsample_bytree': 1,\n", + " '28__xgboostregressor__gamma': 0,\n", + " '28__xgboostregressor__learning_rate': 0.1,\n", + " '28__xgboostregressor__max_delta_step': 0,\n", + " '28__xgboostregressor__max_depth': 4,\n", + " '28__xgboostregressor__min_child_weight': 1,\n", + " '28__xgboostregressor__missing': nan,\n", + " '28__xgboostregressor__n_estimators': 600,\n", + " '28__xgboostregressor__n_jobs': 1,\n", + " '28__xgboostregressor__nthread': None,\n", + " '28__xgboostregressor__objective': 'reg:linear',\n", + " '28__xgboostregressor__random_state': 0,\n", + " '28__xgboostregressor__reg_alpha': 0,\n", + " '28__xgboostregressor__reg_lambda': 1.1458333333333335,\n", + " '28__xgboostregressor__scale_pos_weight': 1,\n", + " '28__xgboostregressor__seed': None,\n", + " '28__xgboostregressor__silent': True,\n", + " '28__xgboostregressor__subsample': 0.7,\n", + " '28__xgboostregressor__eta': 0.01,\n", + " '28__xgboostregressor__max_leaves': 0,\n", + " '28__xgboostregressor__tree_method': 'auto',\n", + " '28__xgboostregressor__verbose': -10,\n", + " '44__memory': None,\n", + " '44__steps': [('sparsenormalizer',\n", + " ),\n", + " ('xgboostregressor',\n", + " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.1, gamma=0, grow_policy='lossguide',\n", + " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=6,\n", + " max_leaves=15, min_child_weight=1, missing=nan, n_estimators=200,\n", + " n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n", + " reg_alpha=0.3125, reg_lambda=0.20833333333333334,\n", + " scale_pos_weight=1, seed=None, silent=True, subsample=1,\n", + " tree_method='hist', verbose=-10))],\n", + " '44__sparsenormalizer': ,\n", + " '44__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.1, gamma=0, grow_policy='lossguide',\n", + " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=6,\n", + " max_leaves=15, min_child_weight=1, missing=nan, n_estimators=200,\n", + " n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n", + " reg_alpha=0.3125, reg_lambda=0.20833333333333334,\n", + " scale_pos_weight=1, seed=None, silent=True, subsample=1,\n", + " tree_method='hist', verbose=-10),\n", + " '44__sparsenormalizer__norm': 'l1',\n", + " '44__sparsenormalizer__copy': True,\n", + " '44__xgboostregressor__base_score': 0.5,\n", + " '44__xgboostregressor__booster': 'gbtree',\n", + " '44__xgboostregressor__colsample_bylevel': 1,\n", + " '44__xgboostregressor__colsample_bytree': 1,\n", + " '44__xgboostregressor__gamma': 0,\n", + " '44__xgboostregressor__learning_rate': 0.1,\n", + " '44__xgboostregressor__max_delta_step': 0,\n", + " '44__xgboostregressor__max_depth': 6,\n", + " '44__xgboostregressor__min_child_weight': 1,\n", + " '44__xgboostregressor__missing': nan,\n", + " '44__xgboostregressor__n_estimators': 200,\n", + " '44__xgboostregressor__n_jobs': 1,\n", + " '44__xgboostregressor__nthread': None,\n", + " '44__xgboostregressor__objective': 'reg:linear',\n", + " '44__xgboostregressor__random_state': 0,\n", + " '44__xgboostregressor__reg_alpha': 0.3125,\n", + " '44__xgboostregressor__reg_lambda': 0.20833333333333334,\n", + " '44__xgboostregressor__scale_pos_weight': 1,\n", + " '44__xgboostregressor__seed': None,\n", + " '44__xgboostregressor__silent': 
True,\n", + " '44__xgboostregressor__subsample': 1,\n", + " '44__xgboostregressor__eta': 0.1,\n", + " '44__xgboostregressor__grow_policy': 'lossguide',\n", + " '44__xgboostregressor__max_bin': 255,\n", + " '44__xgboostregressor__max_leaves': 15,\n", + " '44__xgboostregressor__tree_method': 'hist',\n", + " '44__xgboostregressor__verbose': -10,\n", + " '30__memory': None,\n", + " '30__steps': [('truncatedsvdwrapper',\n", + " TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None)),\n", + " ('xgboostregressor',\n", + " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " max_delta_step=0, max_dept...scale_pos_weight=1, seed=None,\n", - " silent=True, subsample=1, tree_method='auto', verbose=-10))])),\n", - " ('20', Pipeline(memory=None,\n", - " steps=[('standardscalerwrapper', ), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", + " max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1,\n", + " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", + " objective='reg:linear', random_state=0, reg_alpha=2.1875,\n", + " reg_lambda=0.5208333333333334, scale_pos_weight=1, seed=None,\n", + " silent=True, subsample=1, tree_method='auto', verbose=-10))],\n", + " '30__truncatedsvdwrapper': TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None),\n", + " '30__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", + " max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1,\n", + " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", + " objective='reg:linear', random_state=0, reg_alpha=2.1875,\n", + " reg_lambda=0.5208333333333334, scale_pos_weight=1, seed=None,\n", + " silent=True, subsample=1, tree_method='auto', verbose=-10),\n", + " '30__truncatedsvdwrapper__n_components': 0.2573684210526316,\n", + " '30__truncatedsvdwrapper__random_state': None,\n", + " '30__xgboostregressor__base_score': 0.5,\n", + " '30__xgboostregressor__booster': 'gbtree',\n", + " '30__xgboostregressor__colsample_bylevel': 1,\n", + " '30__xgboostregressor__colsample_bytree': 1,\n", + " '30__xgboostregressor__gamma': 0,\n", + " '30__xgboostregressor__learning_rate': 0.1,\n", + " '30__xgboostregressor__max_delta_step': 0,\n", + " '30__xgboostregressor__max_depth': 6,\n", + " '30__xgboostregressor__min_child_weight': 1,\n", + " '30__xgboostregressor__missing': nan,\n", + " '30__xgboostregressor__n_estimators': 200,\n", + " '30__xgboostregressor__n_jobs': 1,\n", + " '30__xgboostregressor__nthread': None,\n", + " '30__xgboostregressor__objective': 'reg:linear',\n", + " '30__xgboostregressor__random_state': 0,\n", + " '30__xgboostregressor__reg_alpha': 2.1875,\n", + " '30__xgboostregressor__reg_lambda': 0.5208333333333334,\n", + " '30__xgboostregressor__scale_pos_weight': 1,\n", + " '30__xgboostregressor__seed': None,\n", + " '30__xgboostregressor__silent': True,\n", + " '30__xgboostregressor__subsample': 1,\n", + " '30__xgboostregressor__eta': 0.01,\n", + " '30__xgboostregressor__max_leaves': 0,\n", + " '30__xgboostregressor__tree_method': 'auto',\n", + " '30__xgboostregressor__verbose': -10,\n", + " '20__memory': None,\n", + " '20__steps': [('standardscalerwrapper',\n", + " ),\n", + " ('lightgbmregressor',\n", + " LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", " importance_type='split', 
learning_rate=0.2, max_bin=63,\n", - " ...425, silent=True, subsample=0.85,\n", - " subsample_for_bin=200000, subsample_freq=2, verbose=-1))])),\n", - " ('40', Pipeline(memory=None,\n", - " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.9, eta=0.3, gamma=0, learning_rate=0.1,\n", - " max_delta_step=0, max_dep...ale_pos_weight=1, seed=None, silent=True,\n", - " subsample=0.9, tree_method='auto', verbose=-10))])),\n", - " ('37', Pipeline(memory=None,\n", - " steps=[('maxabsscaler', MaxAbsScaler(copy=True)), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", - " colsample_bytree=0.9, importance_type='split',\n", - " learning_rate=0.11579368421052631, max_bin=63, max_depth=10,\n", - " min_child_samples=16, min_child_we...e, subsample=0.44999999999999996,\n", - " subsample_for_bin=200000, subsample_freq=3, verbose=-1))])),\n", - " ('44', Pipeline(memory=None,\n", - " steps=[('maxabsscaler', MaxAbsScaler(copy=True)), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", - " colsample_bytree=0.7000000000000001, importance_type='split',\n", - " learning_rate=0.16842263157894738, max_bin=7, max_depth=9,\n", - " min_child_samples=60,... subsample=0.7999999999999999, subsample_for_bin=200000,\n", - " subsample_freq=7, verbose=-1))])),\n", - " ('45', Pipeline(memory=None,\n", - " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.10894736842105263, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.7, eta=0.05, gamma=0.1, learning_rate=0.1,\n", - " max_delta_step=0, max...ale_pos_weight=1, seed=None,\n", - " silent=True, subsample=0.7, tree_method='auto', verbose=-10))]))],\n", - " 'weights': [0.2,\n", - " 0.2,\n", - " 0.2,\n", - " 0.13333333333333333,\n", - " 0.06666666666666667,\n", - " 0.06666666666666667,\n", - " 0.06666666666666667,\n", - " 0.06666666666666667],\n", - " 'flatten_transform': None}" + " max_depth=7, min_child_samples=164, min_child_weight=0.001,\n", + " min_split_gain=0.42105263157894735, n_estimators=600, n_jobs=1,\n", + " num_leaves=127, objective=None, random_state=None,\n", + " reg_alpha=0.975, reg_lambda=1.425, silent=True, subsample=0.85,\n", + " subsample_for_bin=200000, subsample_freq=2, verbose=-1))],\n", + " '20__standardscalerwrapper': ,\n", + " '20__lightgbmregressor': LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", + " importance_type='split', learning_rate=0.2, max_bin=63,\n", + " max_depth=7, min_child_samples=164, min_child_weight=0.001,\n", + " min_split_gain=0.42105263157894735, n_estimators=600, n_jobs=1,\n", + " num_leaves=127, objective=None, random_state=None,\n", + " reg_alpha=0.975, reg_lambda=1.425, silent=True, subsample=0.85,\n", + " subsample_for_bin=200000, subsample_freq=2, verbose=-1),\n", + " '20__standardscalerwrapper__module_name': 'sklearn.preprocessing.data',\n", + " '20__standardscalerwrapper__class_name': 'StandardScaler',\n", + " '20__standardscalerwrapper__copy': True,\n", + " '20__standardscalerwrapper__with_mean': False,\n", + " '20__standardscalerwrapper__with_std': True,\n", + " '20__lightgbmregressor__random_state': None,\n", + " '20__lightgbmregressor__n_jobs': 1,\n", + " '20__lightgbmregressor__boosting_type': 'gbdt',\n", + " 
'20__lightgbmregressor__class_weight': None,\n", + " '20__lightgbmregressor__colsample_bytree': 1,\n", + " '20__lightgbmregressor__importance_type': 'split',\n", + " '20__lightgbmregressor__learning_rate': 0.2,\n", + " '20__lightgbmregressor__max_depth': 7,\n", + " '20__lightgbmregressor__min_child_samples': 164,\n", + " '20__lightgbmregressor__min_child_weight': 0.001,\n", + " '20__lightgbmregressor__min_split_gain': 0.42105263157894735,\n", + " '20__lightgbmregressor__n_estimators': 600,\n", + " '20__lightgbmregressor__num_leaves': 127,\n", + " '20__lightgbmregressor__objective': None,\n", + " '20__lightgbmregressor__reg_alpha': 0.975,\n", + " '20__lightgbmregressor__reg_lambda': 1.425,\n", + " '20__lightgbmregressor__silent': True,\n", + " '20__lightgbmregressor__subsample': 0.85,\n", + " '20__lightgbmregressor__subsample_for_bin': 200000,\n", + " '20__lightgbmregressor__subsample_freq': 2,\n", + " '20__lightgbmregressor__max_bin': 63,\n", + " '20__lightgbmregressor__verbose': -1,\n", + " '40__memory': None,\n", + " '40__steps': [('truncatedsvdwrapper',\n", + " TruncatedSVDWrapper(n_components=0.40578947368421053, random_state=None)),\n", + " ('xgboostregressor',\n", + " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.9, eta=0.3, gamma=0.01, learning_rate=0.1,\n", + " max_delta_step=0, max_depth=9, max_leaves=31, min_child_weight=1,\n", + " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", + " objective='reg:linear', random_state=0,\n", + " reg_alpha=0.8333333333333334, reg_lambda=1.875,\n", + " scale_pos_weight=1, seed=None, silent=True, subsample=0.6,\n", + " tree_method='auto', verbose=-10))],\n", + " '40__truncatedsvdwrapper': TruncatedSVDWrapper(n_components=0.40578947368421053, random_state=None),\n", + " '40__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.9, eta=0.3, gamma=0.01, learning_rate=0.1,\n", + " max_delta_step=0, max_depth=9, max_leaves=31, min_child_weight=1,\n", + " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", + " objective='reg:linear', random_state=0,\n", + " reg_alpha=0.8333333333333334, reg_lambda=1.875,\n", + " scale_pos_weight=1, seed=None, silent=True, subsample=0.6,\n", + " tree_method='auto', verbose=-10),\n", + " '40__truncatedsvdwrapper__n_components': 0.40578947368421053,\n", + " '40__truncatedsvdwrapper__random_state': None,\n", + " '40__xgboostregressor__base_score': 0.5,\n", + " '40__xgboostregressor__booster': 'gbtree',\n", + " '40__xgboostregressor__colsample_bylevel': 1,\n", + " '40__xgboostregressor__colsample_bytree': 0.9,\n", + " '40__xgboostregressor__gamma': 0.01,\n", + " '40__xgboostregressor__learning_rate': 0.1,\n", + " '40__xgboostregressor__max_delta_step': 0,\n", + " '40__xgboostregressor__max_depth': 9,\n", + " '40__xgboostregressor__min_child_weight': 1,\n", + " '40__xgboostregressor__missing': nan,\n", + " '40__xgboostregressor__n_estimators': 200,\n", + " '40__xgboostregressor__n_jobs': 1,\n", + " '40__xgboostregressor__nthread': None,\n", + " '40__xgboostregressor__objective': 'reg:linear',\n", + " '40__xgboostregressor__random_state': 0,\n", + " '40__xgboostregressor__reg_alpha': 0.8333333333333334,\n", + " '40__xgboostregressor__reg_lambda': 1.875,\n", + " '40__xgboostregressor__scale_pos_weight': 1,\n", + " '40__xgboostregressor__seed': None,\n", + " '40__xgboostregressor__silent': True,\n", + " '40__xgboostregressor__subsample': 0.6,\n", + " '40__xgboostregressor__eta': 0.3,\n", + " 
'40__xgboostregressor__max_leaves': 31,\n", + " '40__xgboostregressor__tree_method': 'auto',\n", + " '40__xgboostregressor__verbose': -10,\n", + " '45__memory': None,\n", + " '45__steps': [('truncatedsvdwrapper',\n", + " TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None)),\n", + " ('xgboostregressor',\n", + " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.5, eta=0.2, gamma=0, grow_policy='lossguide',\n", + " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=10,\n", + " max_leaves=255, min_child_weight=1, missing=nan, n_estimators=50,\n", + " n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n", + " reg_alpha=0.5208333333333334, reg_lambda=2.5, scale_pos_weight=1,\n", + " seed=None, silent=True, subsample=0.9, tree_method='hist',\n", + " verbose=-10))],\n", + " '45__truncatedsvdwrapper': TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None),\n", + " '45__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", + " colsample_bytree=0.5, eta=0.2, gamma=0, grow_policy='lossguide',\n", + " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=10,\n", + " max_leaves=255, min_child_weight=1, missing=nan, n_estimators=50,\n", + " n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n", + " reg_alpha=0.5208333333333334, reg_lambda=2.5, scale_pos_weight=1,\n", + " seed=None, silent=True, subsample=0.9, tree_method='hist',\n", + " verbose=-10),\n", + " '45__truncatedsvdwrapper__n_components': 0.3563157894736842,\n", + " '45__truncatedsvdwrapper__random_state': None,\n", + " '45__xgboostregressor__base_score': 0.5,\n", + " '45__xgboostregressor__booster': 'gbtree',\n", + " '45__xgboostregressor__colsample_bylevel': 1,\n", + " '45__xgboostregressor__colsample_bytree': 0.5,\n", + " '45__xgboostregressor__gamma': 0,\n", + " '45__xgboostregressor__learning_rate': 0.1,\n", + " '45__xgboostregressor__max_delta_step': 0,\n", + " '45__xgboostregressor__max_depth': 10,\n", + " '45__xgboostregressor__min_child_weight': 1,\n", + " '45__xgboostregressor__missing': nan,\n", + " '45__xgboostregressor__n_estimators': 50,\n", + " '45__xgboostregressor__n_jobs': 1,\n", + " '45__xgboostregressor__nthread': None,\n", + " '45__xgboostregressor__objective': 'reg:linear',\n", + " '45__xgboostregressor__random_state': 0,\n", + " '45__xgboostregressor__reg_alpha': 0.5208333333333334,\n", + " '45__xgboostregressor__reg_lambda': 2.5,\n", + " '45__xgboostregressor__scale_pos_weight': 1,\n", + " '45__xgboostregressor__seed': None,\n", + " '45__xgboostregressor__silent': True,\n", + " '45__xgboostregressor__subsample': 0.9,\n", + " '45__xgboostregressor__eta': 0.2,\n", + " '45__xgboostregressor__grow_policy': 'lossguide',\n", + " '45__xgboostregressor__max_bin': 255,\n", + " '45__xgboostregressor__max_leaves': 255,\n", + " '45__xgboostregressor__tree_method': 'hist',\n", + " '45__xgboostregressor__verbose': -10,\n", + " '22__memory': None,\n", + " '22__steps': [('standardscalerwrapper',\n", + " ),\n", + " ('lightgbmregressor',\n", + " LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", + " colsample_bytree=0.9, importance_type='split', learning_rate=0.2,\n", + " max_bin=7, max_depth=5, min_child_samples=60,\n", + " min_child_weight=0.001, min_split_gain=0.3157894736842105,\n", + " n_estimators=600, n_jobs=1, num_leaves=31, objective=None,\n", + " random_state=None, reg_alpha=0.8999999999999999, reg_lambda=1.125,\n", + " silent=True, subsample=0.6, 
subsample_for_bin=200000,\n", + " subsample_freq=2, verbose=-1))],\n", + " '22__standardscalerwrapper': ,\n", + " '22__lightgbmregressor': LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", + " colsample_bytree=0.9, importance_type='split', learning_rate=0.2,\n", + " max_bin=7, max_depth=5, min_child_samples=60,\n", + " min_child_weight=0.001, min_split_gain=0.3157894736842105,\n", + " n_estimators=600, n_jobs=1, num_leaves=31, objective=None,\n", + " random_state=None, reg_alpha=0.8999999999999999, reg_lambda=1.125,\n", + " silent=True, subsample=0.6, subsample_for_bin=200000,\n", + " subsample_freq=2, verbose=-1),\n", + " '22__standardscalerwrapper__module_name': 'sklearn.preprocessing.data',\n", + " '22__standardscalerwrapper__class_name': 'StandardScaler',\n", + " '22__standardscalerwrapper__copy': True,\n", + " '22__standardscalerwrapper__with_mean': False,\n", + " '22__standardscalerwrapper__with_std': True,\n", + " '22__lightgbmregressor__random_state': None,\n", + " '22__lightgbmregressor__n_jobs': 1,\n", + " '22__lightgbmregressor__boosting_type': 'gbdt',\n", + " '22__lightgbmregressor__class_weight': None,\n", + " '22__lightgbmregressor__colsample_bytree': 0.9,\n", + " '22__lightgbmregressor__importance_type': 'split',\n", + " '22__lightgbmregressor__learning_rate': 0.2,\n", + " '22__lightgbmregressor__max_depth': 5,\n", + " '22__lightgbmregressor__min_child_samples': 60,\n", + " '22__lightgbmregressor__min_child_weight': 0.001,\n", + " '22__lightgbmregressor__min_split_gain': 0.3157894736842105,\n", + " '22__lightgbmregressor__n_estimators': 600,\n", + " '22__lightgbmregressor__num_leaves': 31,\n", + " '22__lightgbmregressor__objective': None,\n", + " '22__lightgbmregressor__reg_alpha': 0.8999999999999999,\n", + " '22__lightgbmregressor__reg_lambda': 1.125,\n", + " '22__lightgbmregressor__silent': True,\n", + " '22__lightgbmregressor__subsample': 0.6,\n", + " '22__lightgbmregressor__subsample_for_bin': 200000,\n", + " '22__lightgbmregressor__subsample_freq': 2,\n", + " '22__lightgbmregressor__max_bin': 7,\n", + " '22__lightgbmregressor__verbose': -1,\n", + " 'metalearner__alphas': None,\n", + " 'metalearner__copy_X': True,\n", + " 'metalearner__cv': 'warn',\n", + " 'metalearner__eps': 0.001,\n", + " 'metalearner__fit_intercept': True,\n", + " 'metalearner__l1_ratio': 0.5,\n", + " 'metalearner__max_iter': 1000,\n", + " 'metalearner__n_alphas': 100,\n", + " 'metalearner__n_jobs': None,\n", + " 'metalearner__normalize': False,\n", + " 'metalearner__positive': False,\n", + " 'metalearner__precompute': 'auto',\n", + " 'metalearner__random_state': None,\n", + " 'metalearner__selection': 'cyclic',\n", + " 'metalearner__tol': 0.0001,\n", + " 'metalearner__verbose': 0}" ] }, - "execution_count": 52, + "execution_count": 29, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "fitted_model.pipeline.steps[1][1].get_params()" + "fitted_model.named_steps['stackensembleregressor'].get_params()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also look at how each column in our dataset was featurized by AutoML" ] }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 37, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "['sentence_similarity_regressor.pkl']" + "[{'RawFeatureName': 'C1',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C2',\n", + " 'TypeDetected': 'Numeric',\n", + " 
'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C3',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C4',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C5',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C6',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C7',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C8',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C9',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C10',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C11',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C12',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C13',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C14',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C15',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C16',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C17',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C18',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C19',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C20',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C21',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C22',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 
'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C23',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C24',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C25',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C26',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C27',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C28',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C29',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C30',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C31',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C32',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C33',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C34',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C35',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C36',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C37',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C38',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C39',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C40',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C41',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C42',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " 
{'RawFeatureName': 'C43',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C44',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C45',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C46',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C47',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C48',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C49',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C50',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C51',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C52',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C53',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C54',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C55',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C56',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C57',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C58',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C59',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C60',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C61',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C62',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C63',\n", + " 'TypeDetected': 
'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C64',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C65',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C66',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C67',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C68',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C69',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C70',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C71',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C72',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C73',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C74',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C75',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C76',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C77',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C78',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C79',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C80',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C81',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C82',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C83',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 
'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C84',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C85',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C86',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C87',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C88',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C89',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C90',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C91',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C92',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C93',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C94',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C95',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C96',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C97',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C98',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C99',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C100',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C101',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C102',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C103',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 
'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C104',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C105',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C106',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C107',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C108',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C109',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C110',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C111',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C112',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C113',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C114',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C115',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C116',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C117',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C118',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C119',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C120',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C121',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C122',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C123',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': 
['MeanImputer']},\n", + " {'RawFeatureName': 'C124',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C125',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C126',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C127',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C128',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C129',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C130',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C131',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C132',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C133',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C134',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C135',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C136',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C137',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C138',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C139',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C140',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C141',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C142',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C143',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " 
{'RawFeatureName': 'C144',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C145',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C146',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C147',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C148',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C149',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C150',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C151',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C152',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C153',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C154',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C155',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C156',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C157',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C158',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C159',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C160',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C161',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C162',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C163',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C164',\n", + " 
" [... notebook output truncated: repeated featurization-summary entries for raw features C164 through C547, each reported as {'TypeDetected': 'Numeric', 'Dropped': 'No', 'EngineeredFeatureCount': 1, 'Tranformations': ['MeanImputer']} ...]\n",
'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C548',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C549',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C550',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C551',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C552',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C553',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C554',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C555',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C556',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C557',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C558',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C559',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C560',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C561',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C562',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C563',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C564',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C565',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C566',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C567',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 
'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C568',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C569',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C570',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C571',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C572',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C573',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C574',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C575',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C576',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C577',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C578',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C579',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C580',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C581',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C582',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C583',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C584',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C585',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C586',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C587',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + 
" 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C588',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C589',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C590',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C591',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C592',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C593',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C594',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C595',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C596',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C597',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C598',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C599',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C600',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C601',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C602',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C603',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C604',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C605',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C606',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C607',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': 
['MeanImputer']},\n", + " {'RawFeatureName': 'C608',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C609',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C610',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C611',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C612',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C613',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C614',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C615',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C616',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C617',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C618',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C619',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C620',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C621',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C622',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C623',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C624',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C625',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C626',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C627',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " 
{'RawFeatureName': 'C628',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C629',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C630',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C631',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C632',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C633',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C634',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C635',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C636',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C637',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C638',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C639',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C640',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C641',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C642',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C643',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C644',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C645',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C646',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C647',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C648',\n", + " 
'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C649',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C650',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C651',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C652',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C653',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C654',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C655',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C656',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C657',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C658',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C659',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C660',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C661',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C662',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C663',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C664',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C665',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C666',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C667',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C668',\n", + " 'TypeDetected': 'Numeric',\n", + " 
'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C669',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C670',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C671',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C672',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C673',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C674',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C675',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C676',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C677',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C678',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C679',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C680',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C681',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C682',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C683',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C684',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C685',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C686',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C687',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C688',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 
'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C689',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C690',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C691',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C692',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C693',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C694',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C695',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C696',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C697',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C698',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C699',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C700',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C701',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C702',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C703',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C704',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C705',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C706',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C707',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C708',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + 
" 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C709',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C710',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C711',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C712',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C713',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C714',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C715',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C716',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C717',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C718',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C719',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C720',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C721',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C722',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C723',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C724',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C725',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C726',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C727',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C728',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': 
['MeanImputer']},\n", + " {'RawFeatureName': 'C729',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C730',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C731',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C732',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C733',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C734',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C735',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C736',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C737',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C738',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C739',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C740',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C741',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C742',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C743',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C744',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C745',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C746',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C747',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C748',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " 
{'RawFeatureName': 'C749',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C750',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C751',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C752',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C753',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C754',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C755',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C756',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C757',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C758',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C759',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C760',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C761',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C762',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C763',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C764',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C765',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C766',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C767',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C768',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C769',\n", + " 
'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C770',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C771',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C772',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C773',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C774',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C775',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C776',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C777',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C778',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C779',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C780',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C781',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C782',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C783',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C784',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C785',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C786',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C787',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C788',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C789',\n", + " 'TypeDetected': 'Numeric',\n", + " 
'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C790',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C791',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C792',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C793',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C794',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C795',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C796',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C797',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C798',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C799',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C800',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C801',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C802',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C803',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C804',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C805',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C806',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C807',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C808',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C809',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 
'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C810',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C811',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C812',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C813',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C814',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C815',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C816',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C817',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C818',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C819',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C820',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C821',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C822',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C823',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C824',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C825',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C826',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C827',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C828',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C829',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + 
" 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C830',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C831',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C832',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C833',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C834',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C835',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C836',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C837',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C838',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C839',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C840',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C841',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C842',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C843',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C844',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C845',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C846',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C847',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C848',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C849',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': 
['MeanImputer']},\n", + " {'RawFeatureName': 'C850',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C851',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C852',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C853',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C854',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C855',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C856',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C857',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C858',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C859',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C860',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C861',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C862',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C863',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C864',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C865',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C866',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C867',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C868',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C869',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " 
{'RawFeatureName': 'C870',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C871',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C872',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C873',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C874',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C875',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C876',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C877',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C878',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C879',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C880',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C881',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C882',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C883',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C884',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C885',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C886',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C887',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C888',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C889',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C890',\n", + " 
'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C891',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C892',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C893',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C894',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C895',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C896',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C897',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C898',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C899',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C900',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C901',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C902',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C903',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C904',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C905',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C906',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C907',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C908',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C909',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C910',\n", + " 'TypeDetected': 'Numeric',\n", + " 
'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C911',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C912',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C913',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C914',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C915',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C916',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C917',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C918',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C919',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C920',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C921',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C922',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C923',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C924',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C925',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C926',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C927',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C928',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C929',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C930',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 
'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C931',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C932',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C933',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C934',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C935',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C936',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C937',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C938',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C939',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C940',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C941',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C942',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C943',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C944',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C945',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C946',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C947',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C948',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C949',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C950',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + 
" 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C951',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C952',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C953',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C954',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C955',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C956',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C957',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C958',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C959',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C960',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C961',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C962',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C963',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C964',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C965',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C966',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C967',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C968',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C969',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C970',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': 
['MeanImputer']},\n", + " {'RawFeatureName': 'C971',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C972',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C973',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C974',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C975',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C976',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C977',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C978',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C979',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C980',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C981',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C982',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C983',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C984',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C985',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C986',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C987',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C988',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C989',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C990',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " 
{'RawFeatureName': 'C991',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C992',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C993',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C994',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C995',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C996',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C997',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C998',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C999',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " {'RawFeatureName': 'C1000',\n", + " 'TypeDetected': 'Numeric',\n", + " 'Dropped': 'No',\n", + " 'EngineeredFeatureCount': 1,\n", + " 'Tranformations': ['MeanImputer']},\n", + " ...]" ] }, - "execution_count": 44, + "execution_count": 37, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from sklearn.externals import joblib\n", - "\n", - "model_path = 'sentence_similarity_regressor.pkl'\n", - "\n", - "joblib.dump(fitted_model, model_path)" + "fitted_model.named_steps['datatransformer'].get_featurization_summary()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prediction\n", + "Finally, we can use the best model to make a prediction on our test set using pearson correlation as our metric" ] }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 36, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.7722744639946534\n" + ] + } + ], "source": [ - "m2 = joblib.load('sentence_similarity_regressor.pkl')" + "X_test = testing_data.drop(\"score\", axis=1).values\n", + "y_test = testing_data['score'].values.flatten()\n", + "\n", + "y_pred = fitted_model.predict(X_test)\n", + "print(pearsonr(y_pred, y_test)[0])" ] }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 44, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "RegressionPipeline(pipeline=Pipeline(memory=None,\n", - " steps=[('datatransformer', DataTransformer(enable_feature_sweeping=None, feature_sweeping_timeout=None,\n", - " is_onnx_compatible=None, logger=None, observer=None, task=None)), ('prefittedsoftvotingregressor', PreFittedSoftVotingRegressor(estimators=[('24', Pipeline(memory=None,\n", - " steps=[('stand...333333333333, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667, 0.06666666666666667]))]),\n", - " stddev=None)" + "['sentence_similarity_regressor.pkl']" ] }, - "execution_count": 54, + "execution_count": 
44, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "m2" + "#Save the model as a pkl file\n", + "model_path = 'sentence_similarity_regressor.pkl'\n", + "joblib.dump(fitted_model, model_path)" ] } ], From 16b43746f18019b3b0772d09fd60226345503903 Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Sun, 16 Jun 2019 21:50:16 -0400 Subject: [PATCH 071/108] automl model deployed using ACI --- ...nt_google_universal_sentence_encoder.ipynb | 1260 +++++++++++++++++ 1 file changed, 1260 insertions(+) create mode 100644 scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb diff --git a/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb b/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb new file mode 100644 index 000000000..1c6f2df79 --- /dev/null +++ b/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb @@ -0,0 +1,1260 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using AutoML for Predicting Sentence Similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrates how to use Azure AutoML to automate machine learning model selection and tuning. It also demonstrates how to use a popular sentence embedding model from Google, Universal Sentence Encoder. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### What is Azure AutoML?\n", + "\n", + "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", + "\n", + "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](automl.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in python notebooks through the use of widgets. AutoML quickly selects a high quilty machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Modeling Problem\n", + "\n", + "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. 
The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence).\n", + "\n", + "For each sentence in the sentence pair, we will use Google's pretrained Universal Sentence Encoder (details provided below) to generate a $512$-dimensional embedding. Both embeddings in the sentence pair will be concatenated and the resulting $1024$-dimensional vector will be used as features in our regression problem. Our target variable is the sentence similarity score." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Turning diagnostics collection on. \n", + "System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]\n", + "Azure ML SDK Version: 1.0.43\n", + "Pandas version: 0.23.4\n", + "Tensorflow Version: 1.13.1\n" + ] + } + ], + "source": [ + "# Set the environment path to find NLP\n", + "import sys\n", + "sys.path.append(\"../../\")\n", + "import time\n", + "import os\n", + "import pandas as pd\n", + "import shutil\n", + "import numpy as np\n", + "import torch\n", + "import sys\n", + "from scipy.stats import pearsonr\n", + "from scipy.spatial import distance\n", + "from sklearn.externals import joblib\n", + "\n", + "# Import utils\n", + "from utils_nlp.azureml import azureml_utils\n", + "from utils_nlp.dataset import stsbenchmark\n", + "from utils_nlp.dataset.preprocess import (\n", + " to_lowercase,\n", + " to_spacy_tokens,\n", + " rm_spacy_stopwords,\n", + ")\n", + "\n", + "# Tensorflow dependencies for Google Universal Sentence Encoder\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "# AzureML packages\n", + "import azureml as aml\n", + "import logging\n", + "from azureml.telemetry import set_diagnostics_collection\n", + "set_diagnostics_collection(send_diagnostics=True)\n", + "from azureml.train.automl import AutoMLConfig\n", + "from azureml.core.experiment import Experiment\n", + "from azureml.widgets import RunDetails\n", + "from azureml.train.automl.run import AutoMLRun\n", + "from azureml.core.webservice import AciWebservice, Webservice\n", + "\n", + "print(\"System version: {}\".format(sys.version))\n", + "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", + "print(\"Pandas version: {}\".format(pd.__version__))\n", + "print(\"Tensorflow Version:\", tf.VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "BASE_DATA_PATH = '../../data'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Data Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## STS Benchmark Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. 
We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 158KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 274KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 164KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + } + ], + "source": [ + "# Load in the raw datasets as pandas dataframes\n", + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", + "# and renaming the remaining columns\n", + "train = stsbenchmark.clean_sts(train_raw)\n", + "dev = stsbenchmark.clean_sts(dev_raw)\n", + "test = stsbenchmark.clean_sts(test_raw)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training set has 5749 sentences\n", + "Development set has 1500 sentences\n", + "Testing set has 1379 sentences\n" + ] + } + ], + "source": [ + "print(\"Training set has {} sentences\".format(len(train)))\n", + "print(\"Development set has {} sentences\".format(len(dev)))\n", + "print(\"Testing set has {} sentences\".format(len(test)))\n" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
scoresentence1sentence2
05.00A plane is taking off.An air plane is taking off.
13.80A man is playing a large flute.A man is playing a flute.
23.80A man is spreading shreded cheese on a pizza.A man is spreading shredded cheese on an uncoo...
32.60Three men are playing chess.Two men are playing chess.
44.25A man is playing the cello.A man seated is playing the cello.
\n", + "
" + ], + "text/plain": [ + " score sentence1 \\\n", + "0 5.00 A plane is taking off. \n", + "1 3.80 A man is playing a large flute. \n", + "2 3.80 A man is spreading shreded cheese on a pizza. \n", + "3 2.60 Three men are playing chess. \n", + "4 4.25 A man is playing the cello. \n", + "\n", + " sentence2 \n", + "0 An air plane is taking off. \n", + "1 A man is playing a flute. \n", + "2 A man is spreading shredded cheese on an uncoo... \n", + "3 Two men are playing chess. \n", + "4 A man seated is playing the cello. " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train.head(5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Feature Engineering: Universal Sentence Encoder" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have our sentence pairs loaded, we will convert these sentences into a numerical representation in order to use them in our machine learning model. To do this, we'll use a popular sentence encoder called Google Universal Sentence Encoder (see [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). Google provides two pretrained models based on different design goals: a Transformer model (targets high accuracy even if this reduces model complexity) and a Deep Averaging Network model (DAN; targets efficient inference). Both models are trained on a variety of web sources (Wikipedia, news, question-answers pages, and discussion forums) and produced 512-dimensional embeddings. This notebook utilizes the Transformer-based encoding model which can be downloaded [here](https://tfhub.dev/google/universal-sentence-encoder-large/3) because of its better performance relative to the DAN model on the STS Benchmark dataset (see Table 2 in Google Research's [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Google Universal Sentence Encoder: Transformer Model** The Transformer model produces sentence embeddings using the \"encoding sub-graph of the transformer architecture\" (original architecture introduced [here](https://arxiv.org/abs/1706.03762)). \"This sub-graph uses attention to compute context aware representations of words in a sentence that take into account both the ordering and identity of all the other workds. The context aware word representations are converted to a fixed length sentence encoding vector by computing the element-wise sum of the representations at each word position.\" The input to the model is lowercase PTB-tokenized strings and the model is designed to be useful for multiple different tasks by using multi-task learning. More details about the model can be found in the [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf) by Google Research." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Using the Pretrained Model**\n", + "\n", + "Tensorflow-hub provides the pretrained model for use by the public. We import the model from its url and then feed the model our sentences for it to encode." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", + "\n", + "# Import the Universal Sentence Encoder's TF Hub module\n", + "embedding_model = hub.Module(module_url)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def google_encoder(dataset):\n", + " \"\"\" Function that embeds sentences using the Google Universal\n", + " Sentence Encoder pretrained model\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " emb1: 512-dimensional representation of sentence1\n", + " emb2: 512-dimensional representation of sentence2\n", + " \"\"\"\n", + " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", + " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", + "\n", + " # Apply embedding model and normalize the input\n", + " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", + " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", + " \n", + " with tf.Session() as session:\n", + " session.run(tf.global_variables_initializer())\n", + " session.run(tf.tables_initializer())\n", + " emb1, emb2 = session.run(\n", + " [sts_encode1, sts_encode2],\n", + " feed_dict={\n", + " sts_input1: dataset['sentence1'],\n", + " sts_input2: dataset['sentence2']\n", + " })\n", + " return emb1, emb2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As features, we will embed both sentences using the Google Universal Sentence Encoder and concatenate their representations into a $1024$-dimensional vector. The resulting data will be saved in a dataframe for consumption by our AutoML model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def feature_engineering(dataset):\n", + " \"\"\"Extracts embedding features from the dataset and returns\n", + " features and target in a dataframe\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " df: pandas dataframe with embedding features and target variable\n", + " \"\"\"\n", + " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", + " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", + " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", + " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", + " df = pd.DataFrame(df, columns=names)\n", + " df['score'] = dataset['score'].tolist()\n", + " return df" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "training_data = feature_engineering(train)\n", + "validation_data = feature_engineering(dev)\n", + "testing_data = feature_engineering(test)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "#Take this out later\n", + "\n", + "training_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_train.csv\"), index=None)\n", + "testing_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_test.csv\"), index=None)\n", + "validation_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_dev.csv\"), index=None)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Calculate Baseline Performance" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Before using AutoML we will calculate a baseline to compare the AutoML results to. For the baseline we will take the Google Universal Sentence Encoder embeddings of each sentence, calculate the cosine similarity between the two sentence embeddings, then compare the predicted values with the true scores using pearson correlation. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### What is Pearson Correlation?\n", + "\n", + "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", + "\n", + "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", + "\n", + "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." 
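As a quick illustration of how the metric behaves (on made-up score vectors, not the STS data): Pearson correlation is +1 for any increasing linear relationship, -1 for a decreasing one, and it is unchanged by linear rescaling of the predictions, which is why it suits comparing predicted scores against human judgments on a 0-5 scale.

    from scipy.stats import pearsonr

    truth = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
    print(pearsonr(truth, truth)[0])                       # 1.0: identical scores
    print(pearsonr([2 * t + 1 for t in truth], truth)[0])  # 1.0: linear rescaling does not change it
    print(pearsonr([5 - t for t in truth], truth)[0])      # -1.0: perfectly reversed ordering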
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "def get_baseline_performance(data):\n", + " \"\"\" Get baseline performance by calculating the cosine similarity between\n", + " the embeddings in the sentence pair and then evaluating the pearson \n", + " correlation between the predicted and true similarity scores\n", + " \n", + " Parameters:\n", + " ----------\n", + " data: dataframe containing embeddings and similarity scores\n", + " \"\"\"\n", + " emb1 = data[[i for i in data.columns if 'USEEmb1' in i]].values.tolist()\n", + " emb2 = data[[i for i in data.columns if 'USEEmb2' in i]].values.tolist()\n", + " scores = data['score'].values.tolist()\n", + " \n", + " predictions = [1-distance.cosine(emb1[i], emb2[i]) for i in range(len(emb1))]\n", + " print(\"Google Universal Sentence Encoder Pearson Correlation:\", round(pearsonr(predictions, scores)[0],3))" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Google Universal Sentence Encoder Pearson Correlation: 0.764\n" + ] + } + ], + "source": [ + "get_baseline_performance(testing_data)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# AutoML" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "AutoML can be used for classification, regression or timeseries experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist." + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "WARNING - You have logged in. Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n", + "Workspace name: MAIDAPNLP\n", + "Azure region: eastus2\n", + "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", + "Resource group: nlprg\n" + ] + } + ], + "source": [ + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## AutoMLConfig Parameters\n", + "Next, we specify the parameters for the AutoMLConfig class. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**task** \n", + "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**preprocess** \n", + "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**primary_metric** \n", + "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Constraints:** \n", + "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after acheiving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "automl_settings = {\n", + " \"task\": 'regression', #type of task: classification, regression or forecasting\n", + " \"debug_log\": 'automated_ml_errors.log',\n", + " \"path\": './automated-ml-regression',\n", + " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", + " \"iterations\" : 50, #Number of algorithm options to try\n", + " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", + " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", + " \"verbosity\":logging.ERROR}" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "X_train = training_data.drop(\"score\", axis=1).values\n", + "y_train = training_data['score'].values.flatten()\n", + "X_validation = validation_data.drop(\"score\", axis=1).values\n", + "y_validation = validation_data['score'].values.flatten()\n", + "\n", + "# local compute\n", + "automated_ml_config = AutoMLConfig(\n", + " X = X_train,\n", + " y = y_train,\n", + " X_valid = X_validation,\n", + " y_valid = y_validation,\n", + " **automl_settings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run the Experiment\n", + "\n", + "Run the experiment locally and inspect the results using a widget" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running on local machine\n", + "Parent Run ID: AutoML_5b011db3-83db-4ab5-afa6-9acf2c1a9515\n", + "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", + "Current status: DatasetEvaluation. Gathering dataset statistics.\n", + "Current status: FeaturesGeneration. Generating features for the dataset.\n", + "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", + "Current status: ModelSelection. 
Beginning model selection.\n", + "\n", + "****************************************************************************************************\n", + "ITERATION: The iteration being evaluated.\n", + "PIPELINE: A summary description of the pipeline being evaluated.\n", + "DURATION: Time taken for the current iteration.\n", + "METRIC: The result of computing score on the fitted pipeline.\n", + "BEST: The best observed score thus far.\n", + "****************************************************************************************************\n", + "\n", + " ITERATION PIPELINE DURATION METRIC BEST\n", + " 0 StandardScalerWrapper RandomForest 0:01:17 0.1655 0.1655\n", + " 1 MinMaxScaler RandomForest 0:02:19 0.4102 0.4102\n", + " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:23 0.2577 0.4102\n", + " 3 StandardScalerWrapper LightGBM 0:00:21 0.2708 0.4102\n", + " 4 RobustScaler DecisionTree 0:00:27 0.2435 0.4102\n", + " 5 StandardScalerWrapper LassoLars 0:00:15 0.1246 0.4102\n", + " 6 StandardScalerWrapper LightGBM 0:00:20 0.6567 0.6567\n", + " 7 StandardScalerWrapper RandomForest 0:00:20 0.2128 0.6567\n", + " 8 StandardScalerWrapper LassoLars 0:00:20 0.0836 0.6567\n", + " 9 MinMaxScaler ExtremeRandomTrees 0:00:25 0.3767 0.6567\n", + " 10 RobustScaler ExtremeRandomTrees 0:01:05 0.3615 0.6567\n", + " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:21 0.2653 0.6567\n", + " 12 StandardScalerWrapper LassoLars 0:00:11 nan 0.6567\n", + "ERROR: Run AutoML_5b011db3-83db-4ab5-afa6-9acf2c1a9515_12 failed with exception \"Primary metric spearman_correlation is not available.\".\n", + " 13 MinMaxScaler ExtremeRandomTrees 0:00:22 0.2885 0.6567\n", + " 14 RobustScaler RandomForest 0:00:29 0.3499 0.6567\n", + " 15 StandardScalerWrapper LassoLars 0:00:14 nan 0.6567\n", + "ERROR: Run AutoML_5b011db3-83db-4ab5-afa6-9acf2c1a9515_15 failed with exception \"Primary metric spearman_correlation is not available.\".\n", + " 16 StandardScalerWrapper ExtremeRandomTrees 0:00:15 0.2155 0.6567\n", + " 17 StandardScalerWrapper RandomForest 0:00:16 0.2296 0.6567\n", + " 18 MinMaxScaler SGD 0:00:17 0.0990 0.6567\n", + " 19 StandardScalerWrapper RandomForest 0:00:46 0.3087 0.6567\n", + " 20 StandardScalerWrapper LightGBM 0:00:40 0.7412 0.7412\n", + " 21 StandardScalerWrapper LightGBM 0:00:32 0.6983 0.7412\n", + " 22 StandardScalerWrapper LightGBM 0:00:37 0.6864 0.7412\n", + " 23 StandardScalerWrapper RandomForest 0:04:30 0.4236 0.7412\n", + " 24 MaxAbsScaler DecisionTree 0:06:51 0.2587 0.7412\n", + " 25 MaxAbsScaler LightGBM 0:00:22 0.3161 0.7412\n", + " 26 StandardScalerWrapper LightGBM 0:01:25 0.5771 0.7412\n", + " 27 RobustScaler DecisionTree 0:00:21 0.2484 0.7412\n", + " 28 MaxAbsScaler LightGBM 0:02:55 0.7195 0.7412\n", + " 29 StandardScalerWrapper LightGBM 0:01:49 0.7379 0.7412\n", + " 30 SparseNormalizer LightGBM 0:00:38 0.6011 0.7412\n", + " 31 MaxAbsScaler LightGBM 0:00:47 0.6835 0.7412\n", + " 32 StandardScalerWrapper DecisionTree 0:06:19 0.2630 0.7412\n", + " 33 MaxAbsScaler LightGBM 0:00:32 0.7460 0.7460\n", + " 34 StandardScalerWrapper LightGBM 0:00:36 0.5717 0.7460\n", + " 35 StandardScalerWrapper LightGBM 0:00:36 0.7115 0.7460\n", + " 36 MaxAbsScaler LightGBM 0:00:37 0.7265 0.7460\n", + " 37 MaxAbsScaler LightGBM 0:01:12 0.6830 0.7460\n", + " 38 SparseNormalizer LightGBM 0:00:32 0.6854 0.7460\n", + " 39 MaxAbsScaler LightGBM 0:00:36 0.6779 0.7460\n", + " 40 SparseNormalizer LightGBM 0:01:38 0.3032 0.7460\n", + " 41 MaxAbsScaler LightGBM 0:00:30 0.5939 0.7460\n", + " 42 MinMaxScaler DecisionTree 0:00:31 0.1622 
0.7460\n", + " 43 MaxAbsScaler LightGBM 0:02:34 0.7011 0.7460\n", + " 44 MaxAbsScaler LightGBM 0:00:41 0.6090 0.7460\n", + " 45 RobustScaler LightGBM 0:00:35 0.3380 0.7460\n", + " 46 MaxAbsScaler LightGBM 0:00:58 0.4714 0.7460\n", + " 47 MaxAbsScaler LightGBM 0:00:35 0.7303 0.7460\n", + " 48 VotingEnsemble 0:01:34 0.7938 0.7938\n", + " 49 StackEnsemble 0:06:14 0.7943 0.7943\n" + ] + } + ], + "source": [ + "experiment=Experiment(ws, 'automated-ml-regression')\n", + "local_run = experiment.submit(automated_ml_config, show_output=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, my accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Inspect the run details using the provided widget\n", + "RunDetails(local_run).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deploy\n", + "\n", + "### Retrieve the Best Model\n", + "Below we select the best pipeline from our iterations. The get_output method returns the best run and the fitted model for the last invocation. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "best_run, fitted_model = local_run.get_output()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Register the Fitted Model for Deployment\n", + "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered." 
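The get_output and register_model overloads mentioned above take metric and iteration keyword arguments. The sketch below (not executed in this notebook; iteration 33 is just an example taken from the iteration table above) shows how a model other than the overall best could be retrieved or registered, assuming the local_run from the experiment above.

    # Best run/model according to a specific logged metric
    run_by_metric, model_by_metric = local_run.get_output(metric='spearman_correlation')

    # Run/model produced by one particular iteration
    run_by_iter, model_by_iter = local_run.get_output(iteration=33)

    # register_model accepts the same metric/iteration arguments; with neither given,
    # the iteration with the best primary metric is registered (as in the next cell).
    # model = local_run.register_model(description='AutoML model', iteration=33)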
+ ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Registering model AutoML5b011db38best\n", + "AutoML5b011db38best\n" + ] + } + ], + "source": [ + "description = 'AutoML Model'\n", + "tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"}\n", + "name = 'automl'\n", + "model = local_run.register_model(description = description, tags = tags)\n", + "\n", + "print(local_run.model_id) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create Scoring Script" + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting score.py\n" + ] + } + ], + "source": [ + "%%writefile score.py\n", + "import pickle\n", + "import json\n", + "import numpy\n", + "import azureml.train.automl\n", + "from sklearn.externals import joblib\n", + "from azureml.core.model import Model\n", + "\n", + "\n", + "def init():\n", + "    global model\n", + "    model_path = Model.get_model_path(model_name = '<>') # this name is the model.id of the model we want to deploy\n", + "    # deserialize the model file back into a sklearn model\n", + "    model = joblib.load(model_path)\n", + "\n", + "def run(rawdata):\n", + "    try:\n", + "        data = json.loads(rawdata)['data']\n", + "        data = numpy.array(data)\n", + "        result = model.predict(data)\n", + "    except Exception as e:\n", + "        result = str(e)\n", + "        return json.dumps({\"error\": result})\n", + "    return json.dumps({\"result\":result.tolist()})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create a YAML File for the Environment\n", + "\n", + "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, autoenv.yml, which specifies the dependencies from the run."
+ ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "experiment=Experiment(ws, 'automated-ml-regression')\n", + "ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No issues found in the SDK package versions.\n" + ] + } + ], + "source": [ + "dependencies = ml_run.get_run_sdk_dependencies(iteration = 7)" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "azureml-train-automl\t1.0.43.1\n", + "azureml-sdk\t1.0.43\n", + "azureml-core\t1.0.43\n" + ] + } + ], + "source": [ + "for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:\n", + " print('{}\\t{}'.format(p, dependencies[p]))" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'autoenv.yml'" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from azureml.core.conda_dependencies import CondaDependencies\n", + "\n", + "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80'],\n", + " pip_packages=['azureml-sdk[automl]'], python_version = '3.6.8')\n", + "\n", + "conda_env_file_name = 'autoenv.yml'\n", + "myenv.save_to_file('.', conda_env_file_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "# Substitute the actual version number in the environment file.\n", + "# This is not strictly needed in this notebook because the model should have been generated using the current SDK version.\n", + "# However, we include this in case this code is used on an experiment from a previous SDK version.\n", + "\n", + "'''with open(conda_env_file_name, 'r') as cefr:\n", + " content = cefr.read()\n", + "\n", + "with open(conda_env_file_name, 'w') as cefw:\n", + " cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk']))\n", + "'''\n", + "# Substitute the actual model id in the script file.\n", + "\n", + "script_file_name = 'score.py'\n", + "\n", + "with open(script_file_name, 'r') as cefr:\n", + " content = cefr.read()\n", + "\n", + "with open(script_file_name, 'w') as cefw:\n", + " cefw.write(content.replace('<>', local_run.model_id))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create a Container Image" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating image\n", + "Running.\n", + "NotStarted...............................................\n", + "Succeeded\n", + "Image creation operation finished for image automl-image:8, operation \"Succeeded\"\n" + ] + } + ], + "source": [ + "from azureml.core.image import ContainerImage\n", + "\n", + "image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n", + " runtime = \"python\",\n", + " conda_file = \"autoenv.yml\",\n", + " description = \"Image with automl model\",\n", + " tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"})\n", + "\n", + "image = ContainerImage.create(name = \"automl-image\",\n", + " # this is the model object\n", + " models = [model],\n", + " image_config = image_config,\n", + " workspace = 
ws)\n", + "\n", + "image.wait_for_creation(show_output = True)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "https://maidapnlp0056795534.blob.core.windows.net/azureml/ImageLogs/48a3794d-b14b-4ab1-aef8-357485615f27/build.log?sv=2018-03-28&sr=b&sig=FSfsmE7TEOgWeYMa8DQjrTZg31z1WEd3uO%2F5Q1%2F02gU%3D&st=2019-06-16T21%3A46%3A17Z&se=2019-07-16T21%3A51%3A17Z&sp=rl\n" + ] + } + ], + "source": [ + "print(image.image_build_log_uri) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Deploy the Image as a Web Service on Azure Container Instance" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [], + "source": [ + "#Set the web service configuration (using default here)\n", + "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", + " memory_gb = 1)" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating service\n", + "Running...................\n", + "SucceededACI service creation operation finished, operation \"Succeeded\"\n", + "Healthy\n" + ] + } + ], + "source": [ + "# deploy image as web service\n", + "aci_service_name ='aci-service-with-automl'\n", + "aci_service = Webservice.deploy_from_image(workspace = ws, \n", + " name = aci_service_name,\n", + " image = image,\n", + " deployment_config = aci_config)\n", + "\n", + "aci_service.wait_for_deployment(show_output = True)\n", + "print(aci_service.state)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Test" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "21256649\n" + ] + } + ], + "source": [ + "# load multiple sentences\n", + "import pandas as pd\n", + "import json \n", + "\n", + "sentences = []\n", + "data = pd.read_csv(\"testing_set.csv\")\n", + "train_y = data['score'].values.flatten()\n", + "train_x = data.drop(\"score\", axis=1).values\n", + "\n", + "print(type(train_x))\n", + "\n", + "train_x = train_x.tolist()\n", + "data = {'data': train_x}\n", + "data = json.dumps(data)\n", + "print(len(data))\n", + "\n", + "#print(data)" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "nb sentences encoded : 27145\n", + "{\"result\": [1.4322549411038765, 3.701496597224398, 3.0479749754684877, 3.9308189127466533, 0.8635069938099544, 1.751576288452982, 3.040980961190109, 2.1431064151633423, 2.191154825045265, 1.2682110745155055, 1.2682110745155055, 4.40765538725924, 0.7558847754292286, 3.390359555883792, 2.3854790756015833, 1.7344997332170062, 4.589225643618903, 3.3416602405105817, 3.4048902285962597, 1.6773498215648046, 2.075599433031979, 0.7595681987065432, 3.9995388312550193, 3.7261206556907625, 1.19578134877992, 2.6713024996174513, 0.8020056210402666, 2.8197897135898087, 1.559042795238717, 3.3018493059315888, 4.021559714224404, 3.6230277884565276, 1.552067486049196, 4.134648970804008, 2.9405120739331627, -0.615576864235015, 0.7909618994365504, 2.932382403966035, 2.96584032513176, 0.5734777148904935, 2.9680829681354344, 2.642053672175344, 3.3176550749645064, 0.8861250646792223, 2.1513692735537417, 0.9555818872587727, 0.62480371701695, 0.27621102112607265, 2.5383000690934567, 
1.894986180892777, 0.9470776270035255, 1.4772919968083298, -0.20512909341739372, 0.5368315777245871, 1.1370111739455173, -0.6110950310809333, 3.009552682781307, 0.838263432047385, 3.25524852531291, 1.748537225118001, 3.4162818656474045, 0.6309719383744596, 3.188171415158594, 1.1885539579202526, 3.885881180873047, -0.10989696061508847, 0.036949805478808884, 2.983700334965398, 0.1553958035603875, 3.4602422942767226, 2.8792367087831425, 3.3639351537595155, 0.6779550805450834, 2.9158586050826276, 2.670557684953456, 0.6516185013788782, 0.7987308948801775, -1.6724892263668112, 2.9758651057830274, 3.380656950457761, 0.04004516158518212, 0.7140577657874352, 2.6903054796295574, 1.5692886710979983, 0.11919953362192359, 1.143133149214036, 2.8050995640988585, 3.407908824524682, 3.2087369294620047, 0.7348397113383949, 1.7103949656579203, 3.195163273653194, 1.1199143710847377, 2.3346836178833947, 0.926696818163081, 3.0307953108319876, 4.168205451761104, 3.1367024152252605, 2.535571374257174, 2.70075409983324, -1.0044958301580198, 0.8428663210598888, 3.1940961166154502, 1.7675487826960015, 1.9053703873647803, 2.1382907699481084, -0.41305936846402436, 1.9052852132521325, 1.4009017961585104, 2.4418049938821267, 3.2257252328050776, 2.9018881361276554, 3.3011931786066007, 3.5698511304516214, -0.40603068624334326, -0.7481115480274674, 3.023014564190224, 2.2025556018829344, 0.8866930092348548, 0.7673690130703548, 1.4729108548829184, 0.8091773711256964, 1.3982419824010752, 0.497889676390312, 0.22313132806206126, 3.160855586109493, 0.9059444552529206, 2.1742813156418284, 2.323760254446841, 2.8829088427218896, 2.735227770796081, 2.926382914126975, 3.33319486772642, 4.08659817402215, 2.1160480705871563, 5.0896682512940625, 4.0524223720180395, 2.379174385587522, 1.6227063418743053, 3.7629148781572526, 4.176522298449342, 3.161785575922274, 2.4714191372317287, 3.908907190299748, 2.3143758345939633, 1.3225073955146498, 2.244382141500616, 3.2008764437317523, 1.6626302739062786, 0.25310424813460686, 2.0701176797980883, 1.3179306945641835, 3.0406061958776505, 3.1663366146122196, 3.3952014826237993, 2.7637033539720397, 0.23550756818263208, 1.3503743471435388, 3.757923837995467, 0.09644109520643562, 3.819737616673461, 2.8496691855314142, 1.6776859994046118, 3.233028474487374, 3.3166665542180405, 3.4020225680580762, 2.9230998306879763, 2.7375896396803756, 3.371268676623925, 2.351100544420622, 1.9837394897022427, 0.857168695309404, 3.299183079595004, 2.9734131513230335, 2.8062089719758836, 3.011163505483155, 2.8928652460502957, 3.3624907560547013, -0.7281242803067453, -0.08711697765668303, 0.3341523221312479, 2.046039991089384, 2.4177579231586983, 0.33274470050645744, 1.7461737760112204, -0.7195877983867224, -0.6296044131484234, -0.031746836738074435, 0.470579333672863, -0.1518817951981256, 3.593810223231587, 3.501204912910677, 1.2118771603351464, 2.928006962566029, 3.032530269483511, 3.715935300448145, 2.7492699001294874, 0.8160727303811668, -0.9619788359405301, 3.137679148655353, 0.5637181314112811, 1.0529799950560057, 1.0354067430198106, 0.21626668305393348, 0.7575978198258919, 1.0414766079673619, 3.7026328821540937, 0.18723276926880894, 2.018489939454151, 2.0982145454503907, 2.157541762033266, 0.9769805099453741, 0.914351958029362, 1.3249123736499873, 2.7977862360025885, 0.9575013815713964, 2.4384663704097327, 2.709006953931115, 3.7257064000887374, 2.366866244836494, 0.24240937509058735, 0.530211368038743, 1.9878195352184336, 2.8964998048649093, -0.671408510765914, 1.309177876131379, 1.6866903654001253, 
2.6557547047992442, 1.0781755885340782, 0.6030910740174362, -0.13691451952812483, 0.5415414633283682, 1.8420112736436844, 3.4999537080657963, 3.1039569707062538, 2.68383212748226, -0.1990929748930763, 0.7985312625991492, 2.0211596075037117, -0.8466206372715817, 2.1330901572117935, -0.6155253096984299, 0.36905397052109823, -0.5380646127286067, 2.9145332002194477, 0.045828573716479526, 1.6578745076161314, 1.6246003796506798, -0.18990385887850791, 0.23744448164741594, 3.197354573255658, 3.5901615146301498, 3.582764091090041, 2.6294221152328583, 0.5268030077362613, 3.174438459771424, -0.04764748935583474, 1.9663393994338598, 2.9092550072300067, -0.37166341033469363, 4.229164378398597, 2.666773843905054, 3.2497869602753857, 3.0964060005334306, 1.6936622337157892, 1.5527513768130201, 2.0449976414853936, 2.4944068738879834, 1.7909269726797827, 4.231603748025412, 3.3296364232914226, 0.6757386671097878, 3.6449662889235266, 3.1389669533690627, 3.636499122887811, 3.9374237663081337, 0.878717702356582, -0.1290092187173273, 3.241939254658996, 2.7140887326055445, 1.3607405737593696, 1.4257490512391215, 3.23106725651732, 3.3218174551641275, 3.5152293218128228, 2.7732445462629687, 2.4136371334439337, 3.1472589026937174, 2.808800601425598, 1.3613838759960037, 3.573339845490277, 1.630077792944316, 3.0283175677111642, 3.7906917296943052, 0.040061762084844466, -0.04181369776242394, 1.7680746256292084, 3.2832409475675335, 3.039199515391712, 2.621054413449726, 3.7185706968951497, 2.0445555184667383, 1.4786708685517285, 3.6722082249869428, 2.9083077202272425, 3.28684709865959, 3.749337714599618, 2.0953913554482724, 3.5848399987718307, 3.1018762207977564, 1.017871382115535, 0.13396404610707469, 0.3304748751582507, 3.6543788011061262, 2.8957678867158596, 3.171904629549589, 2.0959071999748935, 3.2767306542490116, 0.7272101536144566, 1.2655240128925875, 3.3116757777286767, 2.563048521093771, 3.3708592325063838, 2.664535335194219, 3.168558668797516, -0.0018645519142349576, 3.030236721435313, 2.5488152028569107, 3.9438995266582126, 2.0717039531569643, 3.8899847740606703, 3.338091158172413, 3.7105463580128277, 3.362262276198568, 2.266417053559555, 2.950433252865158, 3.3050449057918008, 2.478457335026994, 0.4094746167202006, 1.096810624842116, 3.1598638129538834, 3.9662687657563827, 4.364913788844802, 2.282600646168691, 2.0084402125600307, 0.6999652756753105, 3.0422364032039497, 1.1460107527335772, 2.8549693609845, 0.8397603850858164, 3.586777347669963, 4.264916450277864, 0.17095021884135486, 3.531301695223584, 3.146340216254313, -0.3115213849483759, 2.572461876019024, 2.882790203231003, -0.3126564310037938, 3.6476180884539944, 3.708169714203367, 2.7073502593339747, 3.3411143187322287, 2.518567656533427, 3.5697766237053865, 1.1198336456740563, 1.6356868767276382, 2.549296253435131, 4.318640677999618, 3.5948615728341324, 3.146736265264743, 4.270698227566312, 0.5117433166472654, 0.8630208167754054, 1.1763071462064136, 3.4801500679157518, 2.3832933393086635, 0.8061180805428338, 2.455877512476104, 2.3284050209774843, 4.067809084438023, 2.933210846824142, 2.7193467588016635, 0.22923109860470703, 2.6682785548165717, 2.3761228353793373, 3.544127611221436, 1.3271789998884942, 2.1759086459332333, 1.9481421502566185, 3.0624118222221113, 1.4603220218772068, -0.32913723127021094, 1.0592630719686327, 2.9602122025486506, -1.193278580708063, -0.03328882577510761, 3.9671986187777457, 3.269583282482015, 1.6471796320970853, 2.1053039371910547, -0.29587233422710035, 1.7852465092221728, 2.574990263438694, 1.9254004986237399, 
4.05256390162957, 0.4099957719151519, 2.954773795855815, 2.542267724100228, 2.7330156003574397, 2.826847651265175, 3.318641674677932, 2.051505713674652, 3.1440775825830443, 0.6679312105512905, 1.337013890270852, -0.3970435911432476, 4.187422428331585, 3.9825936316300194, 3.3966350815438866, 4.136552261399064, 1.5749402163799058, 4.002648845745151, 0.6476045329672662, 2.294980404395722, 3.0204747774331135, 0.3510539220580655, 4.1191462753277595, 2.669696941985229, 3.790893304846731, 1.0617475613901282, 3.241064350767533, 0.0514558309081663, 2.1543540510735797, 0.09754821771685118, 2.419579852425469, 2.650167111567565, 4.215553493119149, 4.111332304903263, 3.059277183559754, 2.51783676829828, 3.246904575363422, 0.8357560655740743, 3.7738681363036295, 3.8441105015706163, -0.013230127284269089, 1.8205137461929466, 3.00327595878893, 0.11256424494931405, 2.519415104119354, 2.9774850453078154, 3.9041183476161323, 0.7938809199389643, 0.16510411621087773, 4.84274805253783, 2.7767201528333123, 2.2332105813235277, 3.657120789428562, 2.8175463767404705, 2.4921794468454803, 1.149096653491113, 0.1474065600872051, -0.04749921559691117, 1.2515682461243107, 2.7030445426390686, 3.0600713317304797, 2.650678171534094, 4.259591320584862, 2.940458004734956, -0.019926879876643322, 3.085168671240447, 3.607430732127423, 2.851921479759724, 3.092034239849398, 1.9961675241084689, 2.128617021531064, -0.1885020298222957, 2.3930153979059074, 2.8759430694429393, 1.562369447356772, 4.175871888222478, 2.276974347770905, 3.2868405011310515, 1.0069274969333644, 3.79004586090991, 1.2501287794674134, 2.693503010394633, 3.1603623976228405, 3.30104327431587, 2.8628468780930194, 3.9450259517452237, 2.8400201025415535, 3.155688548951436, 3.8543625476715415, 2.4536403162158633, 2.593775214349628, 0.940856261326309, 3.393516637351181, 1.099920167095592, 5.150461606101626, 1.0427178660128522, 1.0561804584367296, 1.5579164009543898, 2.8143759481086716, 1.5768686609808888, 3.069369863901797, 2.028065576273459, 0.8954445695415738, 2.4913492484458444, 1.2423564363571413, 0.3961159134956118, 1.7530962874582725, 3.3387580691254066, 0.242562966675817, 1.344732425546174, 1.7425645513718773, 1.5007717649909478, 1.451561892844213, 2.148783184677128, 3.298943357792926, 1.8670245113319575, 2.44764921503121, 3.501084597366001, 2.4459520594019586, 3.8185920554842787, 1.0925308760479198, 2.830583458144948, 1.2604860523936416, 3.3290840522927776, 1.1046205278862673, 0.5338143345073081, 1.3155456359473403, 1.8280645409160434, 0.5110021206154163, 1.5705257864246902, 0.5375678725557025, 3.3207961779128383, 1.6336779818340075, 2.0793213919554447, 1.5054719688187377, 3.594948408057047, 1.1007970687190238, 1.0451943654792957, 3.1499668220680426, 3.444918700514315, 3.2766662394168278, 0.5449138955563286, 2.3113155051813705, 2.054931203591027, 3.24213901270507, 3.2179302713151094, 0.7019089824629756, 2.1425060897794084, 2.76724149191693, 1.7200765842290595, 2.3910017479122234, 2.1843184253788395, 1.6445391200752058, 2.7006872514372677, 2.208489482784824, 3.353657660670198, 1.9093494631689936, 1.96897369707361, 1.4357246494613602, 1.9536564137379835, 2.632999227240448, 3.8303036754326256, 2.6203575672079507, 0.4026052785953045, 2.2635647494863753, 2.3755716580325195, 1.8625535188071605, 1.3073395368471377, 3.191553676956045, 3.283658529055766, 3.1126275460060002, 2.4669996406260677, 2.980393257832582, 1.8532639259031156, 1.866645352191568, 1.5492698288624038, 2.3078376347199065, 2.9841506069295876, 1.474418822521201, 1.062727555495467, 3.0296456111282306, 
1.6170112932623772, 3.7846190893506493, 1.4560719540457758, 1.7255259329230586, 2.0764995342234083, 1.3579049436768975, 3.2837069022140155, 1.399837722939238, 0.29001894779738446, 3.260959355222623, 2.233989780695921, 0.9558052873505911, 3.3747434472036306, 4.050143470350914, 2.876204074546448, 0.7165243761246614, 1.7046918394376491, 0.6819803065986494, 1.3062139099557046, 2.637300784085148, 2.3814730588102093, 1.570177775344415, 2.8308256983698414, 1.9804073698021392, 3.5501224564539045, 1.7374834465086417, 1.7868974070436345, 1.1617020178530346, 1.8744257846592922, 1.4537631950697976, 3.0310164842474334, 0.026724343268818895, 2.4360794775521692, 3.5349573769444205, 1.9473931992246207, 2.9499198837722287, 1.0730651500088713, 4.181132547494148, 1.6467412633948841, 3.216857821574723, 2.7032342747261278, 1.4986956323271712, 1.0796946756431405, 2.3906943657234514, 3.0623912982131816, 3.4528763556570268, 2.1922748803925693, 2.068452090710994, 1.6795487717688693, 1.5352328619826872, 2.78656647089892, 2.0366550647325394, 2.8347828420272276, 3.0984352833064235, 1.8419051736213503, 3.5737059141247927, 3.0248017415307284, 2.1704218556225654, 2.794586978128984, 3.765226072156372, 3.223842561038022, 3.213482698729941, 3.566269012625033, 3.4010316948805035, 1.131953469548626, 3.535590944368171, 2.4845060715593226, 2.420294935807053, 1.695045013169775, 2.913700029009367, 3.2200083697318025, 2.0820077025144923, 2.774788083465545, 2.273100132641028, 3.195370747978059, 3.2158054255664816, 1.8092274574090448, 1.6479442662090267, 3.8000476169459447, 2.6444572286687325, 2.610102078361811, 2.3741287327023683, 3.2883241081102557, 2.019312092796862, 1.4566615316886997, 2.5578744933076787, 0.9328328938617243, 3.9855163317830775, 3.94426540930372, 0.921969706361677, 2.199866048920295, 1.820887756238628, 1.3261376563770018, 1.2539816820019634, 2.6274795283590286, 1.7072177366049188, 3.1520015251394176, 1.7269348732213232, 2.1731125559666724, 1.0221640886152912, 2.432268321418929, 0.9680734208483883, 3.5257924015043156, 3.027058211829048, 2.479153650667325, 0.3959004258128349, 3.9513497946122533, 3.009174235905162, 2.9316174101837107, 1.7163291136809375, 2.052841489113538, 1.5323089160768548, 1.775568961663204, 3.315804475282985, 2.943900923253929, 4.402488702636033, 2.4149762369037955, 3.617767824843806, 1.702416067615041, 1.7716053726016083, 2.15072597999708, 0.9428017613030599, 2.99373622000175, 3.505809699552851, 3.2715904111905747, 1.3390428125548293, 1.5807076118556362, 2.5205507060381374, 2.445627337061468, 1.2221226037319926, 2.1722415251950573, 3.2975263399250205, 2.5805739236949425, 2.4926253237820064, 3.7764418504461177, 2.8084982492205732, 2.3573253959202614, 1.7124861046291193, 2.7243773957628674, 2.2138274001326077, 3.574235490950946, 3.5169925827682813, 2.2522998196583215, 3.112821615150023, 1.8994121550578495, 2.6233457683435186, 1.955110095117429, 1.9078363429863705, 2.7008432785267056, 1.7812399821718983, 2.451268853446311, 3.312417324723661, 2.1959554817541074, 2.5801672417799617, 2.864168645166507, 3.397520995298612, 2.792564299077505, 3.301874370959775, 2.1270726656893095, 2.9325723601899463, 2.2321602164672316, 2.3069482515653488, 2.7291120271957445, 1.927911267754416, 0.9503678368061559, 1.9834417172741512, 1.5785438830201102, 1.5071820462757946, 1.9346563118105005, 3.1202554097104067, 2.94418377080784, 3.1500132038961213, 2.521169068524847, 2.678330875290869, 3.609587508745854, 1.8428897294743645, 0.03685139451418595, 2.085203747835482, 3.2139517817171583, 2.372529727987184, 
2.890071029598356, 1.2570206235893544, 2.9822100514282206, 2.7946669903333565, 2.5910912138693414, 2.1337822960308013, 2.62095537714702, 4.020971781361753, 3.375138296968086, 2.606413711838024, 2.3723127681667893, 1.9379444332399371, 2.3277226423814725, 1.246032393249457, 2.7915009517140428, 2.728766097313656, 4.049737803539772, 4.100521193482738, 2.540756701700773, 1.197432368505994, 1.8134141544498266, 2.3138651323610553, 3.9644572046819335, 1.9420722687705223, 2.2183224234830106, 2.1413062978809996, 2.0055896139994123, 1.6791130806353052, 1.5701179449621039, 2.8704157411056284, 1.3438640753025326, 3.0137184617471973, 1.0446594274161634, 1.9688380047844123, 2.592386412725151, 2.1842515908928726, 1.4008314580648507, 1.0806186594103357, 2.4967275472440797, 3.6436599711545115, 3.3037655789004208, 3.5261775648129237, 3.408040593632561, 1.8385654358171348, 2.7261742995954843, 2.142816060298036, 2.261390051740013, 2.5741713701007076, 2.4963853632379274, 1.882218360864142, 1.666866806610702, 2.3724477642764996, 3.6864590775628936, 1.691959375842302, 1.611300573712617, 2.1212737475564287, 2.3545245905279657, 3.171961863110192, 2.0070594077241704, 2.827291687646254, 3.1196527048287814, 0.8943604060387635, 2.2551436053175933, 3.7281026787636917, 2.06886855152618, 1.5504113912490123, 2.525567554383547, 3.6169429323214026, 2.6930183506335816, 1.9875558924773489, 3.523268837441312, 0.9065067161187119, 1.8099069957146723, 3.187811383966379, 3.5155280605634665, 2.047901545670622, 3.8172143835889396, 3.262602238451174, 3.4419413439123727, 3.4489833806573995, 2.614879235673341, 3.2252600559005966, 2.7427303302759745, 2.7303969558102072, 2.8776194090224476, 1.262866780447623, 2.9228836583024957, 3.107524256178829, 3.2047930734178887, 1.9130300226910606, 0.9275409553644343, 2.141380826182187, 2.7287278045956183, 2.4921514929391364, 3.2599808176056277, 3.5910640378350416, 3.8434992022533536, 1.169160427708226, 1.7105542196153705, 0.9767318141667023, 1.5786155307717706, 1.532716751204954, 0.9949547976215363, 3.5527948138967687, 2.383701565836542, 2.717166077997347, 2.0727823933633407, 3.1703457038234544, 1.586248973965517, 4.160821812016183, 2.1212198878935085, 3.7036652878602943, 3.879432287083566, 1.9948833308544178, 3.04199494589632, 2.505764851183073, 3.0752478917743713, 3.322244697434543, 1.9073120986909635, 3.736479095614888, 2.863853417269316, 3.4129734723857186, 3.488418513081363, 2.238447442495054, 3.5436387461256147, 2.8705912933543165, 2.5462074627619455, 2.196302995568274, 3.5784684843085173, 3.55158245129426, 3.183915332500164, 3.6746952177818755, 2.8462297017754454, 4.0272153173033, 4.033549887958106, 4.179007848295367, 2.434440041340013, 4.041411220247507, 4.701505303246616, 3.102433742284696, 2.4090018259737653, 2.403586224956943, 3.021399977946243, 3.5078028370245122, 3.313043942814279, 2.6461676333438287, 2.7872655228854692, 3.133842698030484, 3.209846654315675, 2.8608638837894715, 2.8973507536373053, 3.6248454895442976, 3.915661341838819, 3.155519339477204, 2.675756984457103, 3.519675490814952, 3.996976704537199, 4.399283050534686, 4.156023872072124, 2.9055929565047336, 1.9011112530838106, 1.9524412414445327, 3.4388761476142897, 3.3219012028323616, 2.2942521393085165, 3.5834269957745843, 3.516972582169315, 1.4097402708561835, 3.1036074657609136, 1.6125561401852657, 2.4235724288626828, 3.42168746671928, 3.0030278679623312, 2.3358541288561976, 3.9073176889200627, 3.589986837702798, 2.733227010825774, 3.6662353661574927, 2.135288122777741, 3.542872889268646, 3.936383680764472, 
2.6236226685764454, 2.1209210217099863, 2.6541542393752615, 3.193300085199721, 3.2314428159027435, 3.251256366423277, 2.33754038070052, 3.8845088413215048, 2.089592291925943, 2.7158783480103263, 2.3591905423799675, 3.5638880306947045, 2.395785265989471, 3.161668263908236, 2.5456082931083652, 3.7374057400808156, 2.6826011301619537, 1.8405832150016095, 3.3345117184517084, 3.792600285904324, 2.3498721586784312, 3.1940835024154017, 2.7132250832116243, 2.325201544606895, 3.9074023537253004, 3.8817991210188016, 2.9521128150320437, 3.645220844748795, 2.373204070732212, 3.743920789453647, 4.40217650669954, 3.7400862238736248, 2.548611731629551, 3.2013720675447406, 4.3195085949746534, 2.2210569812968872, 2.6856528126368078, 2.4943282305470436, 3.536334618980622, 3.3254576377883347, 3.798298871342468, 1.872937186649351, 3.453679166957189, 3.727930120341713, 2.453214695314431, 2.799960202824836, 2.333997873338583, 1.8379303045315978, 2.351353754439647, 2.3526940747015517, 3.3120855708148533, 0.9885897340084746, 2.7092409287724983, 2.685557772292697, 2.5560068670794727, 3.4470505697395333, 3.190294268668865, 2.160210149667125, 2.5472874122915363, 3.776604766076225, 3.786430040568954, 3.4836441808214187, 3.556460641563002, 3.287154471186025, 3.5892489235539142, 3.8768095989813696, 2.7661126699615504, 3.996281853753621, 3.112285954250535, 3.9399849652501895, 3.70146183581133, 4.005486591735814, 3.068837937219039, 3.2116113227859766, 2.9253857337976688, 3.765921328407709, 2.9601534268064387, 3.0471965561074215, 3.269449784250457, 3.379796855877349, 2.558891613093514, 2.7904714249580587, 2.005567073234017, 2.4703842487823535, 3.7825165314180613, 3.5349014973513877, 2.914037118555546, 3.8901382407095304, 2.079318259216046, 3.3070473323764147, 3.8953926303357975, 3.8928875159177276, 4.340015248285244, 4.275896150418932, 3.170508377134444, 3.5657251086341177, 3.171395331301136, 3.9495000194288896, 2.523326295217936, 1.3026069968671217, 2.56765371181606, 3.3009363989174445, 3.18361467490262, 3.9925755813027988, 3.7930267091657623, 4.304279029718165, 1.922222772917206, 3.291387452575052, 3.7420297437272816, 3.3666263620775934, 4.272417190640198, 3.8245937622790263, 2.4436374817708786, 2.4681628592087828, 2.986713644557342, 2.554757981125605, 1.8568185767513508, 3.3396747695701623, 2.3449800295721954, 3.8762599053751807, 2.030163267506005, 3.2557105403113407, 3.698676340755948, 3.9568478908833478, 3.4056597587959616, 2.1860411639600454, 1.2489636118564822, 2.0065124303173048, 2.9979524003277946, 3.4054997089862655, 3.932833987075662, 4.02349369443594, 2.814155912718853, 2.15932506668032, 3.4289587459275395, 3.549411132562469, 2.108170375976798, 3.361597440347824, 3.8604597948182984, 3.0542136503257695, 2.476484391975045, 2.086549259529832, 3.6431511882613035, 3.9992210624154105, 4.17923930269111, 3.989868435904451, 2.9189537451078134, 2.807407475060708, 3.1208244919685937, 2.876056275988938, 3.0666481145413966, 2.5944686711919585, 1.395511934609011, 2.6247972740848593, 3.3462397093517846, 3.4697358621602574, 4.264667764991033, 2.4544661442972626, 3.191974977035501, 3.1355436576264952, 2.428892178896903, 3.564860999236051, 3.764887900469542, 2.0210688692117094, 2.2533908842492845, 3.702743151791149, 3.5391483673554647, 3.948438646080461, 4.0593897729636215, 3.467727186963001, 2.6341482578892026, 2.989422374751972, 2.8857125151331466, 2.057325612444448, 0.7840450270696007, 3.9019818197796776, 2.662213590486712, 2.9827813713511984, 2.959775127139561, 3.638022302813195, 4.046063297459986, 3.688822022914126, 
3.0931245107899357, 1.6410997814874846, 1.7393608242671332, 3.2362246856598738, 0.6916003005419402, 1.4396832384196205, 3.353868175581923, 2.20945520501893, 2.888015907972261, 1.0668136600162215, 3.7700780559107483, 2.3101120373776434, 1.3899839396487996, 1.8380253022599424, 2.7287124928994646, 2.8467558726652227, 2.326884590544546, 1.6411879559400973, 2.609639046682453, 2.5044826203807253, 3.0486460303552496, 1.8092675996158891, -0.4937381538465453, 3.849915342047091, 2.7442018614350503, 4.349024715007115, 3.479268768413077, 2.6632466120084177, 1.586133632497892, 3.429270627067447, 1.2251076908853769, 2.627886365188162, 3.195968242499607, 1.1482657217679582, 2.8247250044533394, 2.7872044478810207, 0.8524517456663689, 2.264143574052594, 1.9214173082940582, 1.6380426489672781, 1.5774303438258608, 2.0246951372886657, 4.1079330218585675, 2.755438754919561, 2.583562787737748, 1.8337288258979139, 0.1885613115300917, 3.382622615781877, 2.269057379232853, 3.2270020193505773, 1.1975269144974519, 1.7316309494012114, 2.422846693451826, 3.32110256648652, 2.496395614313873, 1.0246012940654832, 3.2922716383371484, 3.168953810117296, 3.1311381199531003, 2.583444313603049, 3.123089545534961, 1.832792273808336, 3.7763920171642176, 2.0345258354740614, 3.120246316863542, 1.1947157035881104, 2.764449809256689, 3.836836680649848, 3.257126413127347, 3.4355096508657224, 3.0180230389811347, 1.8459885839530417, 1.7226503605353987, 2.181194882253422, 2.9263224772394723, 3.337916434932407, 3.1504306386719043, 4.159031124203057, 3.0048766929898973, 3.113484155583161, 2.474780691047801, 3.340121062727308, 3.7989488279762686, 3.9848958140051116, 2.5998847820457684, 3.915139384480525, 2.973925101181542, 1.8971592512650863, 2.2483213528296173, 1.2322895509348137, 1.4944566898701441, 1.4103916530251115, 2.268672356704769, 3.539754905338311, 1.7714864319928805, 3.403329431683947, 0.9954729768421611, 3.21230923633732, 3.2304617860618823, 4.236329871205015, 2.970097282282048, 3.7243116004060757, 3.461797259013339, 3.5017450172096285, 3.6109203684973203, 2.7948224488386626, 3.0732890408649136, 2.685628752807818, 1.9683281400960113, 3.4094349805202078, 3.3665982733131927, 1.4162582131390993, 1.8983074735384338, 3.6736563505292366, 2.882558982587445, 1.4084784825152474, 5.019653805613721, 3.8240290075701373, 2.1041429509647886, 0.21117945333162425, 3.4432332405699735, 3.3076017602004986, 3.988881104277659, 1.6851760461245644, 2.488383036441165, -0.12013789383555329, 4.340147453010362, 3.0591657584400576, 4.107040301206625, 2.6282017418678514, 3.565019855821508, 1.7960605606187197, 1.2897780955382632, 2.599856496073866, 1.7610094244414518, 2.4749320231655463, 3.0260486874532613, 3.6178081672564883, 2.466888708290946, 1.402213746483381, 1.7942478474945243, 2.9375168124517548, 1.3936229283108013, 2.163264479601719, 3.773159779899722, 2.2854935964705776, 1.8644065757768522, 0.6088108323103216, 3.1952009081140855, 1.234070690257157, 3.5095188969772493, -0.6065986175491527, 0.33188611125404766, 2.586142392248051, 2.423664544094323, 1.2539555333180648, 2.9879218152043046, 3.3822518393540437, 0.685035493291587, 0.9796795478995406, 2.549684881321413, 3.4983246763538447, 0.3659437407212731, 2.4807028666555175, 2.932567066072208, 2.6577454316201012, 1.8835696359884677, 4.055598460475293, 1.0127576374407976, 0.519356696082373, 1.8330501492773421, 2.3140569667851913, 2.7290517475495553, 2.9695085767283027, 1.7249315975936552, 1.8970763660002432, 2.0118047405550974, 1.3782117371379372, 0.8387148199255559, 1.6379720580113593, 
2.6500938114363155, 2.6491691543336495, 3.412113900700073, 1.5281752076240287, 1.9875038638521683, 1.8243450071248897, 3.9293659169442905, 2.9248129692289937, 3.0426722801864687, 2.3094061676058852, 0.5766896412107054, 4.608376571735175, 2.0775345318856266, 2.6698296359834828, 2.10639950935701, 3.842149795467631, 4.34104834441008, 2.8788640400544474, 2.5789339422778492, 1.9655186072567368, 2.5252159068025444, 3.4267423266140136, 2.3019997382971646, 1.811152715768971, 3.3880978705411464, 2.723588287341063, 4.399364698550924, 3.381992360658265, 3.204781431502833, 0.9515070746612668, 2.5323125170367997, 3.0664739484819052, 3.2913351012261614, 2.402012594305343, 1.0905287813173279, 1.6787023139513964, 3.1699922804484326, 3.5410265499101183, 3.429952027233839, 3.8613572098201447, 2.2511361529290093, 3.5498650607802706, 4.958002383770324, 3.3832257203375993, 3.5853940551113337, 2.41086327024005, 2.003554593313454, 2.7078422424462327, 3.9440720884131744, 2.4532810051596283, 1.9253109804602349, 0.7230651794767462, 3.6856469584305613, 1.2171716870775766, 2.104686278625223, 2.429148270005076, 2.090901378342806, 1.6532425082227786, 3.1409705996072232, -0.03483460430907126, 2.9103796095485346, 0.7765505488025757, 0.9034238938848846, 2.7581439814843427, 1.9901517134495714, 1.6525529435360595, 1.6769384354410604, 1.2943639927570159, -0.4128409988059286, 1.572947121919047]}\n" + ] + } + ], + "source": [ + "score = aci_service.run(input_data = data)\n", + "\n", + "# embeddings will print the error message incase error occurs.\n", + "print('nb sentences encoded : {0}'.format(len(score)))\n", + "print(score)" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.7710588060229734\n" + ] + } + ], + "source": [ + "from scipy.stats import pearsonr\n", + "#print(train_y)\n", + "result = json.loads(score)\n", + "output = result[\"result\"]\n", + "print(pearsonr(output, train_y)[0])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inspect the Best Model\n", + "\n", + "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case). The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). Our best model is a Pipeline with two steps: a DataTransformer step and a PreFittedSoftVotingRegressor step. We demonstrate how to extract additional information about what data transformations were used and which models make up the ensemble." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "lookup_metric = \"spearman_correlation\"\n", + "best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n", + "print(fitted_model)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can look at the different models that are used to produce the stack ensemble model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fitted_model.named_steps['stackensembleregressor'].get_params()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also look at how each column in our dataset was featurized by AutoML" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fitted_model.named_steps['datatransformer'].get_featurization_summary()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From da1ccb7dd88370d1ba83be87f2769e87486f0b5b Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Mon, 17 Jun 2019 15:25:09 -0400 Subject: [PATCH 072/108] fixed issue with autoenv.yaml file --- ...nt_google_universal_sentence_encoder.ipynb | 276 +++++++++--------- 1 file changed, 135 insertions(+), 141 deletions(-) diff --git a/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb b/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb index 1c6f2df79..5a982afe6 100644 --- a/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb +++ b/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb @@ -61,11 +61,18 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 1, "metadata": { "scrolled": false }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Some hub symbols are not available because TensorFlow version is less than 1.14\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -126,7 +133,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -156,14 +163,14 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 158KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 260KB/s]\n" ] }, { @@ -177,7 +184,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 274KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 304KB/s]\n" ] }, { @@ -191,7 +198,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 
[00:02<00:00, 164KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 304KB/s]\n" ] }, { @@ -211,7 +218,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -224,7 +231,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -245,7 +252,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -325,7 +332,7 @@ "4 A man seated is playing the cello. " ] }, - "execution_count": 7, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -366,7 +373,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -378,7 +385,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -423,7 +430,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -450,7 +457,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -461,7 +468,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -501,7 +508,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -524,7 +531,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -555,7 +562,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -592,6 +599,8 @@ " workspace_name=\"\",\n", " workspace_region=\"\"\n", ")\n", + "\n", + "# @Courtney : put the print in another cell and don't run it. \n", "print('Workspace name: ' + ws.name, \n", " 'Azure region: ' + ws.location, \n", " 'Subscription id: ' + ws.subscription_id, \n", @@ -640,7 +649,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -657,7 +666,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -686,7 +695,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -694,7 +703,7 @@ "output_type": "stream", "text": [ "Running on local machine\n", - "Parent Run ID: AutoML_5b011db3-83db-4ab5-afa6-9acf2c1a9515\n", + "Parent Run ID: AutoML_5c05b58e-1709-4042-a38c-f4b96cbae855\n", "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", "Current status: DatasetEvaluation. Gathering dataset statistics.\n", "Current status: FeaturesGeneration. 
Generating features for the dataset.\n", @@ -710,58 +719,59 @@ "****************************************************************************************************\n", "\n", " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:01:17 0.1655 0.1655\n", - " 1 MinMaxScaler RandomForest 0:02:19 0.4102 0.4102\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:23 0.2577 0.4102\n", - " 3 StandardScalerWrapper LightGBM 0:00:21 0.2708 0.4102\n", - " 4 RobustScaler DecisionTree 0:00:27 0.2435 0.4102\n", - " 5 StandardScalerWrapper LassoLars 0:00:15 0.1246 0.4102\n", - " 6 StandardScalerWrapper LightGBM 0:00:20 0.6567 0.6567\n", - " 7 StandardScalerWrapper RandomForest 0:00:20 0.2128 0.6567\n", - " 8 StandardScalerWrapper LassoLars 0:00:20 0.0836 0.6567\n", - " 9 MinMaxScaler ExtremeRandomTrees 0:00:25 0.3767 0.6567\n", - " 10 RobustScaler ExtremeRandomTrees 0:01:05 0.3615 0.6567\n", - " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:21 0.2653 0.6567\n", - " 12 StandardScalerWrapper LassoLars 0:00:11 nan 0.6567\n", - "ERROR: Run AutoML_5b011db3-83db-4ab5-afa6-9acf2c1a9515_12 failed with exception \"Primary metric spearman_correlation is not available.\".\n", - " 13 MinMaxScaler ExtremeRandomTrees 0:00:22 0.2885 0.6567\n", - " 14 RobustScaler RandomForest 0:00:29 0.3499 0.6567\n", - " 15 StandardScalerWrapper LassoLars 0:00:14 nan 0.6567\n", - "ERROR: Run AutoML_5b011db3-83db-4ab5-afa6-9acf2c1a9515_15 failed with exception \"Primary metric spearman_correlation is not available.\".\n", - " 16 StandardScalerWrapper ExtremeRandomTrees 0:00:15 0.2155 0.6567\n", - " 17 StandardScalerWrapper RandomForest 0:00:16 0.2296 0.6567\n", - " 18 MinMaxScaler SGD 0:00:17 0.0990 0.6567\n", - " 19 StandardScalerWrapper RandomForest 0:00:46 0.3087 0.6567\n", - " 20 StandardScalerWrapper LightGBM 0:00:40 0.7412 0.7412\n", - " 21 StandardScalerWrapper LightGBM 0:00:32 0.6983 0.7412\n", - " 22 StandardScalerWrapper LightGBM 0:00:37 0.6864 0.7412\n", - " 23 StandardScalerWrapper RandomForest 0:04:30 0.4236 0.7412\n", - " 24 MaxAbsScaler DecisionTree 0:06:51 0.2587 0.7412\n", - " 25 MaxAbsScaler LightGBM 0:00:22 0.3161 0.7412\n", - " 26 StandardScalerWrapper LightGBM 0:01:25 0.5771 0.7412\n", - " 27 RobustScaler DecisionTree 0:00:21 0.2484 0.7412\n", - " 28 MaxAbsScaler LightGBM 0:02:55 0.7195 0.7412\n", - " 29 StandardScalerWrapper LightGBM 0:01:49 0.7379 0.7412\n", - " 30 SparseNormalizer LightGBM 0:00:38 0.6011 0.7412\n", - " 31 MaxAbsScaler LightGBM 0:00:47 0.6835 0.7412\n", - " 32 StandardScalerWrapper DecisionTree 0:06:19 0.2630 0.7412\n", - " 33 MaxAbsScaler LightGBM 0:00:32 0.7460 0.7460\n", - " 34 StandardScalerWrapper LightGBM 0:00:36 0.5717 0.7460\n", - " 35 StandardScalerWrapper LightGBM 0:00:36 0.7115 0.7460\n", - " 36 MaxAbsScaler LightGBM 0:00:37 0.7265 0.7460\n", - " 37 MaxAbsScaler LightGBM 0:01:12 0.6830 0.7460\n", - " 38 SparseNormalizer LightGBM 0:00:32 0.6854 0.7460\n", - " 39 MaxAbsScaler LightGBM 0:00:36 0.6779 0.7460\n", - " 40 SparseNormalizer LightGBM 0:01:38 0.3032 0.7460\n", - " 41 MaxAbsScaler LightGBM 0:00:30 0.5939 0.7460\n", - " 42 MinMaxScaler DecisionTree 0:00:31 0.1622 0.7460\n", - " 43 MaxAbsScaler LightGBM 0:02:34 0.7011 0.7460\n", - " 44 MaxAbsScaler LightGBM 0:00:41 0.6090 0.7460\n", - " 45 RobustScaler LightGBM 0:00:35 0.3380 0.7460\n", - " 46 MaxAbsScaler LightGBM 0:00:58 0.4714 0.7460\n", - " 47 MaxAbsScaler LightGBM 0:00:35 0.7303 0.7460\n", - " 48 VotingEnsemble 0:01:34 0.7938 0.7938\n", - " 49 StackEnsemble 0:06:14 0.7943 0.7943\n" + " 0 
StandardScalerWrapper RandomForest 0:00:14 0.1703 0.1703\n", + " 1 MinMaxScaler RandomForest 0:00:55 0.4157 0.4157\n", + " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:32 0.2771 0.4157\n", + " 3 StandardScalerWrapper LightGBM 0:00:09 0.2708 0.4157\n", + " 4 RobustScaler DecisionTree 0:00:12 0.2435 0.4157\n", + " 5 StandardScalerWrapper LassoLars 0:00:07 0.1246 0.4157\n", + " 6 StandardScalerWrapper LightGBM 0:00:12 0.6567 0.6567\n", + " 7 StandardScalerWrapper RandomForest 0:00:10 0.1989 0.6567\n", + " 8 StandardScalerWrapper LassoLars 0:00:09 0.0836 0.6567\n", + " 9 MinMaxScaler ExtremeRandomTrees 0:00:12 0.3547 0.6567\n", + " 10 RobustScaler ExtremeRandomTrees 0:00:33 0.3561 0.6567\n", + " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:11 0.2956 0.6567\n", + " 12 StandardScalerWrapper LassoLars 0:00:10 nan 0.6567\n", + "ERROR: Run AutoML_5c05b58e-1709-4042-a38c-f4b96cbae855_12 failed with exception \"Primary metric spearman_correlation is not available.\".\n", + " 13 MinMaxScaler ExtremeRandomTrees 0:00:12 0.2495 0.6567\n", + " 14 RobustScaler RandomForest 0:00:14 0.3333 0.6567\n", + " 15 StandardScalerWrapper LassoLars 0:00:06 nan 0.6567\n", + "ERROR: Run AutoML_5c05b58e-1709-4042-a38c-f4b96cbae855_15 failed with exception \"Primary metric spearman_correlation is not available.\".\n", + " 16 StandardScalerWrapper ExtremeRandomTrees 0:00:09 0.2098 0.6567\n", + " 17 StandardScalerWrapper RandomForest 0:00:09 0.2262 0.6567\n", + " 18 MinMaxScaler SGD 0:00:08 0.0877 0.6567\n", + " 19 StandardScalerWrapper RandomForest 0:00:20 0.3533 0.6567\n", + " 20 StandardScalerWrapper LightGBM 0:00:27 0.7412 0.7412\n", + " 21 StandardScalerWrapper LightGBM 0:00:28 0.6983 0.7412\n", + " 22 StandardScalerWrapper LightGBM 0:00:31 0.6864 0.7412\n", + " 23 StandardScalerWrapper RandomForest 0:03:53 0.4227 0.7412\n", + " 24 MaxAbsScaler DecisionTree 0:05:29 0.1967 0.7412\n", + " 25 MaxAbsScaler LightGBM 0:01:14 0.3161 0.7412\n", + " 26 StandardScalerWrapper LightGBM 0:01:22 0.5771 0.7412\n", + " 27 StandardScalerWrapper ExtremeRandomTrees 0:00:31 0.1956 0.7412\n", + " 28 0:15:12 nan 0.7412\n", + "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. 
Please consider increasing the iteration_timeout_minutes parameter.\n", + " 29 MaxAbsScaler LightGBM 0:04:06 0.7195 0.7412\n", + " 30 SparseNormalizer LightGBM 0:00:39 0.6011 0.7412\n", + " 31 MaxAbsScaler LightGBM 0:00:31 0.7445 0.7445\n", + " 32 StandardScalerWrapper LightGBM 0:00:43 0.4265 0.7445\n", + " 33 MaxAbsScaler LightGBM 0:00:30 0.7460 0.7460\n", + " 34 MaxAbsScaler LightGBM 0:00:27 0.5939 0.7460\n", + " 35 StandardScalerWrapper LightGBM 0:00:38 0.7115 0.7460\n", + " 36 MaxAbsScaler LightGBM 0:00:35 0.7265 0.7460\n", + " 37 MaxAbsScaler LightGBM 0:01:15 0.6830 0.7460\n", + " 38 StandardScalerWrapper LightGBM 0:00:35 0.5717 0.7460\n", + " 39 MaxAbsScaler LightGBM 0:00:38 0.6779 0.7460\n", + " 40 TruncatedSVDWrapper LightGBM 0:00:17 0.6970 0.7460\n", + " 41 MaxAbsScaler LightGBM 0:00:32 0.7303 0.7460\n", + " 42 MaxAbsScaler LightGBM 0:03:12 0.7011 0.7460\n", + " 43 StandardScalerWrapper LightGBM 0:00:35 0.6661 0.7460\n", + " 44 SparseNormalizer LightGBM 0:00:31 0.6854 0.7460\n", + " 45 TruncatedSVDWrapper LightGBM 0:00:21 0.7386 0.7460\n", + " 46 MaxAbsScaler LightGBM 0:00:51 0.7113 0.7460\n", + " 47 MaxAbsScaler LightGBM 0:00:37 0.6230 0.7460\n", + " 48 VotingEnsemble 0:01:26 0.8104 0.8104\n", + " 49 StackEnsemble 0:05:20 0.8102 0.8104\n" ] } ], @@ -779,9 +789,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 17, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "20258ec1fa27452db1ac708b631d48aa", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Inspect the run details using the provided widget\n", "RunDetails(local_run).show()" @@ -799,7 +824,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ @@ -816,15 +841,15 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 19, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Registering model AutoML5b011db38best\n", - "AutoML5b011db38best\n" + "Registering model AutoML5c05b58e1best\n", + "AutoML5c05b58e1best\n" ] } ], @@ -846,7 +871,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -895,7 +920,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -905,36 +930,18 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No issues found in the SDK package versions.\n" - ] - } - ], + "outputs": [], "source": [ "dependencies = ml_run.get_run_sdk_dependencies(iteration = 7)" ] }, { "cell_type": "code", - "execution_count": 41, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "azureml-train-automl\t1.0.43.1\n", - "azureml-sdk\t1.0.43\n", - "azureml-core\t1.0.43\n" - ] - } - ], + "outputs": [], "source": [ "for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:\n", " print('{}\\t{}'.format(p, dependencies[p]))" @@ -942,7 +949,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": 21, "metadata": {}, "outputs": [ { @@ -951,7 +958,7 @@ 
"'autoenv.yml'" ] }, - "execution_count": 42, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } @@ -968,21 +975,10 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ - "\n", - "# Substitute the actual version number in the environment file.\n", - "# This is not strictly needed in this notebook because the model should have been generated using the current SDK version.\n", - "# However, we include this in case this code is used on an experiment from a previous SDK version.\n", - "\n", - "'''with open(conda_env_file_name, 'r') as cefr:\n", - " content = cefr.read()\n", - "\n", - "with open(conda_env_file_name, 'w') as cefw:\n", - " cefw.write(content.replace(azureml.core.VERSION, dependencies['azureml-sdk']))\n", - "'''\n", "# Substitute the actual model id in the script file.\n", "\n", "script_file_name = 'score.py'\n", @@ -1003,7 +999,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 23, "metadata": {}, "outputs": [ { @@ -1012,9 +1008,9 @@ "text": [ "Creating image\n", "Running.\n", - "NotStarted...............................................\n", + "NotStarted..............................................\n", "Succeeded\n", - "Image creation operation finished for image automl-image:8, operation \"Succeeded\"\n" + "Image creation operation finished for image automl-image:9, operation \"Succeeded\"\n" ] } ], @@ -1038,17 +1034,9 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "https://maidapnlp0056795534.blob.core.windows.net/azureml/ImageLogs/48a3794d-b14b-4ab1-aef8-357485615f27/build.log?sv=2018-03-28&sr=b&sig=FSfsmE7TEOgWeYMa8DQjrTZg31z1WEd3uO%2F5Q1%2F02gU%3D&st=2019-06-16T21%3A46%3A17Z&se=2019-07-16T21%3A51%3A17Z&sp=rl\n" - ] - } - ], + "outputs": [], "source": [ "print(image.image_build_log_uri) " ] @@ -1062,7 +1050,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -1073,7 +1061,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 25, "metadata": {}, "outputs": [ { @@ -1081,7 +1069,7 @@ "output_type": "stream", "text": [ "Creating service\n", - "Running...................\n", + "Running.........................\n", "SucceededACI service creation operation finished, operation \"Succeeded\"\n", "Healthy\n" ] @@ -1089,7 +1077,7 @@ ], "source": [ "# deploy image as web service\n", - "aci_service_name ='aci-service-with-automl'\n", + "aci_service_name ='aci-service'\n", "aci_service = Webservice.deploy_from_image(workspace = ws, \n", " name = aci_service_name,\n", " image = image,\n", @@ -1108,7 +1096,7 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 26, "metadata": {}, "outputs": [ { @@ -1142,15 +1130,21 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 27, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "nb sentences encoded : 27145\n", - "{\"result\": [1.4322549411038765, 3.701496597224398, 3.0479749754684877, 3.9308189127466533, 0.8635069938099544, 1.751576288452982, 3.040980961190109, 2.1431064151633423, 2.191154825045265, 1.2682110745155055, 1.2682110745155055, 4.40765538725924, 0.7558847754292286, 3.390359555883792, 2.3854790756015833, 1.7344997332170062, 4.589225643618903, 3.3416602405105817, 3.4048902285962597, 1.6773498215648046, 
       ... (remaining similarity scores truncated) ... 1.572947121919047]}\n"
+      "nb sentences encoded : 27018\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "{\"result\": [2.4007649356586533, 3.7203041050076435, 3.01905608701006, 3.902856364455924, ... (remaining similarity scores truncated) ... 1.6860747366973479]}\n"
      ]
     }
    ],
@@ -1164,14 +1158,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 54,
+   "execution_count": 28,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
-      "0.7710588060229734\n"
+      "0.7764788693697073\n"
      ]
     }
    ],

From b072df97f53b0f1b06e72f33187c76624924c04d Mon Sep 17 00:00:00 2001
From: Courtney Cochrane
Date: Tue, 18 Jun 2019 11:12:41 -0400
Subject: [PATCH 073/108] Clean notebook, clear workspace information, rerun,
 lowercase dataset

---
 .../sentence_similarity/autoMLwidget.PNG      | Bin 0 -> 87866 bytes
 ...nt_google_universal_sentence_encoder.ipynb | 471 ++++++++----------
 2 files changed, 220 insertions(+), 251 deletions(-)
 create mode 100644 scenarios/sentence_similarity/autoMLwidget.PNG

diff --git a/scenarios/sentence_similarity/autoMLwidget.PNG b/scenarios/sentence_similarity/autoMLwidget.PNG
new file mode 100644
index 0000000000000000000000000000000000000000..363a599e89407cd6baf655aa3ee6bce07aabcc20
GIT binary patch
literal 87866
[87866 bytes of base85-encoded PNG data omitted]
zk613Bzi+`O;-PXuQ#=^Fg+Wf#Kl7#Utw^wr#JgMR3XHpMvj#)8=ce@L4u@@zD>NB- zbFDc|YszT;Enme@^35cBCYE38T?)2Tq1Ze_ECQV%<*!3Xl?oYdmD-H)P`rfoE^1-7 z&_rSd1=<1%5l|fydZrRnia7u41t$SbcG>@8@4bVX?E1Z5y}1PoQp5t%MM0#75{eWx zQj{*ei=jw~5RhsDDo8IPU69@p5Tpx1X`v&%3PK=(M8qUSNNDecxA(K3XV2d6Gw+-^ z=gc{?XZVX@0$gjYYyH;m`~7^^E6@2KJ(ffHn;Gr5&n!daS6FS~e%DV!bd9>$oJ&qE zhD$wYIV)abMOwMOemR3w>?_J#T#wIJQ2^6vGoz_ZV^-g#i$nYM2KErhKPKJ1x$W&W zJ<8d#b}xk(Q;SaPuLW={-wWaXcu1WGc z*+7f6n||upr;huVq?~2Vm(*tDgpXGWdN_XjLl|?6?kcAW>tN5)eU?S8>LULe1&Wau2TaQXZo#;`->xqM)Lg#YZH(%8rHA#0MRQ%~VR{hKhK z7vSuf&z=VkY=uv%zV5#raP$tMA2CXA4F?yDcNtTkgCqS%ZEqsi=*GkJ@V6THDp4xA zds6r7V5#-+@Wxe;>O9~N`P5u8zweSG!>+?_rCG?ba12e{2#=42Y73`n48MD2&?$tz z7K|Z$N9<#q)xepFx|B%&GMkY2uxL=rxcXjCmgn&}?{lfPCk;PPE9z_opfxvWY~ zeRgDLGxj?4ECIAhbIA$P9^QjsGH3Iei`l%3cTaYuQb#7SRe#k&ACAB*(7pe;3?vN7y;CZtE(H3?0f^NB7sV;C1cV4!PU7}L1Hj$86OvS(V` z!|v7mYn7Gn*v=jKnKMxFn35D@w$QE(3|5;w8fnF;OS+_8tP9e<3+ zG(PGu@IQkNIThO@5c+IJ*T0lAi|f6?O6m~a*3A-lx80M7qrzhwwJ&RD<=p?nj%F`e z`;u?LkCA2x9rnP8iL`Bk&wDS*r|@HRoZlZ+H#crM9!2>o-(`quWHw?>3QJR;#aP+_ z=9CGH6uUIG-wtbhY*C6whSwX$+~Sj3W~`+ARUkg@8RfaF8uND~B}N~-R$9CIS=-uo zJnIr~6jueRw~)aON>igsVL2$$xGNJ?$Q)S%hpiaZ#n+W-*SHB1cE+|3Tgqn`fxXxJ zO^4K2)*M5FN_d`6=|*B~14qHoR%SS(#22W=q#c*#Ooy3zVl0A23lu%Y z7747p`Jcp88BOu)M9rwuD3SMsKO*8^u)cslFL}P5Igr%rQSy@GtWII?jmYk~__c%q ziLcH0$yfT^o1w|w-U-v8)Wp+@**qOZHu({e-YX5);uHRjFQsq<(0*7Qa7s7{Fo z4pNT&X_oy(C)>(gIb>Apt*>WG$y9nuMwyme9$~>z!b5vf zue$|uQF*pKaT~^}bzP8`l{05^wDIR7{MQ2VnE&*?r-DSPCa1fj{3`+Q)~SX+BB4f> zXy#E1x`~)v`s~Xej&#!od2Q04rPta$!GlgZ8oGAt;-KBAJvZc=3nt&YguhR9{)Pi{2`Gr+=)>rJeutj_Q(gj|XR&n@4a zuTr69%2nPtc;$FJEH!Fu6dITTRhc-_Cun**G=7k1U6>qRXneVAfO`*G>QyTIIYN(?XY|5K#;_V@XLSwLVcuLYkJdSiCC9_!pT*iRS8Mv zA3up35W|a0NB`7EE#c%oNR=Z>q$8Ib@Rj{6bfT@Yj?oIrOm;x&3N{>9*H8a2fxVYr z?LzmZ{zPB8!c8+y#?uBxxh+1524E!9Cxmx1kVzEp|g#@^IBaLeGF$xWBLtWx}jb>f_P^tyKOBu%&mNE2lC5)xAmskHusNHYm%} zg6CV~zF9E#LExt`Z@Dd}vLG+NvY;JW%PlIF68a9$D@dEDfZZ?bF>2H7`=)5?>@H%h zRu&ib*_1h62NJ#qUZIVszO6Zn*7kRHb1(|tUsU(zo()6(_1d46)(#i7M##VAqWk_Z zlygnh2vTivR9C9PzJyDyW_L zL;JkokC&Z(-@1(4{p>;=By;3190Z;V)sp!r2_-Gm|IwEBDd8aXLpFj#ad_@1hmus# z4Js3Vy7r||f>^!Vqn@XRmg9O|?6Z(v(n4GendG{-B*G-Q zv60{ie@wsNTX(vOC)c)Sk8j=kUNT)U;Z*;Q(?}YN#OB>U>y*TKT?J4#^M6*RWM#}VKo`I#hMSA zE(1fMHO*yMxL@2t!kH~MZ-(-~@8mVHb@@X3ojs0^+6_J=XKut0d3t@@mI`$_6&Ozq zCfNAbf+xy%q&d5-m^)(*XnojM@*NIz5sBiDZuJD$)VyeZpaiplrm^#MU<|1usw$Qa zT|Ok`E&)}_6*T1V5WJ^+xTyx5I+?%_e1a`fu1V7O<^EHpZpllh;qfErEIxIzrv2#M zjs~fZER@AmuQ7p(rV0|h*wifP($h4$E#iPA$AgZ=qL>>Om*R!i9_Gzd$Rj-8?#7RF z23XNU(N=|$lN!ysOSO08?UV@NIg`5m64)UM4?iQtKski`QI~z4Z$s2NE2T4)n2oA= z;X|@sk8wRu_`}Y)6^-JDaY+oHbk9))`NTJ_E~#tZ!6Hbfd0!kJE6*-2_35lSPpU$|bzzjbTB3*t_2D{abplGi3T?sPY^iI0DHm4UA+lkE!i6-t)v}9|rK_HC0&qmFCZ5ntLzXqC_%C-9q(m zYCu6W2CtZnlpb;Dpf_tJTl8#rg(=p}ezi7%Tv-8G84l|`lUVs!%-FS^pW`|Ygcd1# zgOI<(7kdnugg#}lD3<6x9_NoN#m0`9uCsvR?lz*9$K`CL^MxL%ebXY@?+p^X)x5L% zT)h^&rR}YG1CU3t9B>+q!^mmL6k6sQS$zqKlu!Pvf27er!uu*3ggr}~d4zoqxc=mS zf>MqqpOzi%nxr=W@KbU$gGrb4dlvuN$9+2dp2-09O}p!dv{Qg8ftfQ__PW;0;8dMZE=_7psa507fHcY>M+zWJjs`%f(P~s(! 
z)fwOilk@rnI57SC4}a)?KH|T47LQrzqEAiFwDKS~huB`40k@>{M8M2_Tfe@t|Ra zf=EK9hqvKOQ-3AN!l4-|G3I;+D-?HIW#Y_a`S)zWnJ2U(W2^ua<#8JS(BS*hm77tZ7yAU_s zssq28T-sStAX=b;Sq-qQjvO~sbF z9$vQOT>(jB@;b*U`8~#su~E+Dy%nr5Z%H|^Azz4>RQB_d+be~I-t;FjoDQUcdtAMI z%J6AM7xEnxeDuy63vWZNd>sJ?mN-3bkyvo+nAjF88l*a|LrL~*PG*w50MtXuSiL70 ziHtOP#DfsKGs*|bKn&h9Q)P;1#)ffI-&jT0sOkrzZB~h5eH0)Ja7sCez>%vLyC-$M zTh4S*`TeyZ1of#z!ty2ALxrWLQqgx9ZwW=eg+db1ua!z`{k|ct@DAopG{Oi$$yh0( zr*g4vQsI?e6YdAw72XNqkboG2a~j_2bF-!L;Yw#K6;(|Rcavx|V2S7CvtE7uC!GKO zGd@vUM3ZKi_E^=aWnX1N@;LLp`V;9Mh5iD(l~x$o{_4i-qw2#|5#ZC#DW@g-6p`G$ zk5lt%o<(hGbvTN;|EzA9#LBxJ>4`&}>uOPWv{XuBp;BN!Sm0ClyCg|PsJmr5$ob`K zR^Fpn>5Ws}VV*TvBdyvvMZ>rT=K<4L;EgGR}cV+eCJm2 zL!CX8eKFN(SLU8GHI0AKAS6C>qc<4in(4bUt-uH#6qX3|Eb$AM8#2nACtVV|$2HiA zQ8_J^4kof040cvSZNlqU6PYjReAA*tkPMZQG-y+(?ZP)RcN$9>99HUy$A`RUO(cMw zN9*u7b?tllxVqU^7fR<6?hbEz=@Zy*X<$YM_vCRRNw5{6OC`0;&}oj{OrM%iH(Ob? zE3eh#%WB}i5_rx%nhA6`Ew5u!EmGx>p)Bqp$RUxyVjy%rG&LGzv}g3nW8}fRHK(LR zFEV-oAy|Op61>-+?$IlNTKeI0F*IYz_`lT$4BW>7gT5R66fjs3jTXw`(s7uJ+0EgV zq*7OL*N0zz!rxfs1I;iC2@7<&N7pk+ngRA$7KCQ=OA~kxs2n!FUR>tQUV$XW(4aa46JE|2JEeSh&0jdF9 zCC3Ptd~CnXlClwEH^@?mP%u$MvFX7zi~ML2g4mee1lABxz8|$g57O7A^lJJnrXw)Y zu7h9Pd5pJ%KSSQwf6UyWqef~;zOts~P0h(DL3Z?V35K5nm}DCs1Lv)ZrA&4??^j@7 zs>;OzolJ1eTu@^c-sY{}TvqKJw=Y@7Jn)|24eHGs@CT{+h@GED!s}j1EKJLskwD+9 z;v(hi zuo}}-7))&9pV8s!3Wml!(ameV8b>RhH;4w23n~YohyI>JgELcKME)FZXa7aO7~T=AZu=?yq}nFI5#>M|F6 zuHO8z(!Q%GEEje+j-;NryZH`?5s!v3OONnlFHuTppP?!Tpp=M^HN!WRAtyrC652tv zFJpV;cm(#!s~$EGlh4@&a7HzGWc0lx7S>E3$$MLB6Ve&r!G#fCm}4jn>uuN0TPK!F zqS($8;2Z$o0>Qnluw6o95ITI;PsY7xQ(`^xF*)7>^J9N)8^%F(v6hSYV7CC4u159= zp%twKtQTaWN_gaTdG&Z97c(o*V<-7hRu zHVvNJ1fKKgWH2b1*gO)??7jFzskQ9T*Hyd#Y$dMYKOHq{ERKhxZ#ynYQWZ3m#JVFV zi>%LUMrXR!bVtXZxQrOndAv>Af)6C|wW# zVe-LZZLAL1+0SpCn~7cWYW;4C-0M^QBVKol&uAQZ7r2Eh)KJhn$`0;U!n`1?fm?gB zGKz1vKC~|0PWSypZw6shViT*ouA7XK?6`ahG27~w3ycYe3UQqerSbl9x|PAO(yif_ zC4)H3H7oVl{_JAc%zCOE5cM9pnUovRMXal4b$+O7cbpuq*u!YJtHK&bBP-Vn)E`}G zFy=~JIE%SXS={*?02ESIiibKV{Gbf{d`?6G-37{u!Zp@+d1$kM;r>ve+V7oW#w#AJ zjtQFbSs4PN*uKmg^rsdBj%3?#u@$+$*K7b&Is~$sTSu^}k6wwyIFo_p;!R9-PuAL= zLnGcEP1dQn%Au}^_D!?6CsUsLU@k+?NT%|E&D~L<6`&|VnG&xI65I<`erB7bGnIg# zDvt*pYb#9ispe@D$hB&p@^l&OUlr(h>;zmSV;=(2we`~~54;>FOfbu0w1zol2Y3XE`4#H!d#x{zRZ?0xy9)-Wdn|}!4J|9mHlr@9~pDfW-`2gOUgs4B5ynXMFBve z0WBxMRkZ9c3E=VF!(BVQvptB=jCEqtfK;l=6h1Pn{8eK8?)_V(=K|fCT3Y4&uhadqY*(D?^2 zOX1vDptO3+FL*myT!qUdHUgdBAWEl=J%v44$h?5jyX5fp8(HY3Vnv&A*7ncjkQ3fI zlA+-j;&x>6za>ll83FlnR`s9;q_zIi{EESX6^^SuGT6w6!q%PhADiuiychhh+H2QJ z=UKvWTMnTfL5_y(?fQXQ=}ji-k48$lX&2^zhJW}TtACZoSF?kY>9d+&j!(9hl902I zKOIOdrL(C)FxB7ND`CT34RTdWFm3;a(n8q&pHiIaV(+hZtxJDAJ~_k!wYi%Rv4xQxEqwRXzAq_*o!9gMpVN9zcG{e9rE7*8ke;<;n>>qXgke> z*JM!hNMekzs=zW#h_QUPzNl`G6)|#E6rCVE3cQ8L6XUWyNA4*<@KgCd`m!37ef4^} zrWCzGr4P>2pBx!z$=^Qr$|}fYRZgyMzH6Gs!%1psv)4ATCu(h#S()~(m1$@#C!y_A znueJ(h7QaGoZS*_&~P_l!#$4K$-m#ZK^uopy;ShjUhc7z@1wjBp4!_l1JkQPowtYe z_Dba{CDHO|A;HsdNn=emor|%c&*8zuS2{)ff@fbXhBOwEj9P)s@V2t%!Yl17D0h)` zMMI|Cels5#dWw1~KFqGmzs)|Ab+D>8r8vlqt{1K_1cC{)rUoId$3gLx|5R`ej4ZlXkXhENO11@^XVNBY62wjvm~9S_*nr!ZYdE!OF0z+;?aiRCDku&?NVNQJO}yj? 
zOPtaj4bE|&PTg`9V^5LW9ebg*^w$AXB(c2`BEo^LaHzk`d6#r;5bVE_8ZYARD57l{C1+({YYV zH6JbXRAvv_UnT7}WXG)+P|BG3wX?4=sh}wm`eh>`i!dK)+QOPin20Q5;O-O{NM&gL-NaESjEfcxnGQaBh3@LLbk)?m>>41Z zMXB0+H;X%<3Pwpr`SGwO4;2*Bc;%dq#BJ^#p{`H;7#L`MUlUx)e zIl0Y);kNh?O|4TXBgm@eOJAVee@mVAZKlW#9ww5z5mxtm* zZ>l$=0@X{UiDgv|+9#me51LRaT))x-X!gIN2(o)ZPD8+Z`UW_hmiz| zzkB;-Vn7rA6&eF-cS*nFAg1xVeQs7%bAvR&<}TGK*;0_a;0O4=6&?OI{f`11_33v+ zlDCS47*gp6lV6mxxcjnsO66}I;jN(sU#0*2x|1Ng>XT*M6Ph`%U3u2S!$g7HUC9T( zz=41LjBS5z)emKZ=6y!L9o-YcVlPvzcE5=2?TM(AwJHESsyCk09{yKQ!e5#|AD~M7 zBi_L6DR>4kQmMdz`39LuRw*Y8UyHKLpYFw@egD`)HVF@jRD99X;IHi|42J~ zhz2nHm)>^QNZ=WQZ1`gSNi_PADgp2VfDpw#TJX14^uMvk|IbJKOp$~Hh|t?Z>%X+}im;wJ@GWbclWXSgvQSnhckln1*MHUHm)Cz(?bZ|LC-&M4-=a!+ zK6v0-Y?bWS-?7-Hy`SQ(19>ip0@&m-r zBOq0jp?Zj-{)O5%$PUV+ZBU?(&R~?^=1svmLx)GRQMdnUcB^6~_CNNSsQQpcO&9#z z6HFI~uQpV6mbVXY%v0)W-Y%#=pqVS9Z#~I`&%PguD}3*&{1ecLd9u7Lyy_lw%N|`W z+O?c?jpFz2n>?{_BApH}!k5RS9D{08$1i50gIrqCRqExGN8b=DN^lWUOUM&b*)VCU ziiUFWBum|w^gr$3HuV*i=kLzqc?ZPCUTzo8ef{hr;`A9xpUweDv zax|OERpkQ|hpPE8VUufhp{1{S#7ZzYJ z#@x9$B7QnXVd=@A0$N`D&xN%Sv?x@E=&jAFXv3xBX?Oaa288brCED%Jm-1%avSSZG z#O1=81l^una~*2+k>6A1t~Z9cjGDrJ{)uada}*ZKy2WYPmMD(4%e%`xm0jHvMfqG9 zx8pC)?96<_Liyd=v!`|OnRUVk+xlqL5LR7o=$CFoeQ|i=1)`B-w1&Lh@*SmoR5=gJ zwZA0?RZ8K(W5=Z*5v-wiHlKgk_n^>K2H?%*7jRvG;K01FiE)~_fPVPY`8&uIldcQjixWWPoO*>~n@KToSqyd=4A8p2RsJIeNmaxrhWw*TNP2B$jMQ|H> zNb;ysJ8PqJUdK!)IVakhk5b6YT+PALt#E;NUO{m8vsLI(aGm>4wF>lENzrbRbcIxT z{Aj7|9mvd;=Mj*dykWHiJKdbAlxFG+>Wia^JM70XRx)F$dds%f8dVv6wu+{ZTp8`8pO?oq?#Nh(n|;x6V;Gg3#C@VaY_69m%>~Jc!IkEX30N+&bJkon^?f; zOq%0o##c&-G{_lIy0>ypQV4ZV*cMIznJ~TO`qnbK!&Ypir;s;njaF4OtheVF{wi~R zMuVlt{}|}#Prqs{qpdg?lXf`A`FKKZ#94iGFla3-OtD!Y~wb6koJS+ms9w40~_4_;nyE) zYhIHM@e}y=#D)mr4vv=Vu#e8c7rhEeI3KK|T9%)2cu%Oburf|E{!c24u78Iq*Sv1~ zOSUB9Z43M@`P^tTi5FVeQV>+#>nDno@Z5cL_;SS(z1V>xoB_(ll5!#mPyNeepSp%^ zP#4n?7^_P5dvX=Bc-BhO915ly@atoJYG#t!IT+ejHM*X9AG1qffM zAidy+W;eea0dCzoi{DAjYAbDuH}N)5ekj7Co1yd_n!WzIvQ)ys>3AI3^LVLkQkHgt zpZK6DC5AC?3P|je?=a>ZliPejs-!WOFd@z`(D00#7yR`z`5wL1lQqdVOr0rgj4@L2 zXANsuhFFc1&P~ZxS$*;^C|}a&B@H*R5pG@#_cmVW2#7GTF7iB-c$QDUS)Ki6@^IAj zi&rI2<1jrbLD&Gc{U=3Z`15D=D)(T^n>=^ZFmNe?NcvnQYwdV_tS?q3|;ASBluj zuSY>K`sw2pC$SfhWr2&*L#{~%U)VZ7KVjQCHn4^Yc=nw;n&IJ`SCf2Q&5Hp0t8vtO zz=c}BU8pRti(hYkyd0I_`Rz8xha31DUFHm>e)17?yY(@+yo6tafHrf@E#}l-4RlAG z{b&RO+U@E~2KZTV2p_J%uQV$b6D;}Tz7}m;zm#FkECqyw>_o5)D1AMeint?PSNUh* zt?z7nq8FL+Bbb+-ze~;B&(?juQ8nKYLp7o@anseRp>H|H(4I>v3?j95#?&eBK`m$z zxR}X*4tUQluBXOtMsfQLFrUB&;CX?fhwO zku+G9XgB#8;=%rNL@CN(cS_E`;+|7X8UE((asRT@~(5ickT6;|W+vB6NW@9ff zR-w^y1q1ZJMeLMIq7%Uwc0DRqMPJ#7$MGU8&iw`P%{!oBcE3NBFjLHD)ddNBISC>w z>Or~dG)71 z3`Shzc9jAsll%0Yo+6q0dHpCg+RkKwu1M_A4atxQE!6aMIFPv@!e_k$1@^;k07uWf$J;Jt6jT$(LI zy?M?;P+xFDkGjHO&{o}ZMCz;*gjPuLPzo`zea15nw0QE2Y?qEKh>Qwx+N7G-X#7y- zbgqJxBP=^MU9@<7@p|%m^wBVfd0}Ok5idDy=Xs4w9x2o1hQ1RGu)78wPcnQ%fUG@~ zcFudqfLq$_N8j>-I8!rEbKOTMFhmP1P{*2kiUScRU)0`K?cC>4G}*ifUVhrFsfW7ga9_m})s*So!XnhRD-`WN;62gg(^27#nfZ!XkoDF4$D}9< zS`7y17F5~87$Im;ZHx-8dBSW&yC7hG-dne3qy*+4Q=k)q1E0d4gFjRyr}-^+5>rv! 
z0q&9B0z3|N*^gWdLz-50EV(T4ZCifsIV!zHz~1wlUVUC7SSA;CN+r9l~Cqp5JJ}Qo+y11#LLH5JOH9AL*st6{W{&jpdZ}v(1E7tSJTHMTmTdRX&V| zERL4?1vGi8*|V>4EjYMDJhV_dRLL&CHU&u(tR;~8=-C&_w$I=w^&_57vd0S{0K??1 zY;5FZvl>A7U?b^c*}eSzgO5*1dWCESYekL%Ky@1jMFJ?G{GMq!Z~2e&y>gZ4n=rv#``&h-X!e4L4hHR)3eS$?%+`!scUevwo|cT?anzi#21fQ_OVSyB zn{n~p6)xsoeBO4UzD%bkFPhVW%ES>ol^l-G_wz@nA=oti=&@Ylq+KEai8fc*eFi@Su)1f{DTXB7Y6SR)w?G5I&cark9~L1gIPzv*%njE zeKz)CdQ`wuR~W)>mAiGcLWZV*()_Wu>h)}ka$SA3N)|4f)l8v9POtF&M7z3LeoN5C2-8WIx zV}_Go=CaE72NpLWHeT81=KmsidOy0AS3y>v%!mFK@g7pD_*5sppB#xGSYY@Y zf;>?L@{S4;G^_PjX+^HiZoEGYe!DX6f0a$EPCuF9v}7-QdLgXgEA+gkvelOI4Nq~E zexkCDMRds?A^W<{qcvbt;lfU3<)D->X1hdXwkuL* z91-0mbhVYyZo1=QWW~bN;Blzlp3$SS3YMKpVIUw1+5_y4cqLc3*|PyY03 zY0_;Jd@SaiX*zThIms1W!b#zKTl>fg;CX{pdb;9%aYEHMKY@B{07_+ov#nIt1OsUT zy<-{)9I3;8sUXX4#oXUFR-dhFyc0NX!JIfc*}D8;eZGToYk!_r9e? zOAel1{6*W%LB$11q-r-c<}tiM8#@Bq8Enf6ok#i{6Qbo?R+7l~sMFoOLCq?h_j~0g zje|_J7srHfmHQEt2F9*vg~_8{@eGT{y^L!BXHZVh$+EJuKQu5#m}n#$J#&jb15@Vu@!pm}v`@^!(@<5_S~Kn43f_yF!Z zbiY1h3>EeGut>i%OsB-`rj$tf5$g9tfJ;r_DU>JM|M;?<0dsghe{`5O$sjYp%uV?$ z)}w{`iB8hlzag4Jr|fZ!=^pZgkRD$Kbo}NIj|v}so>yznI;K#=oL4#`yi=&}Ty0DA zfI_W@FWfPiRKa%|$LeoyMBgiDl(g{~q;Gi~2c%pmtueGoYIIv7A?(f}G_#d6de9;t z*38kPcE!vq64C-~h$8N*KjX0{E-9~EB4sNtYgo#d0(E-P<=Qm8oZWG`d{f>oqA7dU zp>6L0QaD&1J|~kh7L<5_d{pIx0P;meiP^-g`tN+_Bs84%$dpYy2|QYi-T<>;XC6+|*A%HbZ5OKnc) zuauSsw3Yi!JJNkgyIVx{3s|NR-j@s<>iPWQ_h{Q}9S?aT+$}?-w&C#A4>nMxrR?syCIg1pp15~+ zQwDy4RAGVG51Cii82HKW@V;%woH2e8CVr>JBlhDuWK9Tfx+`&-Ph9g9cx1ykF)gBfm&z~6?UCm?p` zcz6>j?;`kFbdI(eEtr{(tl1ec*CHJKy5pE<@+avId+ax4l*2VLH(9mUL}pkBTAc6i zK;)e*ufmRbW4OF~Ye?Ku^e(5Mr40zFxJwspm&$_VE4pH9mi02O*|^S~Ydz*|6ewl^ zUsVM_Tea)#(j&KxTJHiL@d1u_oCwjA_m->nrN&IekoQ>a(;MD`uv1t8cyZAb*d&{9 z3$CfB2xRxIcv|JS%Gj6^o9HN6KnUmqF4|KX+=jaRv$Ud0bg2XJt2qU@(O;ZSyOhZnbCa{4&jD! zMpSN!bp|X;m7%_2^bhghU1;o@fWRrcpZ9O{9Er3iGZ>!0T)owxEf%jM!iwoV(*D#C zS$qnInKd=G8Q#1#7LT};>fbEZ<)3s1g)o(0iPw8{-5I*S$&9_YAdz6*m7#DE=N1^O zHmMo--A;wz&j8oy{usm9Xp^K-SosKH+LSDwUWJLD<(y>x)Ret%#C3MIufb@j$qFlnl{f&AlS1M7|*Zj(Gkq5SQSVuJ*0<}Xa(s`#Moqwy4e~XL1{cUB| zGex$z?eQr*kD7TrpacJ%kL9?=e8_0KauA4h;<*2PgE3hW!?Fqc3Pm3t{b_5<~AHuJ>4}^fS-1^sUB7zVG_4;ji1o zP-DWtmG?@2D^Do9GzE|l9V0m^t1XS1on;~gZl-ir1qSxafn1yG(i^`5iS@C_e%R9( zG@$C5(fodp6$5>oqW@;iwnH7T9dm(yIAa;QO!w36#~0)*75|P5`yWK?|8>^wQ0skJ zzDF3=;8p%tby%6yEsz;?>ObF<@Jmmw`kNp71(KaMI28XE05S|ouv}0~KRd1fh{pu^ zmH+0hg8!kv{+l?(UzvFSALHW0H6;*>r%ZvsHZ~JeYwvM9kP&C2Oimk`TRjyEkYYik zZ%V{Wfa9vLp^c>YENud*$o>Q5N54?9H){YSRvR6B7#F7;1=+>#qBb?$_q)v%7AS8m zd&%MC)=kn@G7h+)xCFVn6Sy8z7Ic8#__iJ=oU7DZGH4yLzqFpkKIZgcB1!>dZ=)aQ z)V%p+z-v3cnv9+0iYSLzY?;7VZpnifc@ahMxd_{pelzB=H);$$u0vq zO4xjUSnT9T1$1A-_kfDmuszuLR~31MH&BP;pqi4k;Pb^?xy%chtE_sE?39G_8LpsA z75Q+G`3ZUj{FAqh76KY|64f}$YwhpOUnn^g+q&>`{9}_F{=4Y;gz6=w+kcD5WnNU9 zww5w?CarPgqgAg1)P{~&iFNt@@Z2pVrb}6eSW5oO-)nYgsN`|>l-$+^3(6(Dxl&|m;A6v@vf-LC?1QJZOLm?H;3zh$D2{@h)%b&E7?79i*%J7WHYYs1xf-1G05YSuV zr@&-+Mb@SMk7&2iIcP%%@v3cBNh)@ z*U)HhRB&9b_iYFYvGA~~LlDwuMC2B!ikyUykF&s)%7T;A3aIMMR&foXrjdK<{#6U| zf=mtxw#hS@8`qP%gBz}uMM5$=Zl?MhbKT~NG=0PSm!a$dU?`K#zjIN}Sn~p7LNXNA zB?G_mV$1qAkiAWM?InGb?l`6>GMF_-&MVxub2@DEd~C04OVxXcgj??E#{NArZALw~ z!bnp02Ss1ji!M*suVFi0`WtkPve(c!FsJB53o%d14(#pK$ca$wnXHUBz@_dQJA33k zw)hY{G(wr)b?ch)=z0fR3hWP+cHeDKtq1!W%!SqVBzkgzpN437J*z(6mkyaZFZaUj zGRA*K+fSzIW$))I+EuinrFXw?+XJo_%+9KSXBJp-lZ7f9eVg~Q_maUsr>|bT7w5uL zoQ0W#4rwUO^b(uXpqFrg zEHc#c_L{CpD~#ZvwOsCM-s{>|;m>>bFr=_m(bwSO$VNcdz#?^C_V#PJX{CD&6CXh2 zq=wn=)cl?;V^a1$()H~Os?V#H=54!GnEY7JZTwt`_~ub$JOJI{ig~k)GpY`q;Sc(O z+kDI*+LH82das*5LEB6cp6_2$vMo*m;=m>|(oaY+k%rJLT{jR~8jGoHsg42AQ}DO- z-rZ4N)R>H9VuBo*h?`d*^~=7>BW7(uA7iSZXRWPwfmr7MdT^rXW`LynqP 
zz7$R0sQW(ccBD*4f(X4{Ot%(MIpBaABIJl}q}MzfC%#m<#7rvvc_uQYU4;9AxXos2 zJ&&Y}pj6(}Zarf=DvWW3ZRd5%{r>Bb9s_APP+h7qw^7}U78j|H7%pu;mvRk`FQEVthu+ansK{^Da2?P}or6awAAs|91L6Fc@q$o}4(nOF>C{hGMP&x?^ zm8P^v2~i>~7y_Z4H|qE8{hz(>f66`Qj(hI7j)6%CZ?fLipJ%Q)=g%hahh!AnYhYTX zz4V(n>dJd0-+_~DU{$#wEZL$A56SZAsvW5SyYc#RMQ-TH6QLklZYX=B_=KAq&!(}$BKO$Ep_t%%yju~fH;a^#O9F5RmrwQHfDWuTcQK{e^@g{GM7f&%V&p19dVMg`H;Pp5F z2@qPSkd5@zi7C?;rkE*F&faw~y)7;K(`oiNeKyXTt!A@`-sY4emn&>?P_%t^?@UB) zmj#)+_t|{XNWScNOu# z$?Rt4I4(P5V3u7Y`tBRn&;*r%4l;8<^E8! z&6^P>3V5?fWTuYElD_2ksrLciMpv>HU+CzToffYrlE97k}|`FU7DYo%f8W zb?be~cp`EI9U~oQcdnAjW&ixXE-lrBeHu!5=!KOQsBn+wdp)Cc%sFK2vyJP(5&Kgt zxtP8y9ZBB{WXQs{kz9o1+QetfXJdIEv*+H6CX3{*HTGq-YTA+uS~RvU@aWH3)tRg& z=h8sk7-6z#ziTik7{%b1{_qHc)>vvmJ#n0eejJJcTISo5-gCPDp4|*j#cN`J5qPvO(0W>FxTrqrT6R>E+UBcBIq9 z*JyeFaymAwL3B4yiI>l)DwV#s=ha`Ahue9PM8}0cNYU2?ub~j_NK3TB%;!(SU9veA zm&qkGZwvZtVBAWd4X)H?GquglwmCk(<3r@_j$Z44++t^|6Am5vy;MI~=41>`h&(L( zoSBh}>9WO5;cJ)VWv3fqUzu`=bvlYf$~h% zV`M0g7i34uuQnRieAL7{wt0hfprw0mxqUq#0cr9VmmhP1t7<%OxV~c>%V%i#peDE4+^`gM%oy?VuhUVRzK6Mxft*aT8rin)cl2+{Bd6s1priymx)C zhk|S~J5#^hg3Xy;>^nM;r06tuqh29LW9%AFv^MWL)5qh+89z?V>^@jQvs90PJ8YlP z(f&cLcx8mUmzMpUZYv=)?snyMRW-djn%U;Ocidq*Ec`7je%4;5)T^)TZAUM{jZf~d znegI8K0Jg^5|Hw*1{AU$zxsDdhfh~!UTyHP$V?5)s2t+?p6}cQ9C`V|FlcjI`Ro&d zXxQtdH7N?zB#X#i;D{Bn?w*r>5O_udj0Os1s_7e92!FWl4 zRf=jb;s>PlsK0jIzetRqZ@BrMzIeuo_XN_{nm5w+2%yaCKG1&vjb%=pju3rUJ@sQ+ zWi{n=Z}Iy}UwP*lS8Gbn%N0St_MCg_RBC9TVF|sZ2{|;Y1JjsGxLVWik*7CSSu2)F zPNpirNJ&{|@LL7-&(GXy%6m1DZU|Usac_y~Bg4w9Ja%N({Vt=dhDX(vw-GKI{S5Uhi8+W5a-DYK% zG2|J)!O!E#{FiFT>Dq3&(YLdc?+>TN2DyV>{xoOft3NpJ&5yNDp7{8JjX$EE0J?s; z$qowdHtajolqy?OQpXUT& zP{=A7UZ!Kb%U5NQr9q=cZrKBC39@DN`C{myw0OUaEiiGjpe2NQ&TYXrEOjhvD-P0D zV!`Zn(+@|zLEvXWs@B)4ub+n*1Im<2zr-f`-UGw8{!ygCff?qET6*g}&j_E+&%JtE zL~Biyn~75|OOW|dl=n=79?Q;8_Ra?1c&LvV3MC(Z*h>lVAsmF8Wv0^E7ycUrP6k~)t;+Crs596 zk1s)}&VcwFpqnuK`DO(=_AaH2Z>%(G9lg3-W@u`r$fp_b?q$Vho;Iai?+u#f3^dU8 zn>HO(MW^8rP@vQ}!ytZQCa6!>t+b)5MvPHnx1v&~$T|Fb-rLLC0ce8@$fOZOKaMxv zdK7;gmPs`sqv<*+zYk@-nKC6giKBqQbizDi&!p=?&R?1|i*!RV{)S2{>Y;GIOCv~KE*i6Z$ z2YSn>Vf(bzNw1xr^qIG%y2%UrHl_aOxx`Rbmud}*Rk|!a0+^p<|JufH8DoJ7j$6#m^I$GML3Gp$wvGGb&CUxI!l3 z>wOl&+Odyr6e;xB4jO{-(RNy_gF8sLAz36S8OsvhR=_xMUm1&G-EI3U-U99J3Ns0C#(g` z#}J_bjuOfPY&sR*Z8TbHi*4u~r<{&^8r`-qXM4&bo9LSbxkX7|K%nJcAlN|=!T|KH z=F0^^11|&tWg4nbL*}o<9ZDzn`winynJ7_uH_O6qt4V19%Ce;J#`IC?`JeT~G$80z zFR8Ob{&`Z9IODw>x#$D6DI}?oXS3#M9eU`-Pq! 
zhwzc42REN7O1=1*bumX64oF4H`zOKmorHV5dt3}IaX61To_6G2sXs}S(L0jnX3}!% zwJb>7nQSVYd_m5@XCuBRDwwIrwvRbOUe;G6SLue|N5gaZjcr9!SCT;20?N}C@)XJd zarf;HVB^VT>3MFS;|LBzpBc`?GoPy4Zcn8sY4grJ_Y#^}V_wMJaASLH3)Xi6E=5&~im(^JT=;;)-Mf6GVo4hkxnLUO+L-&*Ol>i0Gt(U+l zxnG{6flEM&QW7vSXm6XFX&VT6U#$SWaYCi*``~Au5qYA^HRdGIPFLoOu^YkNn(vYm zr^68sd|oQPmN_kR`KQ|a2|P{UETU(kxL~7$V2QwZU;B9OS+U_I59hfKrGnno#Mn2S zXFgt(?JicN7&U6pA_5|Oc-Mps40zVA|7@YDN3yq;mR*1p8FrK{JZ37C9kM?;Yep$; z>(vd>wgMr?D%YZyBc4fhnT6C8JIH%xVJS&m+-vo}7G;1KQ@Sw4sc2JN*E={b5eXFy zTk7v1*tfvmd~SFK@h$Ba8S|H39)HPNFW{GH>~Sf8i(8m-^53GM zw|W0Br7b~Id3D%yIqli3elN=f?ie<5^kzBbBpnE?CYc`WBCNB|yUV0eLIfL1kUgZVG5@ETNlnbEH0TfdyT}}torfGJjSgd?Y-RaIut1Rp^ z@DW-6^bwwn9w-1v)z#%^LorTO&Gf%oK>fBErArNNM%GEJ`nGJ`$E=I)T;Mnnn|qkN zX1sH+YWZSXDs<7|FEKuVq6>&*+5-i74Jc}=SI)ZLA69nHpr`+SQCV5AV?{FWXYho( zJFrWDnV0#$Uiizv{Esrw|1YNaZ#~iepOcw9+rcnZ1PnM~+0pefv%e>?h3=@{OXA;-v%$?A z-xENrR#0kX zXReH(zV~Ql8_?W~aJ$?DvUS_V?hrRWyM~cD4LQHb;jsWKnXY7qifP0)7o;oD;xGKO zVyR33r$0~U&>}pace5x?c!&Bc0iv5f>|{+w!(PKE3G=5HcodNqo#TJ#g;MEntuV(P zNJ$A_Bwi{};%obSQ1%5=t1mVRr_nqCP?o`b#8)gb%*?T|XqvaDgYe_A1m$Zv-yC+P zR0i6LyR)gGtE%(6pNH>^r8IInRs^KmCSw~(_G`OuXc!3y^KoCJK4kFMdv#$(B!km$qLe17dJ!37Rcv770k z!y4ycf@(06-Rk8Gl?an=OGWa@WL;i@mTC&uEW5$y51W?N*z~=j%-z9^nD&d;IfXwS zTcWiOv!NDBT&II`d78Nm)q@8Ua@`zw^Y;}tcH74JrsBY4)%Z0z`x6mKxkw8@2bD4G z`lg%YbknVjQ%y@zLwiO5`AVKIZ67I?mP9{Ir%*}ZO%S4^n;Y61cYjNJ#txXddq~J> zjGvMo6+})0jyJ>SujN!Uu~_YusDGEgeyUWit`X=bApqGo`0rOOo(JUpeE3 z*SbLW(>Wm%E)yyyBknwz4)I3fA#60SI zy9ZS~Mal;;|3|R0tRN4~{s*kgAr`iY1|6x@Sd(`U1O~$72^+tIan-SP zM<KyPT=yr;O@j1$_s&4sQ}6;h^LAPwf9GN_l($ z(X#sJcH={Z-i_j!p6Ir7rRO9m2}asnLeZBy8lKcsm`o3GA`;U+8DCd${}%M=dF~7| zLIYO3&wTi4pnJTL;zDsXdzSW)MM%4=L_(EFwjkfaBXW;}?+r?ckquxl#7(P^HT$kH zQ*1X*>rKc0wrxRM!6N+l&oSbK`_!c3=P|ksrq1)NsOr z8n$qT>y5Ng_Lm`ZN~ICFaRU51w-U8Z`T)Y2?mS^Xy%v8Q8O1&zcX5XCn0HRVLvq>W zVvo8sC5?FqKcwq_qX(G>21vZT2|VsPb(e)RaMAP|t@4~|Wk~Jg%f{w@yEW+mDK_*D zMVIxTz)6SXGw#K)Co6QX`Hz97L5*5i1K=U}(V zNLbg4Rg>(}OWYb1cj)tvVRM6b*Z$aOG@q+2hQD{K>3EuaQ-1w64j{1uc+-I%b(Ic= z7-9_De}E<<^dvKm%65tg3UC{UH3Z5lzhdiq5SK70p725n0rlcxH3?whb?gY9NTNC0ZfCrlWn* zk4q+lEyksjKoI1n>D9L*@^-CF^?o7dY_vdw{Tar?eunlw6#(;`l1F;zuB$1RS10Un z8bBdG=O)sfmb~h6K&(HHkuLg(X~+UR`@Li@K8<+>cMxIaY*+bx4aa^#^~uAL0~VkC z9Tqb$-hDHcQbIlf;E=_!O1KC9AgP5Sv4LcYx`QA$bs|Tpen^`VFVoeRhE_=UBh+&6 z_!+a})BA-F3Lu5KZVP~%q5xaU*xwlB0mex0#AghWa#w|qx;!-E>PP``;G_3**?(Lq z>0>^Zqj&M@R0LKgpLa&n`J3b1k(ne`V?Kf}I4xuCOox!+m`W`jhRQKhIc2z6!taZM zG0XqTyV*(us-Lr+t8vE-*A8y*CHS^7YZ1&|^{F--xya&23<`Z?Fbl5DjcsK9O=tip z#fQgEb{$R!*M2Wk$e(+ENS{~_`*e@bEK^ObN{gZuc>e&*5#VRq|_t&!2_c?$g_6& zdIXD*M5SEF&$zsQCZPgAW1&tr>U{aav3U~-#dh1W0rwX3oEJ|UnHNgJY9E=a`P=RJ zc@4=6Xh=Am@bPdnLGfMv-ctm~xcz15R9oEjw9)!V`pFKBEF$lTLprW9p8n2wY?;5%HN`OfsQf0^ptH$=Fk0dyw}J= zPh{9oKrbY(DOTS79QCHci*G!7oCDe=dKC4=BrfGcWPw~59bd-3nLfN=^Re{Iv=4d? 
zlA^uSb%pP)VRCGT$3u1t!tI65Cd}sxyB|Vj=VkvgMa#SlJG$ml{;*%gr%Ycy(`%rU zWx8IU+fBrQhXkmdW_s|&w9hAOzfB|Yzn873|IsA<**we$4FLvp!O|J!`R1SQ_;hXS z9UbKY!LU!y&#XOwV1)LVVdwbV8se0FVv^RzpkNn3g4>c&BJ&#iRL&E`s@k5{Q!(im zH?CMJyqg(!e&`)55`Bo%%XRhaqqv{OvXjw_|3DD;V##zaIszZ45#T4uxb|#qm{LyC zT64h5g+ML5cSQ7U*Pz1RXbWZIbQ@VS{$GT{5Up0{BLIr1<<4wXR$E`q=VR~w15C3Q zHnFYTng?j6{&jybN-fq-r@LFf$#~i!C_Sk`qgm|TIWtNtU1~<|4e5UuD8N!1fbko( z4DzDcZ`J~-*54<%7r73By{t>>bb((7PbA9`1#;1m;F8efE9x-$y~Y4Qn-&O z!ylcd&yt}m$*6tEN|Vy-cBD-2$(=q&+Y(sxHt0ruhGg>1trT;pNe+3cs~+5oSZy)9 zD~`ykHuCH%(kf5uLhB@v@Ibm8*NrLSKGF8Z^bGmy+iR3~LdWcOD&K4YLh+x5Z5ZJ8 z1^E5q%D?VrEH}Oj$=LFp;B8OWf1yC~*sjg1SkP6@Dl@VnM~01tysxLZC%wS(w9(~K zn~OnFc3Wm0poE{4vh@%I(rsYwa2&HAY&Z_y%h-8&@iM<0r0chsYBaN8_H47BB)fju zjo8e!7|k^?fKnK8EA$J)Y|tr(8Pu-1wI^Hc*u5LS8qcI3@zI)kW%O7V65W6#V7!qZ z0uAj~$B02_f7*gS7hNVMita&ZuxC!vrn^=#_dY_qdI-oO$=oW3Dh3g|#xBGD#WPRQ z_sX$-8Ut%LgSM8gKHTgdGgR%kUqP(c9o`Jy6A00hgYEzl3S0|=cf1MJuXn21Aa+|c z?|(QDw&;HV%s##Bjy$VA=B#i+Cith^4d&N(z*>U)9>w@ZIISEv*iJ3U%LTvM@Mm3- zyO!-PS~&eyR+OB1^UCF!Ql!Z4ff}}9E5sa14mjjN?7Tk%d$c{Y2LXIQNxFZv68HU< z>C<`4OtZ2hk5zAPW(4Ofi=}~{KxOS0f_NNV27WUH3rn|h`HW}^X%GADDTxKDwA#g~ zGS$xq7J{I+YWWz>&9i^;bNGYE;4zu&g z+;I=JG*#;!=>rKfqpfMQAX~vKjRsQBOGutS412l-q8e!T@3bBxWNtmH z!fcO@X*E?lZugkp)*k>UV)BNOp8)k%fOomM9-{=zfg@pDosyj(EaD?kcJ!^fkHI10 zXYh~w-}+=ZPS>2j&p>X$na7l%XLyfPo{sJVV!+J*6hVB6az3rqyB)3GISVvIu}$bx zi8dQ`*}|8lx|)e2tTmqg!Sb;#KCtaIHGXprCGWLOv zF9&FRC1V#L;qbEhAHRlh86!s+KPGIAC#|_VAIP|>^w!6DaAYEv)twfq2ne24S-&X_!U6^>ytM8=;2cy%F zybesjE53|GN{j^{rbiyzA6Ul(0UJ1S%Ogm#+vA2i6^apN?h>m?Y0EL=j#>BiJpZ)&A+UwUKRx;All(%}H55NTDAr-v42b%KnB*?G-rp(MsS5EOnVfCOH2wy#+UT>+kW zytUjK1~^9gGmxG4>8Gn2H_(R@&!ETYo|B2adAU#ab<1*Wl9KRKGXZZuT_j=Mo-kJc9 z>s)?Q!h@f4Z~8y?QeLq^S{d=*+y6kpCVOVrd5r$T4Ws9;N?hbG)0+>-(*8$deiR0I z4SkMa`4aAaLTQdqtW=*nQ>l$ys2sJHXa*cz8!#)5|G^?a-@?FTl^YiGsyolm&j4l; zA41Pwo_|<{4ux*dwlg?GblF@Rmy~T`bLU?#PY0)MrgN#x%RUz~(G8nZ+g+ui)gR)N zul{$c%K~Bud~pBwdX2Th38D-k}$}_w8kEqT}#^@zZC-!IExzJPl zUgDk7SD5I4Q~GhK!NNds9q4WIp1Be|k@NopFyZ68kXKDieeyHs$QN>EIzZ>7PZ~Rj zEgv1A%j4KEZNyS;2Lsl=FC}m6oB}YPhrCi6alx;ubuxPsz^-5;2hFguNS1Z+q)(=DV<=z4xhWAwfhmnmW%Mk>u+4JMnaP|jgA z3_Y92G_nr@;Na@~KH{5X2MOPA!F@ku|94u{d6~)nf(E| z@D=^Vu;lxX49nPgot(@eNM?-IUl5@eX<00u77;8zV8+G1ki^n*1SZfjQ@mVUWVH9{ z^I`h9|IL0o{PKmwL*KE_GE7zs^>$Zo|6tK|xu#io5|F)fzTha}sMF*94)spyL*^~c zW1MGcd@)`j&*D{`JxxWxK39JugX}{WxfNvL0>6Pl<`4nj*gzAu6qN$QaG7|)j$Sv& z)D^t8;NeT5W_FKbv%y!*r1fM{^44E*R+gTb^CsWI8=Gq`jv_klx7|2n{9i+Y^L=@G zQH^bCMr^XtaZo@EHz{4QstF&TS9cTg2HBs#@IF#DKPrZ^vI+mxe6sU%$W^?s36wHi zl%UbysS&%GNFV+hTk<#-z+EW%FLEShWvs|Dp?VGU=05;}0$;Y@o3<%Eq2qyn{0U{g zn({BSZEmcarC3~QIqr8M_lOIh*Ha&RDB#O+$^bE!G;T1KuD7Fb%N_fbkjQk z4*YF;SGP1LU;jcy$DvN6Lsg7Gdv1Xb?oZ5ldkW#{*{uC^0jzERY{W0gt&{7`E>?y_ zay811afqu}GLF4jP|4Q)my4VgP2&^C`-xrv2Yk*V1L^CW{ez_V{DA=r<0{t>)2jh} zRbwu27%x*Uh}7wSqEqrX&aMIixv5}2lxQc9 z`6kTjjqN|^kdntjk!SV{zJNa(3V_~kAkL}t4;vod3WdAHeo_BdUZX^a>Mx00x$A!v zp>7Aa%WfSwA+c+R9Km0g>#~*j*Ngu6<^AGS-N%RAN3a3}mSJT;zZT$gLIAtmzlfKB zaBDjREUn7dbq@&X6w#}3Wq>DE#%reQoW$rP-1+~;6%wWyj^lmObRg|Nv*i=gI^hw$ z`S`yjMEc74Aq%KA5pZ_vI#%${Mu2>4glSOj89p{hlc})zNSxx7QqT4^z}}7X{{XAM z|8*vN0E$?4H0RY`SkN9#Vmt(YKXmult=E`AJ+V`2-exm8e6eC?B~SPY+BBaPEBWZh zSrx;fs_opa$wwKLt5umpOb*0*h^1b|)XYaGLougWzR0MqSSI&BgKR(EhI@K~2L>bGa4Hd%V%iN+MSISiyRP{9mPsb<4KCFEgIo*iS^wJMOo{J}g#qiu=2; z)=sxOSK9#7l73~Ke^*GMAN?=zwF)m`{lNF?VfX*LLZN3Y=p}Yp z`Zq;hcgqvvo6bcos91Q}y>vgDOl&tS-$-18;B}Davmd6kP8bTK-+Y z^pO(3!-M!Ej(`8X^4R~4-~6}`-m)?Cv<$!|hyT9CuJmk!rXhp<>3>&sENClB$mOsM z1Mu%{^nqWGO~T>(fR`WS_b~$x;YcP&;DONPY-*pCD=00|@~<*KR?R%suiz(3c?pC4bWY7;~hqq_}LeUZ`$6o>7ogd7sYE_Ja`31O?_as?@B;k{41= 
zF15Q*;wfrghqvE}*yvH1Jg!V>DMI-V%=*E{@w?l-^t(ZL{=k5jNTv+Ud%IhPp;J2@ zhT9#I%g5joc|Z#?M9(uQobpF+6Qpxd?7MmVw7fx$c->Up7>DMgj!;4XZx&{HlZr~_ zl$jb6yy~1L?%Ig7J5CLcxU191<8uypPE0 zuB@?LGSr{_UCWt0k}<_inH-}=+?S3TFC ze)mfArGW7dwPgfu+$IW2_G9J8j1o}Fu+Yq`3QT0$sm(a}fP<8HHEi2vKG;sGENdRs zpd&sipXG;jv(bqtRbEc1iAWnuOW4a*&mt%y5o^1zSEfF z^HY2@7Yes0^A7mM3MnkkpwmdWHP}M^93p+T8%{&9-j{3ujoNuGE5)aFOYWUUfYU}z zLc4($dD>ILiSZz=C4T!+HLh%1S=3jZm{A`Pb*99 zsZ&DF5ygwnwjRcG>qe2mE*n!Wli$0j=ZQS{$mvDw9QbTt&jALdvWi1hs4p1JsT%|q zYbGN^ej5HXv1*!UzN2#byKK86R11-xuINS}V=_MkpCw%O4T!JI7Vb(5F(;5Cy>5?6 z7Ok1rCo1Ev$e5cu`?kzhxD05RBe8yUP2kVDnXXRMu z2!1cZ1aZ0WCRMaDYGPe=Nox~*YrsOPC`;IZC@Z_Uw-#(4n=)SWJ;-n~Cu`JaAW0Or z=rkct%@20qkxt;PPuRMA>K!KN;k4&*vfTY`UE~{`zCda-UQPVruz`wVI26;Zh?Slu zdx3yzoFhpjz^&S6BQb;&Y~Ls$>*Go~fM+}}qg8Sl@Y2%L2(%eQCQghTg zT(UC3&W5VQOhC^oB(U(0tJ-hwt-6a>Ql~@4yF;>%I&NA~8Sy)t*4K1Gmxx3k)^Y6v znV0(~7!-nYj}t=l5r)6&QMEl>UI<&h9-4;=!?Zpd2cfoKq3|L#6WwX@==h1WU^R3l zA;bcCjNzq4|2_$?tU}@L1jTNCajz~N2JGJGly?f=^ZL|{v_V2}yQr6u?~7@#n?<3s zfm0Y+?pCJB$K}UUPnFsodQ$!ts#O*7WIHIQ=307>g?Ts;33S0PHPB!+Wz|vjiRX}U z^4ru?d$wUqzI?M2Iny;tUIc|7ItT-nW#2{?Vk1R2)io6+JeolofQgK@U>S_<2Q^4J zYswPcgiFoIBwndBc(ZP_cthg^l04Br+Ag~4@%Etmmxp55&;TwiqUZ~8l_-Ju6;b>csc#e2&*CVr ztlHgxV(SH3el#`(D?#MJ$nv5SZe(G}}$tj&+4aUw4n=v%!9OV>%tVHPh-lcdz6mTy0P8!X{%;q*Ep%UVxu znb2Z@q@r^0@^xD;x*Zq%lL__ijRUJ|-W$d}>K!-=K_d^A0^Q%l-vDg7e-0tKTH=p~Zy>x_PBkVVChh~35OSbOHOw8m zh04Xtn^5I(4XQ+7!LtJ!Xn8^;`;x~U?9~x**LTA>{YOM`d*m$6s5iFSi%mG2CEZ3l z(p)tLA{*ZgEvxs#gh*py{M5SB?=U#B_3^Pz?e84u(2c0D0Lb=}*Zay*)E@IW$z=_A z)0;z8y!KN(P%Gi!l#pkq2qEUoUZl@5g(+K2m6Mtb3_k`N4_0MKc{O#xH&$32rrr_y zCs!JtZSJAIhSh#z;YSr#op^g-1<}NfBR95LaidbErhF9gJT>ntPiDatkHk zu8)*!fR=llzl4$`)ibMjHBMQli(+LPeFt$TX?S%CXIL_Q$U#>h-Zf z0b5VWssldHu$a-=clNhE8e4%KO&xc2bodc83@~l=&WWB*a0c4$SM@ zPxTN`1Qr`6N9f>gM@^P+(jLxDR*MxkO1P&Swz=qvfLKoHu^O68eP+e0&Xf8OJt2M2 z2aRmqU-mmsb&*bhgI`BOuUyp?f@-5l*Z1#;J+X8r}hw zC&}H-s`c$8uvasn%1i*eHl}T5ZDnp{0|Q*gxrZ`$*>`d)FqG&&7ue0w99j8Z&q|@0 z`JR^NtZ91hnsB#kjYR&$?zF5_kvQ)AM1qI1Jjzgrm#_l2YU z^CocHHmumq{E5od?_|n~Jo?uZ`z*I?6Ooon-9%)1n}p^h2z~?uQ%d6^K@Gd2rbl0o zVsVPEQLeqo(DXHeKQmqX|Kvv_4TbbFH18W7dq;Ud9r~_N%^pbZq-6w6t8R?P(2iL397Y?S|l|A zt9Wa(0t%O*O7yRu$t76tw{>yAEtfEyMzEyL-f#4{3*Rx>CsDHve=+SV(jbq08!U~T z&H8MWGbp5#kQUTt#42rL231Zb_8`%0nh3k{%;J#BzL1_BU>CfbGMGXz4vJfzMH?Kf z>cNd}XZ7P+_&1ht%i9y`1skV)87-&OleeCSok;AO2j<}ES?XE&#_}u$B0u^t1`&mo zS$@_vnlYX_o>>|7@oq3b2AY6Rn%fo%gu;V8*%+?i28#B>u1W$SVRM~SxZMF?#?QzU z2LGvP(LDDvt;viGO80`K^$;cs6WrxJUQ#VijNv;R?*_W-87=Gic)aS&>zi`lXmK-K zSzhORhvC6#R-u^s=C`0GduzKDA`DeI4F2Hyv+u~d?V44=KR{_#;Q91+)rL+e!=g`9Xs%JEGvJQsW|t z9+Gxn1%fnTAwCKa4f87g_XfcX3|B_~C*$XTy|nJjz8`?N(nTOA!n*;4+1_P^4C_EK zD}a!?k_*TZR`pU{=_PxK6b*+89Sd(yTE@}KekGgX(1(5WcN~Gp{O@O7xA}qC*&K+b z`$LbmEUtF*JG2nhmX&uFg5b)Hs*YTLuIRe0%V@b6yq_~G4v6a+Wb_#z)v!Y{Up?q| z7VhTC>PW?h?Ja1B&S@JwygnZl`Yp;l7cdYqOxT%PRo+H6F9mMhQ(s|JucF1|Zo~}N zxsMtcQq(r@*s|hQl_%*@FH*{`>0aOus?f{1K#bq~z1fN=j9=;?p$Q8U<$o_ulx`3z z!a)gD+bSsjXO&UZI)8PbH|cs(S8BQ%ypXp312(BQcY*yDi5H(cL7Yg-gEwQo!!zON z?Y#hHt;Q)hKOE@A?+#D-v?znSSzy-J+h=77H1b{}DsNg=A}wj$R!=Qwb5A;K6I)dD zu${^SL{JyyM&)p-Z)hdCw34K-W523nEMvcAn?{@FLyg;-g%Qyl2|5KLP67@WJ?4UA zq_p=}9bTu^Cx?aNtGzy(0Txutzjsrt;+`&`VKoPY) zffFgh$x0SUkD4Tp-|Dlv-q=D|Cv0$(lKrGAIj5rFJ645_EJ6pif0DKce7g{rAnx+}PjYU6v(sRS38f8`?>xQ_84_^aHV3UT+^8C->Y!=6A$b zulmiAoXTV1HeEy_?wKrGQpv2``DNUrS0UWkfIdoMf-HN9|@Q15IdBr%+7 zXBvEQ^t88 zD^72cRK}Z1U|UJC5QD{<+reA&JvChR{`&F!hhK7P7nA^X7n^3%I7Q}Uz8IC(?GcPG ziZ!%QaX?Eo)bXmvM}usI3G1=^IC#MBP6zzG6e!0jt*A4`1PFnA#Z+7F-G*l~I`s(Xn z6|sT>fnm2z>ThA4Hq#vpa>gp`706bADe|7VAfXV@x z_(_?7{uqbzz~slES7s6U#BZO6hz)~Ku*Z{=&)TuwRPtWtD_tfq;CCHjC4^#TG&Hby 
zr~}+F96gPI1drK zLrYd%NPJ1R?}GQ+I~?zUGzUGI#ZcJt=32S&F-M_G&e#ggl3=@;t_{J&49yJq3(aRv zi8m*DoH7oiPeW5A+jSVY92}JKD`a?+&lS-Ip*_~&nGYMaUuxU}#>rmF5=G*7(@vR0 zc|G5vMo!>u4GTRo%sh2L_qkGvGs1LV z^5Em){1{)C04o9w*jgloMHR9}<^0OV(ua${c`+xU_ve#tfS9?2u3zv^9u&GgcWJgF zl|z9;RY#^5Dm24vm_`!!5l*Uj06(j2hn6bp-(&OA)_eTSnwL9QA+IoaTPkThQ6yRaJ}}Ri^|PoMlz}l^HI~Rt|>=TxZBq|Fg9GV==0S5Fn3y4>?Nx zd8Z!oCCV(dGizVE?(Yt3-CaP3`B0UY%4>S9<%4;IPy5_E~sAPhqFG z^8wF2p@t;j;Xlhy|Lgkt-<{h2fAOVL{@*{n9&VnZAcq$l@S%Z6-fcE7qqhvuRA+{- zs=%yz`0G;ha_Cmv-WGmucTj`Pp|L+h+ya@!7g#pEQ}#FoKA|v2#u8YF_2Zs4lRn*N z-Mw55G4LB7?w${?Xt>w7pR_0&9s^_OzRAF-#1Lao=nnw$jycL|E-C}!cbUC>XFerU z(bN{NT%&69&=Q)9*J43eB3M@y86@$-&!(mB<0G3d4f}d6a14u zq>l%cD^zeEb$kc@AsfEWJL`Z0-9jBzEe{haS|QYxN@k{Uuub0m&Ch3jT@o(J>F zuQ1PQS+N)QE5YY3pD4wQSZ7S2ODs(dk|bNs=G-{p|@!a-aydQg0x0+{y+Fb ztTe$bv$}oDQ1MYk{c}|%6~0)vdP<06QVG-{2$@2>W+g>XAk>c}56*4q8K|gwEV;cR zC_i6I8Du7`yCUX;2^+vZ@y9AeNwq%!v*xtW^rU22ypE{LigSbW_-?QGSqvP0=LA&j zx`@}0HDP6OVJ&kVCOc2=OC zMQ-GUSQcqwIcDc9GJ-|_tb-?h2!k2ggj>3x;O4^E+WJqIpbP%ajj34PO1(4^AD=z2 z<$z;0-AxQEkm}-|Cb+0o*m!}e^r%{E&3-k%;6>#VRv*i=!AY}E;JJl`B3KEmZ?Fv} z1SvTRq(3RVJ+)GAreU5b%i^R|iZ>#)TUag@(D0Gm)2H5_q&^*+TaRzow>s2lH#=_b zIfzhgh{U25HtiM7QT!felw>eSh>k# zULV}WgS4!Sdoj1I*!y{Q;8Un-{>qp0D|*T4im3SVVZ{?ut2-H&Q6kv$NrS19@?E&c zr`uEdRrw>+lx@FAV5`aZYi>*PpcQ1`_DY!f`P5w0iMWj+UE<*<4{X0Ak zUiNrN+yhQ9YXwHOzqNhTqfxTatJAx2uAVJlbGKCj>r*a5RoqJ@BzIQ?8)7#xvu_Os zY^2;dB_VH_BXOB|tBd$9-!fq$R#t?VL9@3+63g+2N8Aud!X@R!=gXvw=@70LzAr|I zJu9FL!3006dJ`OOqW(n*2?L$4ny3dx$xo(h_2KxZ^qS15`6Op*{F*># zK*zh*eQ(|O*1K=L|7PSX=i6tWU4HwwcT_d!;=Av4>%AB**-D-2v37RtM2~h3&0c$m zrgm8oFr*2ig5kqP>AO?XqmNLu)hgFhe6~aF&e&R{8*1k;Qs4Uh8<1|?+#=tvE~W() z*tdAi7FrRklb8EaN-AwU;nWDX!;)XY^l%f1H=S&h4Qc^X%IMMz^7sIrj z?-NxLEmEEp7r1PTC(2@8#}jjNd(J2q8Q1$K6q}h}p7yfxQBs_7q=d*w*E`;wY;GNR zQ(C(p9K_kc`b^3_vf2?`tz!!%;ft!xPGxFxX?n*C_s0PbeWgh^3vCbVJUWZCef^g) z@#&4b1K1qBy(cJ4Yq(rVd8!JrK#8bEt}V1_p8N9116FiLH%jVQ{VX;t<6vS!9bM8` zRxZWUA=)>Es!M9`B_x&FYAczQsl3T-aW9?d^82SJ{fse)!l?}O*$YkprU}WzJY}4SAZH z-|)`cz$Ykp)bqr+FZzI@G|sB%L-B$@^5zBNu^+zkvV8gL*Vs>%e_E`%B#+)iVZ;g~ zVo);itXmf+)86?)#@8k@6I2(*l@m3c?+gTIx?5|U(=i^`m@rFU_(kDOS&}H@qJlTA9(kQtyFijtt%hTt zcRy-eGGVMwLM~ z;AQLG>XX{#ndQ%*gbn>OKUK&OTpTZk@nq234nSeVwXlE}6B?}tou75ppImjv>Y1!c z^Q9KGe{DSn^ScIAaXlLQsYZwX(zU4P8%`LOG~{eWYia!9IXTBWBWsrvn)*1E5P-~b zx0cjCXl52_Q%`A3z7&+cKLu^vYGPIv!$x1Zcj{rcPONq{DzMS&Ael_$_8zXTElX7> zdU~VyOnmpDh=};%^PRAQ( zr7bE${Al0d>d}Km3hIBLgDck$X{WU$Op?uJ3sRk1!l4L@@%%@=CDm+O&*~uU&A4eu zzTlw9#qUseu7N8jRJ+xuu%@J^jJ@6wr_A7|5^`ff8hkPN5msD#tp>0d0=Ya(0KYxq z+kNX|-o7Fge)}0)PswaqIWttPj5gwvd2rs^CkCAl_7acsV`uqaRYGt`GSSsXQ_v@3 ziLI@2iSk5Ts`E(5r55b%E|+=_o1&Wol@#ra?!@k7(ys+BC9P=xan;U$l#0Sswxxss zHUf}H*Nlz8n|5*cB_@*Wj7y-}y=P0(rv?g!HSqh2f}8w2T3j^45t!b@OD!(0#DF+# zs@e%kei%VIr8J$UWo*lTFnR|o-}NOTvHK8x&P2}{(xC^=!}|I=pGy;xr1Wc`*OlP0 zZGC4F->OIKw(A=+tvIHlc)Vn+OPY^0eKb}7n#A!6F*!$%pjILG8z&q^FCUhIP>%EIwmA z&Tz8Y^Xal{Gim>5o}dJFiIzlrfZrd76H2m z4unT(`7`#pyetY&`8~}>XLOd{6~131&8aT@Ahm97o1~&E>LO4@U>2GB2zL5W&YQNg zC3)@CwU9E9Q;Z$EZAo#EF>@_&E51I8l{Y(>_4-}|FaTc~d95j5Xf-gZa*DF#;grjLgXH2k z@KCt~${{;3rNk8W3s0lsXWiv4%=Y>7;MrWtzF&K#_|J?{;g8UT)93VX|LwfAuEjfb{HCd_qH)^b~g2 z;naa)fY9AJ>4EjtHIGek=>7e8Nvgj(&Q}%h;<#J!u%p%5M7Q|qScNPVMHrc=ob}8W z5u7vB$9ObAwt+J!1x*g-W)sAqH-{9j<7!J%Cv?q+G?b%!rCsaCxAmj8=0QkKVc`_| z;q*(c$?J^MoLWYmdmY>3*=a6E%KN(3I=uc8tZ@WK?f>F`_((O|rgE~)X{*bvdbOmG zzSkhIt9NtL4Gs(BhX0*ODG5MNjsnBlplv+r_m5Im!`b7lUv#VYC&D7uYKCW|l;&B^ zhRc&I9$VfgHu#vKya24ZL4r)dj z+xC~%I>h>Rk~ihOz2g>3B%7Im;Fq_l#I~AhI^k@!K_8V+VoZG06Z_CN)OB{ZZ&@LT zwK{D~)4R#aFszK<+}sO;5-&5-rco2KgM{B6oI(4U=u|4F2t<@XNuy4EQEduuy2wmd 
z`$I0}OrBp*H>rZLdsfruu~OWbJO#x1qV)Z~R^&-I(@ulV*+;Ywn|s$99PMih6l{9( z-CXbk#|Id{pb`w3ay~B$U#`(xBM$;(BLc=$7V2lRQMvg(utW~p9wnG+Dy`LFYf1UD zNx#uNMc>k?U_aUFZaA>|i}nce@s!7E08Uj1u$b6*07T0O0NIbHR@s+n+6*a^cVC5g zYXG#n>b?}9kvRyFtp5{qwf~u%zJGfx3<9x`JN?_SKLEJ!Bi7sh%wzsHzcsd1z&LK< zFB=M>w^^9-*C&lOYWC}LVSnEwNxlfBBv_aT{w=B9Mz-xY8aF5&!=S|6eDbnnyvjuEsV! zl#OlpT%m=@5D6OQOIL}Qgl9Vwrt63IclCr(=xvm2k_EJkf4hx6SyImWr~rkBe=194 zR*^xS$OoKhKr0m#qrmpRGZF4P8+|t66g?fKpU;{?6cdil>W9#0TBrAfRZQj?J_nhB zkEw3x^i1oI6l2WnlLfx%xxw?P!K;2@jDjpu&5MD-7e=f8t3zXE`e@5FW@d#hM)_G@ z<+?f%j=o)~&a1@k0{Sb^#Gl5$s^;DBy2@hLbFM8pYs_r__mPv_X~L9qd($NELnxR)&w2(Qf)l1HM}~=BztIM zRlr~+dqQWGs-q`?%V=@XC}U5Q_51X#onHC9sFGPLFR!xavoQQ;BU6D9L-oV*LZF?gd*yBUI*!o}DH;vWqJW#kl zl7cZbNxtJcx|dpKs0PRkI`{04#M(MW=NoTOq)KNLZEpSdKdSUV8Q(Ykeud|%^P}P& z?1_-UnI8Y!4&-U~ho-u7gTp1=E5T!OE2Li54E>h#8k6ez@9RPiF+_Z;((bQD323E5 z#z90ygwef+tL<+)3Y9OlMSrfYw$G>1KMrNK_qFoQ$njsr&*;}{Y530pJa9k!PvhlB zJmn)_sx84CTI)i_*Qx6_*ja9E{ zRhWEFv&#)gRV{nyoWXAzACXI#>s~AJk@P}-VjG`J0u*X;RJxXSWk99>bO*8+ z&noF>)sNKbhxK<)XV|egBPyIHhUV*GJj1r>7YzoswN{^wdy1KVBAxAA#mhvQ4r#ux z)fZy)*is+zNqUgOnQ&V5PDObf`K~Ix?)UNx&6*d+cIwSAei&U#@JBi2$u`&8ZH1^{ zt{3-q9E3r$MdCv^513D=c^EnV5H1esR;!X6T^d{X`eyfvj?%PV_SQjNeeqSq!Kmw~ zUyrAhADh~6>PlR{yR)N2QnTR>Z7nDhkuH(n2IKl=t&qudu&(WdLyZ32?)BzKpdmxs z*h5iO%w0}wEqa5{i--=hkGDkr^}-^EdG`ZFF*lhvQl&NLa>9Cif-JRd4<^#Tb3HXL zH&jVB^@&@?D_7RvC0n$D|Ei9v*8NU9$RAhQuW-hQ-396Px0Y;$Aw)+@ykmpZ|LT8t zo*o(X%He(NOv&yq3kjz`g>e`o*``Qm`q&yws+~r}sR82^Crk zd=`*S>^-4>21mqgwt$`SZ%y8_;Ez*oS=4{F{0}bkV~)_X#|Vj*4WJd9o)iD zgUwzS@3^+~3sWYalI$=wmEX7K5(Thini(qBGlD$(yf(RVsL>6D=?h{^ z3C_jrp^QrYnzl)}*)*T?H1v65p-W%TkX!bssjjtN)65YsJNCqET^0c!G7Lv#%OUUi zRY@EI9UTJO(=gjQt&+wus5V9LYDNMpSu=GcOyvZ%X|KQ~3m8L`3}S!6o98bzpv!8L6Q{aAg9i zagOG-5w7c%cUg+{_t)VX2G9Yz#_5C%nPt+$ua6Nr^!=X&QPw)rm`( zSRqFU2sywohKyR+W&82EMuCpfW@NU&D>1DT{h4*M{{lWWfmV9@ms~xITX7j)rv3SC z!HNqGcvL>`Zgss@Bp|usvXo`;7ODQy2?eaRApxFr%xw-`^9NZCs*d7H^_({jc(Sc9cLAD?z> zn%g%BXgTxY(TLO~S1)~a;Kd9O1>Fhdw~5Sjh{-o-RiQvk#y%ebdqCwd4LZh z+GyEHFXVDH_S*(@8D)#qs<2MqF7&%m$C8)~X}?-*nknQ7=phog)ZSJ@8>uRj$1$CK zhkwd1%(;pjQS3ij@k;>M{^kcEiZf9Uo9o#|c5)V|0-OM35>UX&cP?1luw+QM-T2ZP<_` zsT?UEfeVwcl_+bo(ie9zoLh{|z^^$JH6DuO-pt^4jPO|l$N*v(HQ6i2yq}47N&d;X zbMoD8)2_s(%9Zxcf#jzobL3TWLfpUikccpxM}KfP{;GlgvCk|cOgdOP9t4zyuQ6IL zAnm1;6hf>`(PbDq*Sbt>Qf5da*2fCa_mi;N<`Y5oy+r|vJo%(pTm_qL)4o0 zgE&ploCV_IL&}PW$(nzsh+8sD-!QZFG*b4mb1ot!@*Qb|RWU5;ZFwIXu(BwPf8V^}TA$_9`zaWsN7A0}E0F=Btf&jXY9F z*I|~n^`PE4(Mk*G7Saocv4xhxo@!q=DsK6>!3x>ktZUWe_xRUsitE`5kvJmSK)HFk z9m1a^_&fMF@3ydjvEis(eNLxhHUjyju5AndOG%3ZHKhzu*^?OVnH#WSFcf~=EV!qi zd%xd~`#CKmG<4hEhhw9motd)zrp6OSDm0snt`XJQLJB$0v_B}Au{qwExplli>Z>I3 z?fU+fL>lRAs-j7IPPr8gc?Ms4$_D1ZgU0>Gam{@-KTv{p#6)k)Rixb?ReqnT!IX0$ zV(7HuyG5R|EArLYbCwt7s;9=2biB%$?ZZ!P&IG#;P4Jaa12(wYFfBVx;(|6NDZtGK zHF0DJdea}!(4LH(tvS&W=5mrzkanjD^9$t8nRET+azmc}STH5&cB3}JF0@3M$$veP z^SRJYIUn$V8uF@nR^W(}SX0gIv&)0S2X%ub_xuH@Y}WKrE0cv|l*0ew6+s_|Fk&$L zvX&E_MLG6~o1${AI#XW$K6`OPRaZ{;fy=!)>|!gs^|>3W+vCbqaR#HM5OnQP`2NY? 
z-6RggA|jh~7^z##o!%1>aCF_4`(WvHwNT=j@E#?2QmYBU zGeFkrmeiQkEv(Y-k#F1GZueA-Z*&>i{S0imQzCo^b?V>VTgHVbl$&SEhcAKeCua>5 zzM9v+??Zly29j^0`4j?MPk^Sg-`7r!?be8yC0M4&5HMOr9%iVH>TU1oitQh z$1VK~|77x}&bV;2_EzA+w!Q<_y z^{V?&N{5>u-?vRzIGI6=?si2PU(-OHR$*5?l;+gzjvP;1aCIkvY1r-#rQGJn6!_Ri4gqm+1|kZ(|_&vV~-bQ$QAZ^6!kJMJK)ooZlZUiQQ4Bw*U)rk`;yIU2D03^D2Hz7_Ipv2Hs0LhhrBT2bZ82W4<5B;Y11 zWu>O~Ug_Q5f({0jWBgZq>-O3_tbyex%WR{>gJ}|nHAJc2_Eq*{=l(P&ZdQwW;pZ(j zHb$FseslDtV%(wj?b>igK|=V}@uQCq_V%B*+oJcjcIeh@_U0-nf&{uZrSM2*?U1)1 z|FgWnF8=f&x7ZaPr#g=`fpr@i>Awot2fw}2K5F+=4#Q@f$tM^5)^1!_d|^PSMx#Oh zijH^Q(FEYvfwd;TnjI{7re{cWh$CyS1yb2lbvZk?;8`{4^r@6GI!W5A+-GK*0tl#= zwW+7=bo#cMWpKQM0G#%`cAJA|nkJO_x?5~4#<|j5FGiKBY8uWSs;l=NnM~ieVc%w*~mj!>usNb?T$OksLu96euMIdW5rZpqS zlSe4FGbQ|NQ;a^B0@DgEXV8<9Sc4@GscE%Chp3P9$Vxu<#Bv-B8Om^^<4(#;JqD9Q zg(0yGdA4QddVQdaxU;5*!@-oxR^`E_uFwzHq}E=R*Mk44lnmnxK`*GZBsEwNre^AF z!daaoQ)TIdu;$%u)Tveu6FKKi_XSb#<3_115o_|%xrAL%whm)IJhFnv8f_!TsM3Qc zN~k6gI_Gtow)%~h(?7$)bRsL5W2EdX)gj%XCj{=CGPEpN2l=I8O&hU?QVyD%72uI= zvFc7*{lUX+ZG18`;u*_Et{FKs8xcrvq#|AFNF;x5x&WadNsSqwHIe6pmDO zfZFGuiDSi&MVjimJ}6LnwsO>0AO#>MKgBa~?l; zKs^?rx~a<78je)nEzAH8AAJA`n0#W9t?HAX&avP1Lj9pb7doM?;2JB|*KiB(-d^$<@AK4pggg75C4)ZC>;jZ%#qt{8R zjXPf{z%P!U7;Q&5`lfWI`6G9#pbQwz;0#7=5@wSix&DgH#o&Lg!kT7U7jHG?edx?< ztnk1EkFSGW4^+Odh|Jl6g73XZfA`8_=YPKNu9*8o6NNa&dUCVV_ul}Wp$2u8a^H~s z-iqHN&?lW&+IK?Dh>{KJ+MuLPj?Zm!Eplj*89^IHeJb$kv+SLInw`Bqf~J_Q17qXb zFtE9(Y&L;0HuA;-qTC(QQ&>ajt0QEU|FNP-j>!e1LXVN+=?!?jL|gg z_6Sg^@eHdJCn!6tTVD7r&gwSEx6t&u=I3FUUJ~@-#XzCV>B>6v-kE}($Ul()cHN$~ zGqQ2rPIBEW!PXiJ%@4~MO2d9iNXYA=J$#sMQ4ef*pq$uPZp*MOKF79*cW(z8asHix za3i>&5APR3eTVoX0Mp0B?ueA{dC}9}S7c-DT335|+H$t^LLo@*@gVjxx>If{SDDo` z%amz%kZ@)4+}!f@g@My(%LCby?49lNpT_j?8)vs?g0BNuS<86itTVjd1ioE2)HW07 zfQ}o4I7`z;_WYBDxP006?*M34CKaJ8qb^)exHU~RmGd)kedrJXeco>qx73e4$F9V= z1e9##6ueIISxKDj413XRXqZW%zVlATHRwYpr}so2D+BFi=-CeyVGl0Vsia_2KSv9k zd%ix-lY612@(PHng-?5YYIGKNg4uZ0Gt|@YaUw#^9qo7_RF`@N1Ep@M`n%Uk+;S9s zHaVJi`VQyr^InW`Tn28&KoGQ9A~Pc*xt%)vG5|Mxg&_@B$n zy|}&5XL10{Yv|{pwyelM^*1KmD`h)WwLA%g6P6hGu9f!hCX~e7>(FPn=tadZi}5> zMSQdZbSI`aOM4o0OG}(K6{&`E?`F`tKP?TLhQY&`*I{8qUHbjNTyp{gsirMTaAe!yjB~3L}?nF;$^lvZhTnOPpZs z5*}X!x!*o_jeWq@UeT$}#hMmz?$(>vQ=H9tumMFyx7vIPRyM=F?pSzFyyTb}&0Uln^$bJx(gDXeYtqztP z4L5?}N0r2K{9!kO^B6ORS{mG)4|@7J;(v-BsUykUNw?thvicS|wU`4#n5W9~ z#Wz_a2r)E4sv@%nI(jh=l#bIib@KF3x+*Tlw?|!Ug!r|dIsC1uuORX(Y zrW*T;z(WT7CXt^ja<=0klJ?B4Rvz+xO`IlDhF%4@Q5D>H|Hbfn!}z+*XIdQ3&XBm?v=M+qd;9^&}7LMcjnomw^gdt#U!O`v$ZY-b6cX4tIBnR-W|2QmQg@4`E-$wOWJXklhU(s;>=UF^jNc`sjgq!`ZZbEEc5YBuqCfBBa+KB; z&2=3%+MNVW8W7|r=5DD@gk5K#-EkBAuX;vdFmo%X&gYtOj!DYnvHN$d25A%|4y77v zEUW*=Co2;j8TgGxPf$jS3vac6B7JdY~poTI7XzOe(ir z)wTh~{{YR2m}zQ0((I-Cg+4;bO(}Pxbv=_UnhgF@uW5lKYa+eq2rf6uoZpGj&1Yrv zdlM~?vwjHMaPBkb>?}D<31LMKHfT4KB1DogS{^X?)G?a40=_du6nEFDxc7OZcGDm{y)+4um=*$2KzrLk>- zlo`yrMbax1mOVRS1y!?F{YuL<_UCq4CU+c&LRtCkT2u+5FleY$#Ye4~D; zLi0Ag1;F<8LSeb^+b?`KaSwRX%Wl1^-G?iiKPm}xO%OnKY*^T|7s1oHMdq4z#U&vV z%xW-v1deMvZs8$C#QMvOiQr$rmsw&qgK76)auVvsPK*gEF(Zbc*ZcI@`iso1pDN}5 z^yrdOf=Ap&j75KC+AO)V5j&ZG@znO8^3|Ms3wZc=ZhAs@f_);!Pirjh2ba@bk>{|R ztJFHhN8iaDkH7wd?>nRn4V5v>Yxn~~?D(*H7`jGqAF97Ls3+Xu{Zlu9kWmDJpk=yYd&I!%q`I)uSoU1TeRL zuz~X&9Z{k>_Se^A@s++K%6Avd{x`&~B?6r1e0_gr=Q%fm%H=m-A>e`iNYLL<#Fi+o zh&?us7cAP3|K*GSjuz8i=1Zy9Y0^v;V3g^1_J4D17uak%XLq#&a5g;Tt*d{xM^WP) zd)%~I#u87yD1{D5;PGXPUJIa4ane!oj?Sea#Ny}7d-Ifyc!~T%P_$kQaTf&FeN7^% zi&vT{2|Xa@uG_HGH!p6|qBq{#`Pkh=f|&BfAw$rdr$8KevjqVND6C`)-sh>JWgTwo zdB4*SJMna9d;GFfG{Hznz(1j~=%WgXG!YNPZ7(KG;LBK-d-n?I9}bx!$Z=;8Bs-a1 z@K##WQ--VirD_>?I+L1#5j=fM9*+lr(5L++f9S;|!y3(<*M(Rq&n`zjGuXbn!hlBn5joT^9I7-Cg?PB^PR61Rrvb2GkK9=(QUGi!QOF 
zlpvmrgmR@t_}YOO&3DYs3rwTy#inXK4Wg>#|ZKP`aa<{7*m(qc+b zWD+aebem4O-p^liI#BdS!O+3v#v&7yoBoSx1%SR#Rt>SRk27?bFr*~tW#Fo~s~+9Xw_KlL%LO%pd- zQ@eBa)K^@B%N%nswLtcHZP$63A%KlU0SFF+3(vwMJRT<6g>q-4I`)6fwCM`)>ng!8 zZ@_A5F|yydC`*7YjWuU8ZZQZvrcpLwW|~kOP&xI5V3{k&Pwk}JR2o9McL^^K|GnXP zg+kJH`1PVC;&OqM0p1f>-zs}A!_9uhyCc(*1wJr8!ni_ZtEoOKDYqtR$pQf&=9&Q& z?^wP~Y^*^jN4hdRin_k9wGTSz@Zk34PZmo!)?jsl%rgAC&RQ@v_Z>ou8KR2_|BT2p zN6rNoP7a(AK0!7rY>!%jB2r0#zxVDc1SoV1JdUG8zHEu0>-_kcdib>uy}tl@RO%+lmHT`>3XYN&$4kmn1c#> z+l!VxglGVi&@$kXj-*YBcn1ixeM3BpJA~_5>Zdwg-DJw@H($K%;1}PLxN>h~qaPZw zd9rNuexO7iC{(hSgIr`2l(i$$=Tu*q8!lu#M$?q1cUc7dh?H`k`6=ae3x3mho98j} z<4J{gm8M8@C@Ea-5ruvqTFCUpm8Nlr`Ki#U!-Bi*HP6GOmjMi{ZJx5r-3A(by9)v)m>#Ta;vMu`m#X{tmaI! zkeRsy4E!MGKMK3*J2zEVBBu0GVf{owGZn^X#!>voyKtZraJnpms{k0F{cO!ZOrAd1 zoqiQi*U*iE%H0Tz`IKz|3!ze-8nXzT6>UV(py13Xt52^y#L#5gRNW)AauwEPtf2V` zNZ(wx~%O#b*dAmWur6A=@27lBnsS6N67|z#6rinbCpBVEP1#!x&?>5I8s6 z+NgH_+O_9VLDsGSFBDQ$sY8t^3iAO%Ijvq%;hPX)fFXv4pj*x~YQ_3w7fvi2+l-uJ zf~aF;syR(=CAZFN``EI(wxNJ)*oHdl`S1jDO}QWzA6vGz%kgH=lG2@fFXCEip*-o` zhiO?*;sF-3BWi3WLZ)cw45Kz^<$_*;KLps%S;$1YR~(7GDahuYIP-Zan&ufnpX=KC=W>w* z)>~&x4sUq3g{frsPvEJ=%=%G*(UoeF{Qj+<-wRUD&QDJ_i>ql*3`;x1S8b{4NEDub zJJZHT?p#rI?W-XLZ_`Y~*nw$LPuBl&qAEyA3N2ODk%j8z|3=r=OMi`VKN7HSsoa2j zJ!y))KL?`!^d*G{QdJ80-BsjKM0QbO;%WIr2>#=eZ2 zX7{@y3KmZ0qToLp=>KFM4fh-BGt4+z5l^>^pvSaPBlyXHJv=eYXJiqurR?Z_Ehej5 zr7|RBwY}{3%Z)B{7h~ukkc^r0nezGT0`vJ#0wX+yD&CEtVrXKc|5@-LMjO4g?I=pO zgtzYer}jBrzzY)aZDo28z1L6d62Yfmv${heG>}npVs{*I_D%QKc}Wl$|LHby%e}~( z>jAC(n*?a9o)t~?t%$(xl>mksgPghvWYmKJGb6bz$cwAKUww-EwF}YbaV3D)(H?Qx z$8{9bPzq^pF}JtUUlqa(q8|EbNZ*P1D*#dEyD&s)-1{e8`eilK05|pOL6Az5@CPI1 zf1!T_Oi)jbrOPWzeEfCo?-c3E>Fa8I+!6Za0%(TCo0*#)5{6kcF); zjK)|ra=(o&vz;D?I>m76ZQ_#}L1T@nirC0^JMa^6hFATFu5zrBpH9r8-f;O&%GVu3 z_yhNpyN4#$C}oZM7-``}QN*H*^BW2{-o-#yp1;pNgXfI6@SRgtzF)jr?M(GfHiIQN zudnmm#}kl+PdHr|CYY5|4^V36fHo*hMIP2 zqtNJn+hX?Lq&)GU?a|~%Iupg@L0ZQd5uIsiry9tW&B+}X@GIe1d5hy+AExD`h5lv! zsEVvlhva29%y0PxHi2L5<$fOH3pPn}NE2)fQ0* zI$l)@!CyAY3rkglU-R&t9%_7&(a-Je*RyI0cDSHZ>2Js5BIyB;P^Kau8}cx`IZyx) zGMLiY&XwWhLB3m-s#6PyHt-;cmLb{@BEtR2H8CTC^nCD09LZ!{*r%~3@jK#-xck3& zGwwsq$?*b|WreD$?qsw?U;=(v0=oQH(d_l=saXX?WLu)ympv*mz z$fH|iQ&pW@(~&%8ZiVMvZFJq8F!SF<`TVB)68S=SU{KF`X+-llB^)pk>}5cc2MmPh z*--vXHmM14(J#|q7ZOKQYpF>Z>u-4o+CdUE86dIu2$PTkTIQcxu{_@&x!kvoSKj2? 
z=is*%G}CYnObZ&xLRhyF)B$DeyS-_}chs9-B=19`PQ+H5o2X0LYB$c6w+d=n1{ROa zl>>|$SHPGZ!zC91t}1fn^J!PWKkm2-nVa)Lf%d(7P-VjdSt>NUZ<*yKb7fzT?a-T- zMXA7&qk!vM)*)>B^7eQb3atnUS#@?no4y*Ec2MUtx4{)3aSmJiz!*=j9vGQc!A!76 z)U!hgoW?dx1aG}LpVqHe!D6Yaq{^)c|JG7~D%w~><>sQH29PC%h3r2Y*bbG{Lo$mPu zupgG7fa9PLeP6bl^y>KDYwp7lXkisO8dF(W%KPA!y(R3-_!TAQfH#_Oh6Q4}0Koh4 z`NnbMKw-~-@iKmEfS~T_eqs7`q2`ux#C~cW(@m$#^z8I~T&1$$0m`7qB1A->`YN4R z)ku#M_`&4qg8JVySTBHwh+^QV^_iz%+>90ljf*yRTuOCl!c575n5M4`q_fXDG~N9n zNbfW3#BFT8`66Qo7%Q&VF8-#}suZazN%{blG-52<`}o86-Wq4c+JH}us!XLsonV}> z2u*L@3tAZtU;kFxfL&yaAAD|bM;o*d!L4g#ZWQvolQzDjo$Kb@zxA~5C^s?Sb}#e9 z=HRuz&zRh8Ftp@ei>53XYZ2nmp6N(To6-#)e+~+QK(&rRVRRZ=2r)xS-V^vAd0b&6~rQC^g?N5#GMHshf zuIOp#q`>-hBvIYi)U}rc!Z%?oX4h16Y zgKgx3I9;Hqdiv+O*l{;7DGmSkMnadYHH7G(<(%FA+63zFh!AS|d`6M98@^`U?Cg=i z5MEHnPM&wi3wg`OvC4!YqkRp2T2s5>D=7Up?1|nbC)U_2BjTLKi36flfR%bAu$AGwFr8YE}kwoR7u@J&hxWQ^Yv?|3lj#z?E3x5|Rx zJK!%s*DySfJ@2J>x7Vff_e;7JClei51CVZCQ1q;WqTb375v3GBU;gSl0pR@vtwwf(j9Z#9F$>%yZ?0h)|oo4JNY4D1||hgJX51sa$@zP*_y{m&lC9b4 zz}rbY0Og*Abpe5VkjinBV_50G^ zW_(<|^mZ{Pj%p1U>l){7jQ$sYTCT$(!%!}G@Uk`mz%p%QIF*j+p9PU6H#;LY-Kp1x z)k89$WOo0?p@wJ`FDe*0l9BVe zzdX1wq-SRX7ZD!f-@FABB)QG70#@s!9)%jM{a2ma&{s-FUFZw!F0Z@EFd+xy`dWLO za&>zhWiTuSFGJjH3c89p@0+(&ez_S(q4)STUqE@KZQ=C_Xl*K-?*5!~rxs3s#Q4iL zR3Eb>W;%oOft#%Y<28;rZuRSngG%D68$E|G{Aps_i#}Nh0yiNB+c#3_I@BEO$f)jFWs}nyU;30)MRcI6W3JSqs(gJm+H`QaB0i&=hkg zj4d0xG|xd3CNZF895mkXJ=JC%P_+k-?ceEAtOf`d$#0NWKG9-pC6gg|gc1MB$z4a; z3k~hS#NwAR-D1d*3AwL8|MQK!i@zA@rbySv+T#D&j#&%ZFB%>zzXW50Z$R+KT^#e5>IxU}Bh89FAL7lH`LNvHq63DhaG0E#V3LS}QpYdUK_! zCL;L!0rq>>6hO9C{VvUVf75*(;Lr1F5OeC;3dkYTKd5jR8=&?PQ%D>biGFnNSKQ{S zam=6CeQ%T@CX5&D#18>Bu?S~h`1=gzS3AJ@pOK0Leb~UmE&Z-BiBkNg4j?nxQ_K7p zVhRw62*L&F&WCO?C7I%umpWTCbN;FNMeME}2>hk971Dz*(nI^X$y0A&VH1OVLg7=w zKl{JFSl~e|1U=n4c!_LaHkk4eFTp^AiaPWwLC>;hPOip{c`KN{D2t3YJ&?8`K z?({o^EkJDJz9du^N=X1l!TY$dem>ynWZ%L}UCwv_CTT4B030z<1p~&>2If{yf5^^? 
zg7~eppSkSGIS`k7J8=Ps6UILBptWNtW&V;+KqWwLcWIsB@OPJq0Ke$=TtfZe`s^1= zbsaDj1U;nhuLJHgX!=eW+PGdnSU6#0$%;gcyq}Npo;xUeHYU4kw~MGwi=nqbKI;Lc zTJUEMML;q6CX^GjErocaAqCFk;dER$)MJ=UqYX z1@$sc;t9aTeDXSXwwHeDNSGtyVy6qw>YGyoF3!J9p$Vjf6@R#?2}@&woCp6|CC?`U zd?RBfCeid#2k;QA%g~F?>kv1;ivuMM&DHwZQ&If|ZEu34YJ86}76R0-t{U)+*50iY{`g-U2n*6&2=(&0MEwBQ`))0O725`CXd<7v zSn3gwVz@-f3Abx*ErVfW3&HBwGyM?QC=n0NdF!H$`+EBJMvnJ?fS^I^>wm@!<#R&; z8v6%X&}`7L2=>ia{EmpZ^YsJx8wH^H*fRGYE?iKd6#QKPd4{YSyZ%*vi%SdGYR#7# z6ei;)41=%G;}Yq~_{}ayw+KcowD=W>sOEydiUinw9~8(Y_q>iT;WD~SQOT3w_wJ&b z0vjaqtFfL)`$^{KWuW!M&wz9E=kG+{eN8}L{Viz;57jN!%BZ?ibz_yJDPxtOA;?sJ z$Wm~GN#akPvs-!=XCj;X+s!d z?$s6(^t4~Io`qgtl5G=?T3Nw*;?|s7Ngo@4L8q%91r)0~h0sa`UfTnBZ7U4{L8RFr zyjRKRDrhrfjbwwTus$T3p#36tBXqz+K(iW?GxC92iD#Lrghz<*`{@k+$O(lgQd8mN z>BCK46gL6g#1EijZH+0K*+TA#vH}J{uoF=`4T(!BnjI@THqbVlVkjf0XnN3adpeom z%^xfWyf4WKjQvp2?s$Q1@paFi|I&gm;IQi9WOf|Y6Ju4ZOO}tpx}Fc zolQ!Oe$~fM3wH}02&lsPf-<}7M{CVa*Rf&gr~;bOSD&|d+zU)QAB8JlHEsmS@28nm z&^$VCN|s(Cgv5Xod{&b;C37AWr|v!l`Eh77x1r!$KuvsW)DEBQ%@u0Q4sr#`POsQ< z2j~=R?hhL)+8j2WXbb~oyR59Sl&^A_Pad`Qtg`LYj@Wxn>Dx8^Z8z}4_gw_DgQ5&ce3%}m1g=UnYt11Jcq*moXPB_j|Ir1UO)b_v z57lS&S*^oQXBG_Ee98?TRkIh6F>|*A)!1#yU}oBWHFZm`08+0ITp{W|s{GDZ2|tD` za;*N-LU3+^_oHie!>?7$lGj_Du<#HvXF#nIrWd$q;6-{Un1axd@OMJ~g*A57buQTc z{PZ74@5+7Um$dga)^qjcpjrX~tBT-|k;PN7C}zuD_eVj)a-P5?D6m(s!8wvMm^<*D z@(W%n*w=4#8c63qr{w=T7aoih`lwIyKI$UCy}<^|`>^>M$Ta~i@^sY_x`)OF_q{*7 zkQ}Syd0Q~k>$PgTsKa~ysEFLMY%!=bt<=!yJ8<{tkM1`rF9GAkhAsLv{k0|+Mi?!6 zR2^?XEEdou5Ui&Nz|ly~U?#^o(gS%-{m%r$wVKlc`$|}E5S%x{%1GGYZm1Ral?vy$ zXpGfE=9eg#&5zuGOz&oUaO7x{6v!%W2x=RGQU`VSy?0H9J07K>eto5; zFCP3+=s^B6sZ7AmSr$mCPk`eY!qUb`5!K}ztsE2I2Uv?62wl4LRB;YXH$o@eT-pM<{)opeI*^Bn(IXJP8735Y=>wv!>GZa*-OAVY51S_;kwa9-0Q z{1q|sbCu=hw=hu+0+aWl~p&oGPdohQDWaz$lIO>_IOrp1j5d!x+RC*DN(x@ zlU#n#73Nn1OL5J*=Aas;yw;gxmow`x1IU^2n$~Z?3<44*ZaaRb*TYP6wQ}@FHukt` zL~cCkt+(Toaub@@eT>1O*b8==(YC**Sj<;vOVhzx-nTXCv+CHp~l@#n|-G`r*jQXSpv> z$jy$+^)V?I&)jk_i->4_Wp3pHt<5V(Yb=L+z7SlI6W20R84Gx~x4XRhs_R#@Qc}W| zBZHM+>IUnr?Y|O3kW{373CCrqkaw6{-lrz5LnuwM34`Z0qSw%=sl`^x+S)^HDwP*-ckhb;I@g!v{t&?QZZ{y*eAZKk6@9%yUG}on#7Rc$8d&qIr`X~p` z85ykXGe$eQl@LHHeLE1Gw$@lyveuRJK6k1<0{s5bj8ZexXD+vb|1e}dIHoe$eetht!SfWSp@nw^T>3i$kR%KF-9K!kDC)dD35+LnFuT29vH2~;jo@-e|kJ$w^A3(Km9u8 z$LT4z&QH4sKJ(P*U#`c`hYxr~?quCHuLdRJPk-)j2?Cy~U*B!{yjd>wXw4tvWou6K z*H3%0;USk-Qjx#5a)H>>Qscf@U|7^^2H)G=>(72%;r#!H=O>h3;B{`)yrFL6)mQ)d85E\n", " 0\n", " 5.00\n", - " A plane is taking off.\n", - " An air plane is taking off.\n", + " a plane is taking off.\n", + " an air plane is taking off.\n", " \n", " \n", " 1\n", " 3.80\n", - " A man is playing a large flute.\n", - " A man is playing a flute.\n", + " a man is playing a large flute.\n", + " a man is playing a flute.\n", " \n", " \n", " 2\n", " 3.80\n", - " A man is spreading shreded cheese on a pizza.\n", - " A man is spreading shredded cheese on an uncoo...\n", + " a man is spreading shreded cheese on a pizza.\n", + " a man is spreading shredded cheese on an uncoo...\n", " \n", " \n", " 3\n", " 2.60\n", - " Three men are playing chess.\n", - " Two men are playing chess.\n", + " three men are playing chess.\n", + " two men are playing chess.\n", " \n", " \n", " 4\n", " 4.25\n", - " A man is playing the cello.\n", - " A man seated is playing the cello.\n", + " a man is playing the cello.\n", + " a man seated is playing the cello.\n", " \n", " \n", "\n", @@ -318,21 +331,21 @@ ], "text/plain": [ " score sentence1 \\\n", - "0 5.00 A plane is taking off. \n", - "1 3.80 A man is playing a large flute. \n", - "2 3.80 A man is spreading shreded cheese on a pizza. 
\n", - "3 2.60 Three men are playing chess. \n", - "4 4.25 A man is playing the cello. \n", + "0 5.00 a plane is taking off. \n", + "1 3.80 a man is playing a large flute. \n", + "2 3.80 a man is spreading shreded cheese on a pizza. \n", + "3 2.60 three men are playing chess. \n", + "4 4.25 a man is playing the cello. \n", "\n", " sentence2 \n", - "0 An air plane is taking off. \n", - "1 A man is playing a flute. \n", - "2 A man is spreading shredded cheese on an uncoo... \n", - "3 Two men are playing chess. \n", - "4 A man seated is playing the cello. " + "0 an air plane is taking off. \n", + "1 a man is playing a flute. \n", + "2 a man is spreading shredded cheese on an uncoo... \n", + "3 two men are playing chess. \n", + "4 a man seated is playing the cello. " ] }, - "execution_count": 6, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } @@ -373,7 +386,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -385,7 +398,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -430,7 +443,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ @@ -457,7 +470,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -466,19 +479,6 @@ "testing_data = feature_engineering(test)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "#Take this out later\n", - "\n", - "training_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_train.csv\"), index=None)\n", - "testing_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_test.csv\"), index=None)\n", - "validation_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_dev.csv\"), index=None)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -508,7 +508,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -531,7 +531,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -562,45 +562,24 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "WARNING - You have logged in. Now let us find all the subscriptions to which you have access...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Interactive authentication successfully completed.\n", - "Workspace name: MAIDAPNLP\n", - "Azure region: eastus2\n", - "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", - "Resource group: nlprg\n" - ] - } - ], + "outputs": [], "source": [ "ws = azureml_utils.get_or_create_workspace(\n", " subscription_id=\"\",\n", " resource_group=\"\",\n", " workspace_name=\"\",\n", " workspace_region=\"\"\n", - ")\n", - "\n", - "# @Courtney : put the print in another cell and don't run it. 
\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "print('Workspace name: ' + ws.name, \n", " 'Azure region: ' + ws.location, \n", " 'Subscription id: ' + ws.subscription_id, \n", @@ -649,7 +628,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -666,7 +645,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ @@ -695,7 +674,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 19, "metadata": {}, "outputs": [ { @@ -703,7 +682,7 @@ "output_type": "stream", "text": [ "Running on local machine\n", - "Parent Run ID: AutoML_5c05b58e-1709-4042-a38c-f4b96cbae855\n", + "Parent Run ID: AutoML_96d2e4e6-8d8d-4304-a160-18c487158b8a\n", "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", "Current status: DatasetEvaluation. Gathering dataset statistics.\n", "Current status: FeaturesGeneration. Generating features for the dataset.\n", @@ -719,59 +698,58 @@ "****************************************************************************************************\n", "\n", " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:00:14 0.1703 0.1703\n", - " 1 MinMaxScaler RandomForest 0:00:55 0.4157 0.4157\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:32 0.2771 0.4157\n", - " 3 StandardScalerWrapper LightGBM 0:00:09 0.2708 0.4157\n", - " 4 RobustScaler DecisionTree 0:00:12 0.2435 0.4157\n", - " 5 StandardScalerWrapper LassoLars 0:00:07 0.1246 0.4157\n", - " 6 StandardScalerWrapper LightGBM 0:00:12 0.6567 0.6567\n", - " 7 StandardScalerWrapper RandomForest 0:00:10 0.1989 0.6567\n", - " 8 StandardScalerWrapper LassoLars 0:00:09 0.0836 0.6567\n", - " 9 MinMaxScaler ExtremeRandomTrees 0:00:12 0.3547 0.6567\n", - " 10 RobustScaler ExtremeRandomTrees 0:00:33 0.3561 0.6567\n", - " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:11 0.2956 0.6567\n", - " 12 StandardScalerWrapper LassoLars 0:00:10 nan 0.6567\n", - "ERROR: Run AutoML_5c05b58e-1709-4042-a38c-f4b96cbae855_12 failed with exception \"Primary metric spearman_correlation is not available.\".\n", - " 13 MinMaxScaler ExtremeRandomTrees 0:00:12 0.2495 0.6567\n", - " 14 RobustScaler RandomForest 0:00:14 0.3333 0.6567\n", - " 15 StandardScalerWrapper LassoLars 0:00:06 nan 0.6567\n", - "ERROR: Run AutoML_5c05b58e-1709-4042-a38c-f4b96cbae855_15 failed with exception \"Primary metric spearman_correlation is not available.\".\n", - " 16 StandardScalerWrapper ExtremeRandomTrees 0:00:09 0.2098 0.6567\n", - " 17 StandardScalerWrapper RandomForest 0:00:09 0.2262 0.6567\n", - " 18 MinMaxScaler SGD 0:00:08 0.0877 0.6567\n", - " 19 StandardScalerWrapper RandomForest 0:00:20 0.3533 0.6567\n", - " 20 StandardScalerWrapper LightGBM 0:00:27 0.7412 0.7412\n", - " 21 StandardScalerWrapper LightGBM 0:00:28 0.6983 0.7412\n", - " 22 StandardScalerWrapper LightGBM 0:00:31 0.6864 0.7412\n", - " 23 StandardScalerWrapper RandomForest 0:03:53 0.4227 0.7412\n", - " 24 MaxAbsScaler DecisionTree 0:05:29 0.1967 0.7412\n", - " 25 MaxAbsScaler LightGBM 0:01:14 0.3161 0.7412\n", - " 26 StandardScalerWrapper LightGBM 0:01:22 0.5771 0.7412\n", - " 27 StandardScalerWrapper ExtremeRandomTrees 0:00:31 0.1956 0.7412\n", - " 28 0:15:12 nan 0.7412\n", + " 0 StandardScalerWrapper RandomForest 0:00:13 0.1822 0.1822\n", + " 1 MinMaxScaler RandomForest 0:01:05 0.4164 0.4164\n", + " 2 
StandardScalerWrapper ExtremeRandomTrees 0:00:12 0.2106 0.4164\n", + " 3 StandardScalerWrapper LightGBM 0:00:10 0.2845 0.4164\n", + " 4 RobustScaler DecisionTree 0:00:13 0.2544 0.4164\n", + " 5 StandardScalerWrapper LassoLars 0:00:07 0.1246 0.4164\n", + " 6 StandardScalerWrapper LightGBM 0:00:10 0.6568 0.6568\n", + " 7 StandardScalerWrapper RandomForest 0:00:10 0.2216 0.6568\n", + " 8 StandardScalerWrapper LassoLars 0:00:09 0.0838 0.6568\n", + " 9 MinMaxScaler ExtremeRandomTrees 0:00:12 0.3674 0.6568\n", + " 10 RobustScaler ExtremeRandomTrees 0:00:35 0.3522 0.6568\n", + " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:11 0.2703 0.6568\n", + " 12 MinMaxScaler ExtremeRandomTrees 0:00:13 0.2410 0.6568\n", + " 13 RobustScaler RandomForest 0:00:14 0.3422 0.6568\n", + " 14 StandardScalerWrapper LassoLars 0:00:07 nan 0.6568\n", + " 15 StandardScalerWrapper ExtremeRandomTrees 0:00:08 0.1996 0.6568\n", + " 16 StandardScalerWrapper RandomForest 0:00:09 0.2429 0.6568\n", + " 17 MinMaxScaler SGD 0:00:08 0.0559 0.6568\n", + " 18 StandardScalerWrapper RandomForest 0:00:21 0.3900 0.6568\n", + " 19 MinMaxScaler RandomForest 0:00:09 0.1557 0.6568\n", + " 20 StandardScalerWrapper LightGBM 0:00:20 0.7423 0.7423\n", + " 21 StandardScalerWrapper XGBoostRegressor 0:02:19 0.6688 0.7423\n", + " 22 StandardScalerWrapper DecisionTree 0:03:03 0.2179 0.7423\n", + " 23 StandardScalerWrapper LightGBM 0:00:43 0.6779 0.7423\n", + " 24 StandardScalerWrapper XGBoostRegressor 0:03:05 0.7638 0.7638\n", + " 25 TruncatedSVDWrapper XGBoostRegressor 0:00:33 0.7415 0.7638\n", + " 26 StandardScalerWrapper RandomForest 0:01:53 0.4292 0.7638\n", + " 27 StandardScalerWrapper XGBoostRegressor 0:05:27 0.6608 0.7638\n", + " 28 MaxAbsScaler LightGBM 0:00:18 0.6966 0.7638\n", + " 29 StandardScalerWrapper XGBoostRegressor 0:10:35 0.5947 0.7638\n", + " 30 TruncatedSVDWrapper XGBoostRegressor 0:00:42 0.5582 0.7638\n", + " 31 0:15:17 nan 0.7638\n", "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. 
Please consider increasing the iteration_timeout_minutes parameter.\n", - " 29 MaxAbsScaler LightGBM 0:04:06 0.7195 0.7412\n", - " 30 SparseNormalizer LightGBM 0:00:39 0.6011 0.7412\n", - " 31 MaxAbsScaler LightGBM 0:00:31 0.7445 0.7445\n", - " 32 StandardScalerWrapper LightGBM 0:00:43 0.4265 0.7445\n", - " 33 MaxAbsScaler LightGBM 0:00:30 0.7460 0.7460\n", - " 34 MaxAbsScaler LightGBM 0:00:27 0.5939 0.7460\n", - " 35 StandardScalerWrapper LightGBM 0:00:38 0.7115 0.7460\n", - " 36 MaxAbsScaler LightGBM 0:00:35 0.7265 0.7460\n", - " 37 MaxAbsScaler LightGBM 0:01:15 0.6830 0.7460\n", - " 38 StandardScalerWrapper LightGBM 0:00:35 0.5717 0.7460\n", - " 39 MaxAbsScaler LightGBM 0:00:38 0.6779 0.7460\n", - " 40 TruncatedSVDWrapper LightGBM 0:00:17 0.6970 0.7460\n", - " 41 MaxAbsScaler LightGBM 0:00:32 0.7303 0.7460\n", - " 42 MaxAbsScaler LightGBM 0:03:12 0.7011 0.7460\n", - " 43 StandardScalerWrapper LightGBM 0:00:35 0.6661 0.7460\n", - " 44 SparseNormalizer LightGBM 0:00:31 0.6854 0.7460\n", - " 45 TruncatedSVDWrapper LightGBM 0:00:21 0.7386 0.7460\n", - " 46 MaxAbsScaler LightGBM 0:00:51 0.7113 0.7460\n", - " 47 MaxAbsScaler LightGBM 0:00:37 0.6230 0.7460\n", - " 48 VotingEnsemble 0:01:26 0.8104 0.8104\n", - " 49 StackEnsemble 0:05:20 0.8102 0.8104\n" + " 32 StandardScalerWrapper XGBoostRegressor 0:03:26 0.5855 0.7638\n", + " 33 StandardScalerWrapper XGBoostRegressor 0:01:54 0.6289 0.7638\n", + " 34 MaxAbsScaler LightGBM 0:01:22 0.7226 0.7638\n", + " 35 TruncatedSVDWrapper XGBoostRegressor 0:01:14 0.7168 0.7638\n", + " 36 SparseNormalizer XGBoostRegressor 0:01:51 0.7436 0.7638\n", + " 37 MaxAbsScaler LightGBM 0:00:44 0.7087 0.7638\n", + " 38 0:15:13 nan 0.7638\n", + "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. Please consider increasing the iteration_timeout_minutes parameter.\n", + " 39 MaxAbsScaler LightGBM 0:01:15 0.7516 0.7638\n", + " 40 TruncatedSVDWrapper XGBoostRegressor 0:00:49 0.7186 0.7638\n", + " 41 StandardScalerWrapper XGBoostRegressor 0:01:08 0.6529 0.7638\n", + " 42 MaxAbsScaler LightGBM 0:02:37 0.7303 0.7638\n", + " 43 StandardScalerWrapper XGBoostRegressor 0:01:57 0.6202 0.7638\n", + " 44 StandardScalerWrapper XGBoostRegressor 0:01:48 0.6566 0.7638\n", + " 45 TruncatedSVDWrapper XGBoostRegressor 0:01:31 0.7186 0.7638\n", + " 46 MaxAbsScaler LightGBM 0:00:27 0.7438 0.7638\n", + " 47 MaxAbsScaler LightGBM 0:00:22 0.6211 0.7638\n", + " 48 VotingEnsemble 0:01:07 0.8160 0.8160\n", + " 49 StackEnsemble 0:09:44 0.8161 0.8161\n" ] } ], @@ -784,34 +762,26 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, my accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." + "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, by accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. 
Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "20258ec1fa27452db1ac708b631d48aa", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Inspect the run details using the provided widget\n", "RunDetails(local_run).show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](AutoMLwidget.PNG)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -819,18 +789,52 @@ "## Deploy\n", "\n", "### Retrieve the Best Model\n", - "Below we select the best pipeline from our iterations. The get_output method returns the best run and the fitted model for the last invocation. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration." + "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the get_output method which returns the best run and fitted model across all iterations. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", + "\n", + "Our best model is a Pipeline with two steps: a DataTransformer step and a StackEnsembleRegressor step. We demonstrate how to extract additional information about what data transformations were used and which models make up the ensemble." 
] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ "best_run, fitted_model = local_run.get_output()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can look at the different models that are used to produce the stack ensemble model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fitted_model.named_steps['stackensembleregressor'].get_params()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can also look at how each column in our dataset was featurized by AutoML" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fitted_model.named_steps['datatransformer'].get_featurization_summary()" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -841,15 +845,15 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Registering model AutoML5c05b58e1best\n", - "AutoML5c05b58e1best\n" + "Registering model AutoML96d2e4e68best\n", + "AutoML96d2e4e68best\n" ] } ], @@ -871,14 +875,14 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Overwriting score.py\n" + "Writing score.py\n" ] } ], @@ -920,7 +924,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -930,18 +934,36 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 28, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No issues found in the SDK package versions.\n" + ] + } + ], "source": [ "dependencies = ml_run.get_run_sdk_dependencies(iteration = 7)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "azureml-train-automl\t1.0.41\n", + "azureml-sdk\t1.0.41\n", + "azureml-core\t1.0.41.1\n" + ] + } + ], "source": [ "for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:\n", " print('{}\\t{}'.format(p, dependencies[p]))" @@ -949,7 +971,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -958,7 +980,7 @@ "'autoenv.yml'" ] }, - "execution_count": 21, + "execution_count": 30, "metadata": {}, "output_type": "execute_result" } @@ -975,7 +997,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 31, "metadata": {}, "outputs": [], "source": [ @@ -999,7 +1021,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 32, "metadata": {}, "outputs": [ { @@ -1008,9 +1030,9 @@ "text": [ "Creating image\n", "Running.\n", - "NotStarted..............................................\n", + "NotStarted................................................\n", "Succeeded\n", - "Image creation operation finished for image automl-image:9, operation \"Succeeded\"\n" + "Image creation operation finished for image automl-image:1, operation \"Succeeded\"\n" ] } ], @@ -1034,9 +1056,17 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 33, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + 
"https://maidaptest3334372853.blob.core.windows.net/azureml/ImageLogs/08bb1d92-0082-4b14-899b-3b829cf785be/build.log?sv=2018-03-28&sr=b&sig=LZLnU6O2ZjSPlgRbrN2V9iI%2FthozymlHLQOJzYIzWJY%3D&st=2019-06-18T14%3A52%3A34Z&se=2019-07-18T14%3A57%3A34Z&sp=rl\n" + ] + } + ], "source": [ "print(image.image_build_log_uri) " ] @@ -1050,7 +1080,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 34, "metadata": {}, "outputs": [], "source": [ @@ -1061,7 +1091,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 38, "metadata": {}, "outputs": [ { @@ -1069,7 +1099,7 @@ "output_type": "stream", "text": [ "Creating service\n", - "Running.........................\n", + "Running...................\n", "SucceededACI service creation operation finished, operation \"Succeeded\"\n", "Healthy\n" ] @@ -1077,7 +1107,7 @@ ], "source": [ "# deploy image as web service\n", - "aci_service_name ='aci-service'\n", + "aci_service_name ='aci-service-automl1'\n", "aci_service = Webservice.deploy_from_image(workspace = ws, \n", " name = aci_service_name,\n", " image = image,\n", @@ -1096,7 +1126,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 39, "metadata": {}, "outputs": [ { @@ -1130,21 +1160,14 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 42, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "nb sentences encoded : 27018\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{\"result\": [2.4007649356586533, 3.7203041050076435, 3.01905608701006, 3.902856364455924, 1.1550228301889944, 1.3840207272790979, 3.178933834598589, 2.0223381553819926, 2.622230818796456, 1.7338832115242881, 1.7338832115242881, 4.323554015554397, 1.425456035672047, 3.484031993820433, 2.6054303529758225, 1.7605201466322773, 4.306348418450282, 3.348479712581535, 3.385623520970331, 1.6276171412284968, 2.1343994392871606, 1.6012629512613972, 3.869952502388212, 3.686662790001718, 1.6451070211205048, 3.1421279572615366, 1.111655437831912, 3.0180103017472146, 1.9712967228763916, 3.4114704338436295, 3.6518193090231668, 3.562469227771801, 2.124821531366967, 3.834628005697218, 2.945914751898513, -0.2392489880655022, 1.2437658111011634, 3.0753114019001915, 3.336621543167602, 0.7072309427745641, 2.7822500083684014, 2.897998498396679, 3.507070164144911, 0.8324353540675962, 2.3439851393673887, 1.196795770111204, 1.2525496534114489, 0.823759560298268, 2.9920972505050303, 1.9236245820356928, 1.5376412762417093, 1.9239034683592888, 0.680217556271701, 0.8143318319044779, 1.6569386883279007, 0.4259460646219386, 3.3319423567551247, 0.9045598298338577, 3.211903077820651, 1.7243704134512794, 3.514874356499507, 0.9110995408450886, 3.3741522237839656, 1.2281258642856878, 3.465292338670356, 0.32058502612519185, 0.8850275497988621, 3.065256206079797, 0.4053934125165189, 3.5740675568691405, 3.281222953364415, 3.5140749236641007, 1.071282888904832, 3.0968370948355677, 2.845044903199787, 1.1761123158943305, 1.0931252885255132, -0.2190979331369351, 3.0278360651324765, 3.3771503236637863, 0.3595765778211298, 1.4457001019091615, 3.0655501960586586, 1.6492383358940006, 0.700043224420747, 2.034128871545479, 2.9722384301971485, 3.5590364352072705, 3.012090526492372, 1.432290360589044, 2.0708695062946347, 3.6032182783315987, 1.4070917999765014, 2.9198148106213044, 1.602526171561382, 3.0423404255325828, 4.134046333382066, 3.1833924704358454, 2.6972409320246538, 2.9434198535949747, -0.05719679744147322, 1.2503400387171553, 
1.3216326198792667, 1.356744263747531, 3.0909000167547713, 2.3693370546284456, 1.4961197095665637, 2.096660756641706, 1.8479874198015775, 0.11619308951303675, 1.6860747366973479]}\n" + "nb sentences encoded : 27101\n" ] } ], @@ -1152,82 +1175,28 @@ "score = aci_service.run(input_data = data)\n", "\n", "# embeddings will print the error message incase error occurs.\n", - "print('nb sentences encoded : {0}'.format(len(score)))\n", - "print(score)" + "print('nb sentences encoded : {0}'.format(len(score)))" ] }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 41, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "0.7764788693697073\n" + "0.7787081114999308\n" ] } ], "source": [ "from scipy.stats import pearsonr\n", - "#print(train_y)\n", "result = json.loads(score)\n", "output = result[\"result\"]\n", "print(pearsonr(output, train_y)[0])" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Inspect the Best Model\n", - "\n", - "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case). The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). Our best model is a Pipeline with two steps: a DataTransformer step and a PreFittedSoftVotingRegressor step. We demonstrate how to extract additional information about what data transformations were used and which models make up the ensemble." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "lookup_metric = \"spearman_correlation\"\n", - "best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n", - "print(fitted_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can look at the different models that are used to produce the stack ensemble model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fitted_model.named_steps['stackensembleregressor'].get_params()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also look at how each column in our dataset was featurized by AutoML" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fitted_model.named_steps['datatransformer'].get_featurization_summary()" - ] } ], "metadata": { @@ -1246,7 +1215,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.8" + "version": "3.6.7" } }, "nbformat": 4, From a33a89260cd03b2d088d7b270a1c48c997c6a809 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Tue, 18 Jun 2019 11:16:18 -0400 Subject: [PATCH 074/108] Update widget image --- ...utoml_and_deployment_google_universal_sentence_encoder.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb b/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb index 0da25586f..9203088d1 100644 --- a/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb +++ b/scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb @@ -779,7 +779,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![](AutoMLwidget.PNG)" + "![](autoMLwidget.PNG)" ] }, { From c48443c92380123bef24190f781330a43087e6c3 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Tue, 18 Jun 2019 16:57:06 -0400 Subject: [PATCH 075/108] File paths working --- ...une 18 Following Regression Notebook.ipynb | 1316 +++++++++++++++++ 1 file changed, 1316 insertions(+) create mode 100644 scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb diff --git a/scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb b/scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb new file mode 100644 index 000000000..9831b23d3 --- /dev/null +++ b/scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb @@ -0,0 +1,1316 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Logging before flag parsing goes to stderr.\n", + "W0618 16:45:51.878871 26704 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Turning diagnostics collection on. 
\n", + "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", + "Azure ML SDK Version: 1.0.41\n", + "Pandas version: 0.23.4\n", + "Tensorflow Version: 1.13.1\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0618 16:46:02.219784 26704 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0618 16:46:02.576774 28512 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "W0618 16:46:09.812178 26704 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n", + "Workspace name: MAIDAPTest\n", + "Azure region: eastus2\n", + "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", + "Resource group: nlprg\n", + "Found existing compute target.\n", + "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-18T20:41:02.587000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 215KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 255KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 246KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + } + ], + "source": [ + "# Set the environment path to find NLP\n", + "import sys\n", + "sys.path.append(\"../../\")\n", + "import time\n", + "import logging\n", + "import csv\n", + "import os\n", + "import pandas as pd\n", + "import shutil\n", + "import numpy as np\n", + "import torch\n", + "import sys\n", + "from scipy.stats import pearsonr\n", + "from scipy.spatial import distance\n", + "from sklearn.externals import joblib\n", + "\n", + "# Import utils\n", + "from 
utils_nlp.azureml import azureml_utils\n", + "from utils_nlp.dataset import stsbenchmark\n", + "from utils_nlp.dataset.preprocess import (\n", + " to_lowercase,\n", + " to_spacy_tokens,\n", + " rm_spacy_stopwords,\n", + ")\n", + "\n", + "# Tensorflow dependencies for Google Universal Sentence Encoder\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "# AzureML packages\n", + "import azureml as aml\n", + "import logging\n", + "from azureml.telemetry import set_diagnostics_collection\n", + "set_diagnostics_collection(send_diagnostics=True)\n", + "from azureml.train.automl import AutoMLConfig\n", + "from azureml.core import Datastore, Experiment\n", + "from azureml.widgets import RunDetails\n", + "from azureml.core.compute import ComputeTarget\n", + "from azureml.core.runconfig import RunConfiguration\n", + "from azureml.core.conda_dependencies import CondaDependencies\n", + "from azureml.train.automl import AutoMLStep\n", + "from azureml.pipeline.core import Pipeline, PipelineData\n", + "from azureml.pipeline.steps import PythonScriptStep\n", + "\n", + "print(\"System version: {}\".format(sys.version))\n", + "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", + "print(\"Pandas version: {}\".format(pd.__version__))\n", + "print(\"Tensorflow Version:\", tf.VERSION)\n", + "\n", + "BASE_DATA_PATH = '../../data'\n", + "\n", + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')\n", + "\n", + "experiment_name = 'automl-sentence-similarity'\n", + "project_folder = './automl-sentence-similarity'\n", + "\n", + "experiment = Experiment(ws, experiment_name)\n", + "experiment\n", + "\n", + "from azureml.core.compute import ComputeTarget, AmlCompute\n", + "# choose a name for your cluster\n", + "cluster_name = \"gpucluster\"\n", + "\n", + "try:\n", + " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", + " print('Found existing compute target.')\n", + "except ComputeTargetException:\n", + " print('Creating a new compute target...')\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", + " max_nodes=4)\n", + "\n", + " # create the cluster\n", + " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", + "\n", + " compute_target.wait_for_completion(show_output=True)\n", + "\n", + "# use get_status() to get a detailed status for the current AmlCompute. 
\n", + "print(compute_target.get_status().serialize())\n", + "\n", + "# Load in the raw datasets as pandas dataframes\n", + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")\n", + "\n", + "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", + "# and renaming the remaining columns\n", + "train = stsbenchmark.clean_sts(train_raw)\n", + "dev = stsbenchmark.clean_sts(dev_raw)\n", + "test = stsbenchmark.clean_sts(test_raw)\n", + "\n", + "if not os.path.isdir('data'):\n", + " os.mkdir('data')\n", + " \n", + "if not os.path.exists(project_folder):\n", + " os.makedirs(project_folder)\n", + "\n", + "train.to_csv(\"data/train.csv\", index=False)\n", + "# test.to_csv(\"data/test.csv\", index=False)\n", + "dev.to_csv(\"data/dev.csv\", index=False)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Uploading ./data\\dev.csv\n", + "Uploading ./data\\train.csv\n", + "Uploaded ./data\\dev.csv, 1 files out of an estimated total of 2\n", + "Uploaded ./data\\train.csv, 2 files out of an estimated total of 2\n" + ] + } + ], + "source": [ + "from azureml.data.data_reference import DataReference \n", + "\n", + "ds = ws.datastores['workspacefilestore']#.get_default_datastore()\n", + "ds.upload(src_dir='./data', target_path='stsbenchmark_data', overwrite=True, show_progress=True)\n", + "\n", + "input_data = DataReference(datastore=ds, \n", + " data_reference_name=\"stsbenchmark\",\n", + " path_on_datastore='stsbenchmark_data/',\n", + " overwrite=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "aml_run_config = RunConfiguration()\n", + "\n", + "# Use the aml_compute you created above. 
\n", + "aml_run_config.target = aml_compute\n", + "\n", + "# Enable Docker\n", + "aml_run_config.environment.docker.enabled = True\n", + "\n", + "# Set Docker base image to the default CPU-based image\n", + "aml_run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base:0.2.1\"\n", + "\n", + "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n", + "aml_run_config.environment.python.user_managed_dependencies = False\n", + "\n", + "# Auto-prepare the Docker image when used for execution (if it is not already prepared)\n", + "aml_run_config.auto_prepare_environment = True\n", + "\n", + "# Specify CondaDependencies obj, add necessary packages\n", + "aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n", + " conda_packages=['pandas','scikit-learn'], \n", + " pip_packages=['azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33'], \n", + " pin_sdk_version=False)\n", + "\n", + "print (\"Run configuration created.\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "run config is ready\n" + ] + } + ], + "source": [ + "# create a new RunConfig object\n", + "conda_run_config = RunConfiguration(framework=\"python\")\n", + "\n", + "# Set compute target to AmlCompute\n", + "conda_run_config.target = compute_target\n", + "\n", + "conda_run_config.environment.docker.enabled = True\n", + "conda_run_config.environment.docker.base_image = aml.core.runconfig.DEFAULT_CPU_IMAGE\n", + "\n", + "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n", + "conda_run_config.environment.python.user_managed_dependencies = False\n", + "\n", + "conda_run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33'], \n", + " conda_packages=['numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], \n", + " pin_sdk_version=False)\n", + "\n", + "\n", + "print('run config is ready')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "embedded_data = PipelineData(\"embedded_data\", datastore=ds)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting ./automl-sentence-similarity/embed.py\n" + ] + } + ], + "source": [ + "%%writefile $project_folder/embed.py\n", + "import argparse\n", + "import os\n", + "import azureml.core\n", + "import pandas as pd\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "def google_encoder(dataset):\n", + " \"\"\" Function that embeds sentences using the Google Universal\n", + " Sentence Encoder pretrained model\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " emb1: 512-dimensional representation of sentence1\n", + " emb2: 512-dimensional representation of sentence2\n", + " \"\"\"\n", + " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", + " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", + "\n", + " # Apply embedding model and normalize the input\n", + " sts_encode1 = 
tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", + " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", + " \n", + " with tf.Session() as session:\n", + " session.run(tf.global_variables_initializer())\n", + " session.run(tf.tables_initializer())\n", + " emb1, emb2 = session.run(\n", + " [sts_encode1, sts_encode2],\n", + " feed_dict={\n", + " sts_input1: dataset['sentence1'],\n", + " sts_input2: dataset['sentence2']\n", + " })\n", + " return emb1, emb2\n", + "\n", + "def feature_engineering(dataset):\n", + " \"\"\"Extracts embedding features from the dataset and returns\n", + " features and target in a dataframe\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " df: pandas dataframe with embedding features\n", + " scores: list of target variables\n", + " \"\"\"\n", + " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", + " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", + " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", + " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", + " df = pd.DataFrame(df, columns=names)\n", + " return df, dataset['score']\n", + "\n", + "def write_output(df, path, name):\n", + " os.makedirs(path, exist_ok=True)\n", + " print(\"%s created\" % path)\n", + " df.to_csv(path + \"/\" + name, index=False)\n", + "\n", + "parser = argparse.ArgumentParser()\n", + "parser.add_argument(\"--sentence_data\", type=str)\n", + "parser.add_argument(\"--embedded_data\", type=str)\n", + "args = parser.parse_args()\n", + "\n", + "# Import the Universal Sentence Encoder's TF Hub module\n", + "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", + "embedding_model = hub.Module(module_url)\n", + "\n", + "train = pd.read_csv(args.sentence_data + \"/train.csv\")\n", + "dev = pd.read_csv(args.sentence_data + \"/dev.csv\")\n", + "\n", + "training_data, training_scores = feature_engineering(train)\n", + "validation_data, validation_scores = feature_engineering(dev)\n", + "\n", + "write_output(training_data, args.embedded_data, \"X_train.csv\")\n", + "write_output(pd.DataFrame(training_scores, columns=['score']), args.embedded_data, \"y_train.csv\")\n", + "\n", + "write_output(validation_data, args.embedded_data, \"X_dev.csv\")\n", + "write_output(pd.DataFrame(validation_scores, columns=['score']), args.embedded_data, \"y_dev.csv\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "embedStep = PythonScriptStep(\n", + " name=\"Embed\",\n", + " script_name=\"embed.py\", \n", + " arguments=[\"--embedded_data\", embedded_data,\n", + " \"--sentence_data\", input_data],\n", + " inputs=[input_data],\n", + " outputs=[embedded_data],\n", + " compute_target=compute_target,\n", + " runconfig = conda_run_config,\n", + " hash_paths=[\"embed.py\"],\n", + " source_directory=project_folder,\n", + " allow_reuse=True\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting ./automl-sentence-similarity/get_data.py\n" + ] + } + ], + "source": [ + "%%writefile $project_folder/get_data.py\n", + "\n", + "import os\n", + "import pandas as pd\n", + "\n", + "def get_data():\n", + " print(\"In get_data\")\n", + " 
print(os.environ['AZUREML_DATAREFERENCE_embedded_data'])\n", + " X_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_train.csv\")\n", + " y_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_train.csv\")\n", + " X_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_dev.csv\")\n", + " y_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_dev.csv\")\n", + " \n", + " return { \"X\" : X_train.values, \"y\" : y_train.values.flatten(), \"X_valid\": X_dev.values, \"y_valid\": y_dev.values.flatten()}" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "automl_settings = {\n", + " \"iteration_timeout_minutes\": 5,\n", + " \"iterations\": 5,\n", + " \"primary_metric\": 'spearman_correlation',\n", + " \"preprocess\": True,\n", + " \"verbosity\": logging.INFO,\n", + "}\n", + "automl_config = AutoMLConfig(task = 'regression',\n", + " debug_log = 'automl_errors.log',\n", + " path = project_folder,\n", + " compute_target=compute_target,\n", + " run_configuration=conda_run_config,\n", + " data_script = project_folder + \"/get_data.py\",\n", + " **automl_settings\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.pipeline.core import PipelineData, TrainingOutput\n", + "metrics_output_name = 'metrics_output'\n", + "best_model_output_name = 'best_model_output'\n", + "\n", + "metrics_data = PipelineData(name='metrics_data',\n", + " datastore=ds,\n", + " pipeline_output_name=metrics_output_name,\n", + " training_output=TrainingOutput(type='Metrics'))\n", + "model_data = PipelineData(name='model_data',\n", + " datastore=ds,\n", + " pipeline_output_name=best_model_output_name,\n", + " training_output=TrainingOutput(type='Model'))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "from azureml.train.automl import AutoMLStep\n", + "\n", + "automl_step = AutoMLStep(\n", + " name='automl_step',\n", + " automl_config=automl_config,\n", + " inputs=[embedded_data],\n", + " outputs=[metrics_data, model_data],\n", + " hash_paths=[\"get_data.py\"],\n", + " allow_reuse=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "In get_data\n" + ] + } + ], + "source": [ + "from azureml.pipeline.core import Pipeline\n", + "\n", + "automl_step.run_after(embedStep)\n", + "pipeline = Pipeline(\n", + " description=\"pipeline_embed_automl\",\n", + " workspace=ws, \n", + " steps=[automl_step])" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Created step automl_step [db9288ed][21fb4afa-ca46-4929-b39e-49a272ff0cbd], (This step is eligible to reuse a previous run's output)\n", + "Created step Embed [70352e68][d271deed-bd3b-4e41-9814-29fc11e585b4], (This step is eligible to reuse a previous run's output)\n", + "Using data reference stsbenchmark for StepId [a1aa29ca][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", + "Submitted pipeline run: 50a80cb2-8adb-4cd5-a337-c493404b7549\n" + ] + } + ], + "source": [ + "pipeline_run = experiment.submit(pipeline)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + 
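The `metrics_data` and `model_data` outputs declared above with `TrainingOutput` become retrievable pipeline outputs once the run completes. A minimal sketch of pulling them down, assuming the SDK version used here exposes `get_pipeline_output` on the pipeline run (names reused from the cells above):

```python
# Sketch only: retrieve the AutoML step's declared outputs after the pipeline run finishes.
# Assumes pipeline_run, metrics_output_name, and best_model_output_name from the cells above.
pipeline_run.wait_for_completion(show_output=False)

metrics_output = pipeline_run.get_pipeline_output(metrics_output_name)
metrics_output.download(".")  # downloads the metrics file produced by the AutoML step

model_output = pipeline_run.get_pipeline_output(best_model_output_name)
model_output.download(".")    # downloads the serialized best model
```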
"application/vnd.jupyter.widget-view+json": { + "model_id": "3e86e9c25d3d4e509c9a2d0d2ccbe486", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_PipelineWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', '…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from azureml.widgets import RunDetails\n", + "RunDetails(pipeline_run).show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PipelineRunId: 50a80cb2-8adb-4cd5-a337-c493404b7549\n", + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/50a80cb2-8adb-4cd5-a337-c493404b7549\n", + "PipelineRun Status: Running\n", + "\n", + "\n", + "StepRunId: f78cb325-802a-4779-ada8-05db82c97835\n", + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/f78cb325-802a-4779-ada8-05db82c97835\n", + "StepRun( Embed ) Status: NotStarted\n", + "\n", + "Streaming azureml-logs/20_image_build_log.txt\n", + "=============================================\n", + "StepRun( Embed ) Status: Running\n", + "2019/06/18 20:46:34 Downloading source code...\n", + "2019/06/18 20:46:35 Finished downloading source code\n", + "2019/06/18 20:46:35 Using acb_vol_35e6dda2-50a4-4121-9aa7-690a7b1c1e12 as the home volume\n", + "2019/06/18 20:46:35 Creating Docker network: acb_default_network, driver: 'bridge'\n", + "2019/06/18 20:46:36 Successfully set up Docker network: acb_default_network\n", + "2019/06/18 20:46:36 Setting up Docker configuration...\n", + "2019/06/18 20:46:37 Successfully set up Docker configuration\n", + "2019/06/18 20:46:37 Logging in to registry: maidaptestc9922809.azurecr.io\n", + "2019/06/18 20:46:38 Successfully logged into maidaptestc9922809.azurecr.io\n", + "2019/06/18 20:46:38 Executing step ID: acb_step_0. 
Timeout(sec): 1800, Working directory: '', Network: 'acb_default_network'\n", + "2019/06/18 20:46:38 Scanning for dependencies...\n", + "2019/06/18 20:46:38 Successfully scanned dependencies\n", + "2019/06/18 20:46:38 Launching container with name: acb_step_0\n", + "Sending build context to Docker daemon 46.59kB\n", + "\n", + "Step 1/15 : FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04@sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b\n", + "sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b: Pulling from azureml/base\n", + "Digest: sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b\n", + "Status: Downloaded newer image for mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04@sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b\n", + " ---> a62e2769d877\n", + "Step 2/15 : USER root\n", + " ---> Running in e13363f06f21\n", + "Removing intermediate container e13363f06f21\n", + " ---> 0c6b3062030f\n", + "Step 3/15 : RUN mkdir -p $HOME/.cache\n", + " ---> Running in 3b1110f2ad4a\n", + "Removing intermediate container 3b1110f2ad4a\n", + " ---> 371d5ca51f9f\n", + "Step 4/15 : WORKDIR /\n", + " ---> Running in 4ab46653a217\n", + "Removing intermediate container 4ab46653a217\n", + " ---> ca6f18a2a1af\n", + "Step 5/15 : COPY azureml-setup/99brokenproxy /etc/apt/apt.conf.d/\n", + " ---> 532467333a34\n", + "Step 6/15 : RUN if dpkg --compare-versions `conda --version | grep -oE '[^ ]+$'` lt 4.4.11; then conda install conda==4.4.11; fi\n", + " ---> Running in 1aa523971ab3\n", + "Removing intermediate container 1aa523971ab3\n", + " ---> 20d95b09e00e\n", + "Step 7/15 : COPY azureml-setup/mutated_conda_dependencies.yml azureml-setup/mutated_conda_dependencies.yml\n", + " ---> 4e4acdcc0a95\n", + "Step 8/15 : RUN ldconfig /usr/local/cuda/lib64/stubs && conda env create -p /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b -f azureml-setup/mutated_conda_dependencies.yml && rm -rf \"$HOME/.cache/pip\" && conda clean -aqy && CONDA_ROOT_DIR=$(conda info --root) && rm -rf \"$CONDA_ROOT_DIR/pkgs\" && find \"$CONDA_ROOT_DIR\" -type d -name __pycache__ -exec rm -rf {} + && ldconfig\n", + " ---> Running in ace0ce861df6\n", + "Solving environment: ...working... done\n", + "\n", + "\n", + "==> WARNING: A newer version of conda exists. 
<==\n", + " current version: 4.5.11\n", + " latest version: 4.6.14\n", + "\n", + "Please update conda by running\n", + "\n", + " $ conda update -n base -c defaults conda\n", + "\n", + "\n", + "\n", + "zlib-1.2.11 | 101 KB | | 0% \n", + "zlib-1.2.11 | 101 KB | ########## | 100% \n", + "\n", + "libprotobuf-3.8.0 | 4.7 MB | | 0% \n", + "libprotobuf-3.8.0 | 4.7 MB | #######4 | 74% \n", + "libprotobuf-3.8.0 | 4.7 MB | #########6 | 97% \n", + "libprotobuf-3.8.0 | 4.7 MB | ########## | 100% \n", + "\n", + "h5py-2.9.0 | 1.2 MB | | 0% \n", + "h5py-2.9.0 | 1.2 MB | ######## | 81% \n", + "h5py-2.9.0 | 1.2 MB | ########## | 100% \n", + "\n", + "sqlite-3.13.0 | 4.9 MB | | 0% \n", + "sqlite-3.13.0 | 4.9 MB | #######5 | 76% \n", + "sqlite-3.13.0 | 4.9 MB | #########8 | 98% \n", + "sqlite-3.13.0 | 4.9 MB | ########## | 100% \n", + "\n", + "c-ares-1.15.0 | 98 KB | | 0% \n", + "c-ares-1.15.0 | 98 KB | ########## | 100% \n", + "\n", + "certifi-2019.3.9 | 149 KB | | 0% \n", + "certifi-2019.3.9 | 149 KB | ########## | 100% \n", + "\n", + "absl-py-0.7.1 | 154 KB | | 0% \n", + "absl-py-0.7.1 | 154 KB | ########## | 100% \n", + "\n", + "keras-preprocessing- | 33 KB | | 0% \n", + "keras-preprocessing- | 33 KB | ########## | 100% \n", + "\n", + "py-xgboost-0.82 | 70 KB | | 0% \n", + "py-xgboost-0.82 | 70 KB | ########## | 100% \n", + "\n", + "_py-xgboost-mutex-2. | 8 KB | | 0% \n", + "_py-xgboost-mutex-2. | 8 KB | ########## | 100% \n", + "\n", + "xz-5.2.4 | 366 KB | | 0% \n", + "xz-5.2.4 | 366 KB | #########4 | 95% \n", + "xz-5.2.4 | 366 KB | ########## | 100% \n", + "\n", + "libgfortran-ng-7.3.0 | 1.3 MB | | 0% \n", + "libgfortran-ng-7.3.0 | 1.3 MB | #######9 | 80% \n", + "libgfortran-ng-7.3.0 | 1.3 MB | ########## | 100% \n", + "\n", + "mock-3.0.5 | 44 KB | | 0% \n", + "mock-3.0.5 | 44 KB | ########## | 100% \n", + "\n", + "openblas-0.3.5 | 15.8 MB | | 0% \n", + "openblas-0.3.5 | 15.8 MB | ###1 | 32% \n", + "openblas-0.3.5 | 15.8 MB | #######5 | 75% \n", + "openblas-0.3.5 | 15.8 MB | #########4 | 95% \n", + "openblas-0.3.5 | 15.8 MB | ########## | 100% \n", + "\n", + "libcblas-3.8.0 | 6 KB | | 0% \n", + "libcblas-3.8.0 | 6 KB | ########## | 100% \n", + "\n", + "ncurses-5.9 | 1.1 MB | | 0% \n", + "ncurses-5.9 | 1.1 MB | #######8 | 79% \n", + "ncurses-5.9 | 1.1 MB | ########7 | 88% \n", + "ncurses-5.9 | 1.1 MB | #########7 | 97% \n", + "ncurses-5.9 | 1.1 MB | ########## | 100% \n", + "\n", + "protobuf-3.8.0 | 683 KB | | 0% \n", + "protobuf-3.8.0 | 683 KB | ########3 | 83% \n", + "protobuf-3.8.0 | 683 KB | ########## | 100% \n", + "\n", + "libgcc-ng-9.1.0 | 8.1 MB | | 0% \n", + "libgcc-ng-9.1.0 | 8.1 MB | #######5 | 75% \n", + "libgcc-ng-9.1.0 | 8.1 MB | #########7 | 98% \n", + "libgcc-ng-9.1.0 | 8.1 MB | ########## | 100% \n", + "\n", + "setuptools-41.0.1 | 612 KB | | 0% \n", + "setuptools-41.0.1 | 612 KB | ########5 | 85% \n", + "setuptools-41.0.1 | 612 KB | ########## | 100% \n", + "\n", + "astor-0.7.1 | 22 KB | | 0% \n", + "astor-0.7.1 | 22 KB | ########## | 100% \n", + "\n", + "wheel-0.33.4 | 34 KB | | 0% \n", + "wheel-0.33.4 | 34 KB | ########## | 100% \n", + "\n", + "scikit-learn-0.21.2 | 6.7 MB | | 0% \n", + "scikit-learn-0.21.2 | 6.7 MB | #######5 | 75% \n", + "scikit-learn-0.21.2 | 6.7 MB | #########7 | 98% \n", + "scikit-learn-0.21.2 | 6.7 MB | ########## | 100% \n", + "\n", + "hdf5-1.10.4 | 5.3 MB | | 0% \n", + "hdf5-1.10.4 | 5.3 MB | #######6 | 76% \n", + "hdf5-1.10.4 | 5.3 MB | #########9 | 99% \n", + "hdf5-1.10.4 | 5.3 MB | ########## | 100% \n", + "\n", + "pandas-0.24.2 | 11.1 MB | | 0% \n", + 
"pandas-0.24.2 | 11.1 MB | ##3 | 23% \n", + "pandas-0.24.2 | 11.1 MB | #######5 | 75% \n", + "pandas-0.24.2 | 11.1 MB | ########9 | 90% \n", + "pandas-0.24.2 | 11.1 MB | #########9 | 100% \n", + "pandas-0.24.2 | 11.1 MB | ########## | 100% \n", + "\n", + "markdown-2.6.11 | 56 KB | | 0% \n", + "markdown-2.6.11 | 56 KB | ########## | 100% \n", + "\n", + "termcolor-1.1.0 | 6 KB | | 0% \n", + "termcolor-1.1.0 | 6 KB | ########## | 100% \n", + "\n", + "ca-certificates-2019 | 145 KB | | 0% \n", + "ca-certificates-2019 | 145 KB | ########## | 100% \n", + "\n", + "pytz-2019.1 | 227 KB | | 0% \n", + "pytz-2019.1 | 227 KB | #########4 | 94% \n", + "pytz-2019.1 | 227 KB | ########## | 100% \n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "pip-19.1.1 | 1.8 MB | | 0% \n", + "pip-19.1.1 | 1.8 MB | #######8 | 79% \n", + "pip-19.1.1 | 1.8 MB | #########4 | 94% \n", + "pip-19.1.1 | 1.8 MB | ########## | 100% \n", + "\n", + "tensorboard-1.13.1 | 3.3 MB | | 0% \n", + "tensorboard-1.13.1 | 3.3 MB | #######6 | 76% \n", + "tensorboard-1.13.1 | 3.3 MB | #########9 | 99% \n", + "tensorboard-1.13.1 | 3.3 MB | ########## | 100% \n", + "\n", + "werkzeug-0.15.4 | 254 KB | | 0% \n", + "werkzeug-0.15.4 | 254 KB | #########7 | 97% \n", + "werkzeug-0.15.4 | 254 KB | ########## | 100% \n", + "\n", + "python-3.6.2 | 19.0 MB | | 0% \n", + "python-3.6.2 | 19.0 MB | ##7 | 27% \n", + "python-3.6.2 | 19.0 MB | ######3 | 64% \n", + "python-3.6.2 | 19.0 MB | ######## | 80% \n", + "python-3.6.2 | 19.0 MB | #########2 | 92% \n", + "python-3.6.2 | 19.0 MB | ########## | 100% \n", + "\n", + "gast-0.2.2 | 10 KB | | 0% \n", + "gast-0.2.2 | 10 KB | ########## | 100% \n", + "\n", + "libstdcxx-ng-9.1.0 | 4.0 MB | | 0% \n", + "libstdcxx-ng-9.1.0 | 4.0 MB | #######6 | 77% \n", + "libstdcxx-ng-9.1.0 | 4.0 MB | #########8 | 98% \n", + "libstdcxx-ng-9.1.0 | 4.0 MB | ########## | 100% \n", + "\n", + "openssl-1.0.2r | 3.1 MB | | 0% \n", + "openssl-1.0.2r | 3.1 MB | #######7 | 78% \n", + "openssl-1.0.2r | 3.1 MB | #########7 | 98% \n", + "openssl-1.0.2r | 3.1 MB | ########## | 100% \n", + "\n", + "tensorflow-estimator | 205 KB | | 0% \n", + "tensorflow-estimator | 205 KB | ########## | 100% \n", + "\n", + "liblapack-3.8.0 | 6 KB | | 0% \n", + "liblapack-3.8.0 | 6 KB | ########## | 100% \n", + "\n", + "readline-6.2 | 713 KB | | 0% \n", + "readline-6.2 | 713 KB | ########7 | 87% \n", + "readline-6.2 | 713 KB | ########## | 100% \n", + "\n", + "six-1.12.0 | 22 KB | | 0% \n", + "six-1.12.0 | 22 KB | ########## | 100% \n", + "\n", + "tensorflow-hub-0.4.0 | 52 KB | | 0% \n", + "tensorflow-hub-0.4.0 | 52 KB | ########## | 100% \n", + "\n", + "scipy-1.3.0 | 18.8 MB | | 0% \n", + "scipy-1.3.0 | 18.8 MB | ##8 | 29% \n", + "scipy-1.3.0 | 18.8 MB | #######3 | 74% \n", + "scipy-1.3.0 | 18.8 MB | #########3 | 93% \n", + "scipy-1.3.0 | 18.8 MB | ########## | 100% \n", + "\n", + "tensorflow-1.13.1 | 77.2 MB | | 0% \n", + "tensorflow-1.13.1 | 77.2 MB | 5 | 6% \n", + "tensorflow-1.13.1 | 77.2 MB | #3 | 13% \n", + "tensorflow-1.13.1 | 77.2 MB | ##2 | 23% \n", + "tensorflow-1.13.1 | 77.2 MB | ###2 | 32% \n", + "tensorflow-1.13.1 | 77.2 MB | ####2 | 43% \n", + "tensorflow-1.13.1 | 77.2 MB | #####1 | 52% \n", + "tensorflow-1.13.1 | 77.2 MB | ######1 | 61% \n", + "tensorflow-1.13.1 | 77.2 MB | #######1 | 71% \n", + "tensorflow-1.13.1 | 77.2 MB | ######## | 80% \n", + "tensorflow-1.13.1 | 77.2 MB | ########6 | 87% \n", + "tensorflow-1.13.1 | 77.2 MB | #########1 | 91% \n", + "tensorflow-1.13.1 | 77.2 MB | #########4 | 94% \n", + 
"tensorflow-1.13.1 | 77.2 MB | #########6 | 97% \n", + "tensorflow-1.13.1 | 77.2 MB | #########8 | 98% \n", + "tensorflow-1.13.1 | 77.2 MB | #########9 | 100% \n", + "tensorflow-1.13.1 | 77.2 MB | ########## | 100% \n", + "\n", + "libblas-3.8.0 | 6 KB | | 0% \n", + "libblas-3.8.0 | 6 KB | ########## | 100% \n", + "\n", + "joblib-0.13.2 | 180 KB | | 0% \n", + "joblib-0.13.2 | 180 KB | 6 | 7% \n", + "joblib-0.13.2 | 180 KB | ########## | 100% \n", + "\n", + "keras-applications-1 | 31 KB | | 0% \n", + "keras-applications-1 | 31 KB | ########## | 100% \n", + "\n", + "grpcio-1.16.0 | 1.0 MB | | 0% \n", + "grpcio-1.16.0 | 1.0 MB | ########3 | 83% \n", + "grpcio-1.16.0 | 1.0 MB | ########## | 100% \n", + "\n", + "libxgboost-0.82 | 3.9 MB | | 0% \n", + "libxgboost-0.82 | 3.9 MB | #######6 | 76% \n", + "libxgboost-0.82 | 3.9 MB | #########5 | 96% \n", + "libxgboost-0.82 | 3.9 MB | ########## | 100% \n", + "\n", + "python-dateutil-2.8. | 219 KB | | 0% \n", + "python-dateutil-2.8. | 219 KB | ########## | 100% \n", + "\n", + "numpy-1.16.4 | 4.3 MB | | 0% \n", + "numpy-1.16.4 | 4.3 MB | #######6 | 76% \n", + "numpy-1.16.4 | 4.3 MB | ########9 | 89% \n", + "numpy-1.16.4 | 4.3 MB | #########9 | 99% \n", + "numpy-1.16.4 | 4.3 MB | ########## | 100% \n", + "\n", + "tk-8.5.19 | 1.9 MB | | 0% \n", + "tk-8.5.19 | 1.9 MB | #######7 | 78% \n", + "tk-8.5.19 | 1.9 MB | ######### | 91% \n", + "tk-8.5.19 | 1.9 MB | ########## | 100% \n", + "Downloading and Extracting Packages\n", + "Preparing transaction: ...working... done\n", + "Verifying transaction: ...working... done\n", + "Executing transaction: ...working... done\n", + "Collecting azureml-sdk (from -r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/75/5d/b9a03efc12c2d18bac509cc8754c3015ee70a50749a63f3b1ba0070c01de/azureml_sdk-1.0.43-py3-none-any.whl\n", + "Collecting azureml-dataprep (from -r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", + " Downloading https://files.pythonhosted.org/packages/bd/ec/dd8521421adaf64264aa26ab31a8be4ffd01c29d0600497eed7b955868ac/azureml_dataprep-1.1.5-py3-none-any.whl (23.9MB)\n", + "Collecting azureml-train-automl==1.0.33 (from -r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/63/30/9dbf0166f81f2a0b0bb751f74a5a9fe5e491703a1858af85b7b127917320/azureml_train_automl-1.0.33-py3-none-any.whl (3.9MB)\n", + "Collecting azureml-core==1.0.43.* (from azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/f6/b2/ba8fde6c28251cec7fee4f6040ba13476a42ecbc138785bf958a5f500704/azureml_core-1.0.43.1-py2.py3-none-any.whl (937kB)\n", + "Collecting azureml-pipeline==1.0.43.* (from azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/67/31/9266e565b2965616ed694aabb70f035f01627f8e7cfeff48553c3631f0d7/azureml_pipeline-1.0.43-py3-none-any.whl\n", + "Collecting azureml-train==1.0.43.* (from azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/67/e4/b5a3d84ac40ceaf4203ca0ef0629e8de9c27edefd9ba0e7c32f5630f1930/azureml_train-1.0.43-py3-none-any.whl\n", + "Collecting azureml-dataprep-native<14.0.0,>=13.0.0 (from azureml-dataprep->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", + " Downloading 
https://files.pythonhosted.org/packages/b8/b1/3b38a679a77eabad4e62157ad7f5e783003c0b2cdbef4fcc20b0ebafab98/azureml_dataprep_native-13.0.0-cp36-cp36m-manylinux1_x86_64.whl (1.3MB)\n", + "Collecting dotnetcore2==2.1.8 (from azureml-dataprep->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", + " Downloading https://files.pythonhosted.org/packages/4f/a1/3224449b9fd886f2be43173d47e8c9d42e2291bd4e6f0a9706ca6d0a4807/dotnetcore2-2.1.8-py3-none-manylinux1_x86_64.whl (29.3MB)\n", + "Collecting dill>=0.2.8 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/fe/42/bfe2e0857bc284cbe6a011d93f2a9ad58a22cb894461b199ae72cfef0f29/dill-0.2.9.tar.gz (150kB)\n", + "Collecting resource>=0.1.8 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/34/ad/9cd037c01c075f9a273c23557f8e71195d773d59d3881bbb26011d396c8b/Resource-0.2.1-py2.py3-none-any.whl\n", + "Collecting azureml-telemetry==1.0.33.* (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/cd/12/eb308994938105cb80357c57201a48a16388531b9961cffb543073ee80d0/azureml_telemetry-1.0.33-py3-none-any.whl\n", + "Collecting skl2onnx==1.4.5; python_version < \"3.7\" (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Downloading https://files.pythonhosted.org/packages/a2/17/99dcc4c5881fbbf73151c2e66cfe2b3ad0664e114f0f06d987c2c21afb86/skl2onnx-1.4.5-py2.py3-none-any.whl (171kB)\n", + "Requirement already satisfied: pytz in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (2019.1)\n", + "Collecting onnxmltools==1.4.0; python_version < \"3.7\" (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/6f/0d/83201824e7693b63eac297e294cfa8af69f70b1b56492d40d580ba44bb6a/onnxmltools-1.4.0-py2.py3-none-any.whl (326kB)\n", + "Collecting azureml-pipeline-core==1.0.33.* (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/6a/9b/86ef6afa306d46f4f443dc9a9d0e09ef05abf8ccfdaaf19474a4fe0eaeb7/azureml_pipeline_core-1.0.33-py2.py3-none-any.whl (162kB)\n", + "Collecting numpy<=1.16.2,>=1.11.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/35/d5/4f8410ac303e690144f0a0603c4b8fd3b986feb2749c435f7cdbb288f17e/numpy-1.16.2-cp36-cp36m-manylinux1_x86_64.whl (17.3MB)\n", + "Collecting lightgbm<=2.2.1,>=2.0.11 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/78/7e/bc87e7951cfaa998cffaf39e6c721f5bd04efb2e139486206356edb289a5/lightgbm-2.2.1-py2.py3-none-manylinux1_x86_64.whl (1.1MB)\n", + "Collecting gensim (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading 
https://files.pythonhosted.org/packages/d3/4b/19eecdf07d614665fa889857dc56ac965631c7bd816c3476d2f0cac6ea3b/gensim-3.7.3-cp36-cp36m-manylinux1_x86_64.whl (24.2MB)\n", + "Collecting sklearn-pandas<=1.7.0,>=1.4.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/7e/9c/c94f46b40b86d2c77c46c4c1b858fc66c117b4390665eca28f2e0812db45/sklearn_pandas-1.7.0-py2.py3-none-any.whl\n", + "Collecting scikit-learn<=0.20.3,>=0.19.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/5e/82/c0de5839d613b82bddd088599ac0bbfbbbcbd8ca470680658352d2c435bd/scikit_learn-0.20.3-cp36-cp36m-manylinux1_x86_64.whl (5.4MB)\n", + "Collecting wheel==0.30.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/0c/80/16a85b47702a1f47a63c104c91abdd0a6704ee8ae3b4ce4afc49bc39f9d9/wheel-0.30.0-py2.py3-none-any.whl (49kB)\n", + "Collecting pandas<=0.23.4,>=0.21.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/e1/d8/feeb346d41f181e83fba45224ab14a8d8af019b48af742e047f3845d8cff/pandas-0.23.4-cp36-cp36m-manylinux1_x86_64.whl (8.9MB)\n", + "Collecting scipy<=1.1.0,>=1.0.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/a8/0b/f163da98d3a01b3e0ef1cab8dd2123c34aee2bafbb1c5bffa354cc8a1730/scipy-1.1.0-cp36-cp36m-manylinux1_x86_64.whl (31.2MB)\n", + "Collecting nimbusml==0.6.5; python_version < \"3.7\" (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/e4/7b/d7d2ccfd778df731ae661caa15dc58ef4ef0bffa236176296bd7b9620c8d/nimbusml-0.6.5-cp36-none-manylinux1_x86_64.whl (60.1MB)\n", + "Collecting PyJWT (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/87/8b/6a9f14b5f781697e51259d81657e6048fd31a113229cf346880bb7545565/PyJWT-1.7.1-py2.py3-none-any.whl\n", + "Collecting requests>=2.19.1 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/requests-2.22.0-py2.py3-none-any.whl (57kB)\n", + "Collecting SecretStorage (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/82/59/cb226752e20d83598d7fdcabd7819570b0329a61db07cfbdd21b2ef546e3/SecretStorage-3.1.1-py3-none-any.whl\n", + "Collecting azure-graphrbac>=0.40.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/3e/93/02056aca45162f9fc275d1eaad12a2a07ef92375afb48eabddc4134b8315/azure_graphrbac-0.61.1-py2.py3-none-any.whl (141kB)\n", + "Collecting msrest>=0.5.1 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading 
https://files.pythonhosted.org/packages/9a/23/eea6c1fce5b24366b48f270c23f043f976eb0d4248eb3cb7e62b0f602bcd/msrest-0.6.7-py2.py3-none-any.whl (81kB)\n", + "Collecting pathspec (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz\n", + "Collecting urllib3>=1.23 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/e6/60/247f23a7121ae632d62811ba7f273d0e58972d75e58a94d329d51550a47d/urllib3-1.25.3-py2.py3-none-any.whl (150kB)\n", + "Requirement already satisfied: python-dateutil>=2.7.3 in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1)) (2.8.0)\n", + "Collecting jsonpickle (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/07/07/c157520a3ebd166c8c24c6ae0ecae7c3968eb4653ff0e5af369bb82f004d/jsonpickle-1.2-py2.py3-none-any.whl\n", + "Collecting ndg-httpsclient (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/fb/67/c2f508c00ed2a6911541494504b7cac16fe0b0473912568df65fd1801132/ndg_httpsclient-0.5.1-py3-none-any.whl\n", + "Collecting docker (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/91/93/310fe092039f6b0759a1f8524e9e2c56f8012804fa2a8da4e4289bb74d7c/docker-4.0.1-py2.py3-none-any.whl (138kB)\n", + "Collecting azure-mgmt-resource>=1.2.1 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/97/1e/03813b5705b46d86d8d6d594930b78f14b13d901b5ca089152e06e67b680/azure_mgmt_resource-3.0.0-py2.py3-none-any.whl (468kB)\n", + "Collecting azure-mgmt-authorization>=0.40.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/6b/b2/c0d62a3a91c13641e09af294c13fe16929f88dc5902718388cd9b292217f/azure_mgmt_authorization-0.52.0-py2.py3-none-any.whl (112kB)\n", + "Collecting jmespath (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/83/94/7179c3832a6d45b266ddb2aac329e101367fbdb11f425f13771d27f225bb/jmespath-0.9.4-py2.py3-none-any.whl\n", + "Collecting ruamel.yaml<=0.15.89,>=0.15.35 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " Downloading https://files.pythonhosted.org/packages/36/e1/cc2fa400fa5ffde3efa834ceb15c464075586de05ca3c553753dcd6f1d3b/ruamel.yaml-0.15.89-cp36-cp36m-manylinux1_x86_64.whl (651kB)\n", + "Collecting msrestazure>=0.4.33 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading 
https://files.pythonhosted.org/packages/0a/aa/b17a4f702ecd6d9e989ae34109aa384c988aed0de37215c651165ed45238/msrestazure-0.6.1-py2.py3-none-any.whl (40kB)\n", + "Requirement already satisfied: six>=1.11.0 in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1)) (1.12.0)\n", + "Collecting azure-mgmt-keyvault>=0.40.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/49/de/0d69aedae7c5f6428314640b65947203ab80409c12b5d4e66fb5b7a4182e/azure_mgmt_keyvault-1.1.0-py2.py3-none-any.whl (111kB)\n", + "Collecting adal>=1.2.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/00/72/53dce9e4f5d6c1aa57b8d408cb34dff1969ecbf10ab7e678f32c5e0e2397/adal-1.2.1-py2.py3-none-any.whl (52kB)\n", + "Collecting pyopenssl (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/01/c8/ceb170d81bd3941cbeb9940fc6cc2ef2ca4288d0ca8929ea4db5905d904d/pyOpenSSL-19.0.0-py2.py3-none-any.whl (53kB)\n", + "Collecting azure-mgmt-containerregistry>=2.0.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/97/70/8c2d0509db466678eba16fa2b0a539499f3b351b1f2993126ad843d5be13/azure_mgmt_containerregistry-2.8.0-py2.py3-none-any.whl (718kB)\n", + "Collecting cryptography!=1.9,!=2.0.*,!=2.1.*,!=2.2.* (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/97/18/c6557f63a6abde34707196fb2cad1c6dc0dbff25a200d5044922496668a4/cryptography-2.7-cp34-abi3-manylinux1_x86_64.whl (2.3MB)\n", + "Collecting azure-common>=1.1.12 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/e3/36/9946fa617f458f11766884c76c622810f4c111ee16c08eb8315e88330d66/azure_common-1.1.22-py2.py3-none-any.whl\n", + "Collecting contextlib2 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/a2/71/8273a7eeed0aff6a854237ab5453bc9aa67deb49df4832801c21f0ff3782/contextlib2-0.5.5-py2.py3-none-any.whl\n", + "Collecting azure-mgmt-storage>=1.5.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/8c/03/62c3ed229b9b83fbf4dcd56ae27d5d835f3bd921004c09a478729c221fff/azure_mgmt_storage-4.0.0-py2.py3-none-any.whl (426kB)\n", + "Collecting backports.tempfile (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/b4/5c/077f910632476281428fe254807952eb47ca78e720d059a46178c541e669/backports.tempfile-1.0-py2.py3-none-any.whl\n", + "Collecting azureml-pipeline-steps==1.0.43.* (from azureml-pipeline==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading 
https://files.pythonhosted.org/packages/b9/24/917523741c7d18f4f7447b1e3e8bd00ef97a26f1991426e90ec64d90834a/azureml_pipeline_steps-1.0.43-py3-none-any.whl\n", + "Collecting azureml-train-core==1.0.43.* (from azureml-train==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/2c/af/e6f15c939c7275fe2e630fc9c502f4c0879acb28bd930fcac6083d597766/azureml_train_core-1.0.43-py3-none-any.whl (61kB)\n", + "Collecting distro>=1.2.0 (from dotnetcore2==2.1.8->azureml-dataprep->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", + " Downloading https://files.pythonhosted.org/packages/ea/35/82f79b92fa4d937146c660a6482cee4f3dfa1f97ff3d2a6f3ecba33e712e/distro-1.4.0-py2.py3-none-any.whl\n", + "Collecting JsonSir>=0.0.2 (from resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/aa/bf/5c00c1dafaa3ca2c32e7641d9c2c6f9d6d76e127bde00eb600333a60c5bc/JsonSir-0.0.2.tar.gz\n", + "Collecting python-easyconfig>=0.1.0 (from resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/b1/86/1138081cca360a02066eedaf301d0f358c35e0e0d67572acf9d6354edca9/Python_EasyConfig-0.1.7-py2.py3-none-any.whl\n", + "Collecting JsonForm>=0.0.2 (from resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/4f/b7/b9491ba4b709d0616fab15a89f8efe4d3a7924652e1fdd4f15303e9ecdf0/JsonForm-0.0.2.tar.gz\n", + "Collecting applicationinsights (from azureml-telemetry==1.0.33.*->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/a1/53/234c53004f71f0717d8acd37876e0b65c121181167057b9ce1b1795f96a0/applicationinsights-0.11.9-py2.py3-none-any.whl (58kB)\n", + "Requirement already satisfied: protobuf in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from skl2onnx==1.4.5; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (3.8.0)\n", + "Collecting onnx (from skl2onnx==1.4.5; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/88/50/e4a5a869093f35884d1fd95b46b24705ab27adb7e562a2a307523c043be3/onnx-1.5.0-cp36-cp36m-manylinux1_x86_64.whl (7.0MB)\n", + "Collecting keras2onnx (from onnxmltools==1.4.0; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/1a/b0/6f3012cb0c959203dd3ce05e0fc61c9112f0d4043fdf917cf665d8c53254/keras2onnx-1.5.0-py3-none-any.whl (186kB)\n", + "Collecting smart-open>=1.7.0 (from gensim->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/37/c0/25d19badc495428dec6a4bf7782de617ee0246a9211af75b302a2681dea7/smart_open-1.8.4.tar.gz (63kB)\n", + "Collecting idna<2.9,>=2.5 (from requests>=2.19.1->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading 
https://files.pythonhosted.org/packages/14/2c/cd551d81dbe15200be1cf41cd03869a46fe7226e7450af7a6545bfc474c9/idna-2.8-py2.py3-none-any.whl (58kB)\n", + "Collecting chardet<3.1.0,>=3.0.2 (from requests>=2.19.1->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/bc/a9/01ffebfb562e4274b6487b4bb1ddec7ca55ec7510b22e4c51f14098443b8/chardet-3.0.4-py2.py3-none-any.whl (133kB)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from requests>=2.19.1->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1)) (2019.3.9)\n", + "Collecting jeepney (from SecretStorage->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/2b/f7/ff23b9b59534f501d47c327576aadda59da5b83d76ff837e6075bc325b9f/jeepney-0.4-py3-none-any.whl (59kB)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Collecting requests-oauthlib>=0.5.0 (from msrest>=0.5.1->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/c2/e2/9fd03d55ffb70fe51f587f20bcf407a6927eb121de86928b34d162f0b1ac/requests_oauthlib-1.2.0-py2.py3-none-any.whl\n", + "Collecting isodate>=0.6.0 (from msrest>=0.5.1->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/9b/9f/b36f7774ff5ea8e428fdcfc4bb332c39ee5b9362ddd3d40d9516a55221b2/isodate-0.6.0-py2.py3-none-any.whl (45kB)\n", + "Collecting pyasn1>=0.1.1 (from ndg-httpsclient->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/7b/7c/c9386b82a25115cccf1903441bba3cbadcfae7b678a20167347fa8ded34c/pyasn1-0.4.5-py2.py3-none-any.whl (73kB)\n", + "Collecting websocket-client>=0.32.0 (from docker->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/29/19/44753eab1fdb50770ac69605527e8859468f3c0fd7dc5a76dd9c4dbd7906/websocket_client-0.56.0-py2.py3-none-any.whl (200kB)\n", + "Collecting azure-mgmt-nspkg>=2.0.0 (from azure-mgmt-keyvault>=0.40.0->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/b3/c2/af4b47845f27dc7d206ed4908b9e580f8bc94a4b2f3956a0d87c40719d90/azure_mgmt_nspkg-3.0.2-py3-none-any.whl\n", + "Collecting asn1crypto>=0.21.0 (from cryptography!=1.9,!=2.0.*,!=2.1.*,!=2.2.*->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/ea/cd/35485615f45f30a510576f1a56d1e0a7ad7bd8ab5ed7cdc600ef7cd06222/asn1crypto-0.24.0-py2.py3-none-any.whl (101kB)\n", + "Collecting cffi!=1.11.3,>=1.8 (from cryptography!=1.9,!=2.0.*,!=2.1.*,!=2.2.*->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/5f/bf/6aa1925384c23ffeb579e97a5569eb9abce41b6310b329352b8252cee1c3/cffi-1.12.3-cp36-cp36m-manylinux1_x86_64.whl (430kB)\n", + "Collecting 
backports.weakref (from backports.tempfile->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/88/ec/f598b633c3d5ffe267aaada57d961c94fdfa183c5c3ebda2b6d151943db6/backports.weakref-1.0.post1-py2.py3-none-any.whl\n", + "Collecting azureml-train-restclients-hyperdrive==1.0.43.* (from azureml-train-core==1.0.43.*->azureml-train==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/14/1f/5a08797b00e0a70fd0420311bb809a740175a1700dc405a84616c62b622d/azureml_train_restclients_hyperdrive-1.0.43-py3-none-any.whl\n", + "Collecting PyYAML (from python-easyconfig>=0.1.0->resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/a3/65/837fefac7475963d1eccf4aa684c23b95aa6c1d033a2c5965ccb11e22623/PyYAML-5.1.1.tar.gz (274kB)\n", + "Collecting jsonschema (from JsonForm>=0.0.2->resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/aa/69/df679dfbdd051568b53c38ec8152a3ab6bc533434fc7ed11ab034bf5e82f/jsonschema-3.0.1-py2.py3-none-any.whl (54kB)\n", + "Requirement already satisfied: setuptools in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from protobuf->skl2onnx==1.4.5; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (41.0.1)\n", + "Collecting typing-extensions>=3.6.2.1 (from onnx->skl2onnx==1.4.5; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/0f/62/c66e553258c37c33f9939abb2dd8d2481803d860ff68e635466f12aa7efa/typing_extensions-3.7.2-py3-none-any.whl\n", + "Collecting typing>=3.6.4 (from onnx->skl2onnx==1.4.5; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/4a/bd/eee1157fc2d8514970b345d69cb9975dcd1e42cd7e61146ed841f6e68309/typing-3.6.6-py3-none-any.whl\n", + "Collecting onnxconverter-common>=1.5.0 (from keras2onnx->onnxmltools==1.4.0; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/5d/e0/1192dacb4b8139758db420484e98d54295d09a7aa458a14d1d581147534a/onnxconverter_common-1.5.0-py2.py3-none-any.whl (40kB)\n", + "Collecting keras (from keras2onnx->onnxmltools==1.4.0; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/5e/10/aa32dad071ce52b5502266b5c659451cfd6ffcbf14e6c8c4f16c0ff5aaab/Keras-2.2.4-py2.py3-none-any.whl (312kB)\n", + "Collecting boto>=2.32 (from smart-open>=1.7.0->gensim->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/23/10/c0b78c27298029e4454a472a1919bde20cb182dab1662cec7f2ca1dcc523/boto-2.49.0-py2.py3-none-any.whl (1.4MB)\n", + "Collecting boto3 (from smart-open>=1.7.0->gensim->azureml-train-automl==1.0.33->-r 
/azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/4e/0e/3158a8eb94f32bf1b1c926c0c9ccfd441657b012d4b4d17e49b36865af54/boto3-1.9.171-py2.py3-none-any.whl (128kB)\n", + "Collecting oauthlib>=3.0.0 (from requests-oauthlib>=0.5.0->msrest>=0.5.1->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/16/95/699466b05b72b94a41f662dc9edf87fda4289e3602ecd42d27fcaddf7b56/oauthlib-3.0.1-py2.py3-none-any.whl (142kB)\n", + "Collecting azure-nspkg>=3.0.0 (from azure-mgmt-nspkg>=2.0.0->azure-mgmt-keyvault>=0.40.0->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/c4/0c/c562be95a9a2ed52454f598571cf300b1114d0db2aa27f5b8ed3bb9cd0c0/azure_nspkg-3.0.2-py3-none-any.whl\n", + "Collecting pycparser (from cffi!=1.11.3,>=1.8->cryptography!=1.9,!=2.0.*,!=2.1.*,!=2.2.*->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", + " Downloading https://files.pythonhosted.org/packages/68/9e/49196946aee219aead1290e00d1e7fdeab8567783e83e1b9ab5585e6206a/pycparser-2.19.tar.gz (158kB)\n", + "Collecting attrs>=17.4.0 (from jsonschema->JsonForm>=0.0.2->resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/23/96/d828354fa2dbdf216eaa7b7de0db692f12c234f7ef888cc14980ef40d1d2/attrs-19.1.0-py2.py3-none-any.whl\n", + "Collecting pyrsistent>=0.14.0 (from jsonschema->JsonForm>=0.0.2->resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/68/0b/f514e76b4e074386b60cfc6c8c2d75ca615b81e415417ccf3fac80ae0bf6/pyrsistent-0.15.2.tar.gz (106kB)\n", + "Requirement already satisfied: keras-applications>=1.0.6 in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from keras->keras2onnx->onnxmltools==1.4.0; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (1.0.7)\n", + "Requirement already satisfied: h5py in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from keras->keras2onnx->onnxmltools==1.4.0; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (2.9.0)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: keras-preprocessing>=1.0.5 in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from keras->keras2onnx->onnxmltools==1.4.0; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (1.0.9)\n", + "Collecting s3transfer<0.3.0,>=0.2.0 (from boto3->smart-open>=1.7.0->gensim->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/16/8a/1fc3dba0c4923c2a76e1ff0d52b305c44606da63f718d14d3231e21c51b0/s3transfer-0.2.1-py2.py3-none-any.whl (70kB)\n", + "Collecting botocore<1.13.0,>=1.12.171 (from boto3->smart-open>=1.7.0->gensim->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading 
https://files.pythonhosted.org/packages/c4/a2/48924b841570e99fdf9d593a4d76568e27bb335f8b63faa716f5bba9db87/botocore-1.12.171-py2.py3-none-any.whl (5.5MB)\n", + "Collecting docutils>=0.10 (from botocore<1.13.0,>=1.12.171->boto3->smart-open>=1.7.0->gensim->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", + " Downloading https://files.pythonhosted.org/packages/36/fa/08e9e6e0e3cbd1d362c3bbee8d01d0aedb2155c4ac112b19ef3cae8eed8d/docutils-0.14-py3-none-any.whl (543kB)\n", + "Building wheels for collected packages: dill, pathspec, JsonSir, JsonForm, smart-open, PyYAML, pycparser, pyrsistent\n", + " Building wheel for dill (setup.py): started\n", + " Building wheel for dill (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/5b/d7/0f/e58eae695403de585269f4e4a94e0cd6ca60ec0c202936fa4a\n", + " Building wheel for pathspec (setup.py): started\n", + " Building wheel for pathspec (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/45/cb/7e/ce6e6062c69446e39e328170524ca8213498bc66a74c6a210b\n", + " Building wheel for JsonSir (setup.py): started\n", + " Building wheel for JsonSir (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/ee/30/5c/3a3b5e1386c8db9a3be5f5c3933644ae0533c1351c6a8eb4b5\n", + " Building wheel for JsonForm (setup.py): started\n", + " Building wheel for JsonForm (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/e8/74/51/42c2d41c02bdc6f0e604476b7e4293b8c98d0bcbfa1dff78c8\n", + " Building wheel for smart-open (setup.py): started\n", + " Building wheel for smart-open (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/5f/ea/fb/5b1a947b369724063b2617011f1540c44eb00e28c3d2ca8692\n", + " Building wheel for PyYAML (setup.py): started\n", + " Building wheel for PyYAML (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/16/27/a1/775c62ddea7bfa62324fd1f65847ed31c55dadb6051481ba3f\n", + " Building wheel for pycparser (setup.py): started\n", + " Building wheel for pycparser (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/f2/9a/90/de94f8556265ddc9d9c8b271b0f63e57b26fb1d67a45564511\n", + " Building wheel for pyrsistent (setup.py): started\n", + " Building wheel for pyrsistent (setup.py): finished with status 'done'\n", + " Stored in directory: /root/.cache/pip/wheels/6b/b9/15/c8c6a1e095a370e8c3273e65a5c982e5cf355dde16d77502f5\n", + "Successfully built dill pathspec JsonSir JsonForm smart-open PyYAML pycparser pyrsistent\n", + "ERROR: azureml-train-core 1.0.43 has requirement azureml-telemetry==1.0.43.*, but you'll have azureml-telemetry 1.0.33 which is incompatible.\n", + "ERROR: azureml-pipeline-core 1.0.33 has requirement azureml-core==1.0.33.*, but you'll have azureml-core 1.0.43.1 which is incompatible.\n", + "ERROR: azureml-pipeline-steps 1.0.43 has requirement azureml-pipeline-core==1.0.43.*, but you'll have azureml-pipeline-core 1.0.33 which is incompatible.\n", + "ERROR: azureml-pipeline 1.0.43 has requirement azureml-pipeline-core==1.0.43.*, but you'll have azureml-pipeline-core 1.0.33 which is incompatible.\n", + "ERROR: azureml-train-automl 1.0.33 has requirement azureml-core==1.0.33.*, but you'll have azureml-core 1.0.43.1 which is incompatible.\n", + "Installing collected packages: PyJWT, idna, chardet, urllib3, requests, jeepney, asn1crypto, pycparser, 
cffi, cryptography, SecretStorage, oauthlib, requests-oauthlib, isodate, msrest, adal, msrestazure, azure-common, azure-graphrbac, pathspec, jsonpickle, pyasn1, pyopenssl, ndg-httpsclient, websocket-client, docker, azure-mgmt-resource, azure-mgmt-authorization, jmespath, ruamel.yaml, azure-nspkg, azure-mgmt-nspkg, azure-mgmt-keyvault, azure-mgmt-containerregistry, contextlib2, azure-mgmt-storage, backports.weakref, backports.tempfile, azureml-core, azureml-train-restclients-hyperdrive, applicationinsights, azureml-telemetry, azureml-train-core, azureml-pipeline-core, azureml-pipeline-steps, azureml-pipeline, azureml-train, azureml-dataprep-native, distro, dotnetcore2, azureml-dataprep, azureml-sdk, dill, JsonSir, PyYAML, python-easyconfig, attrs, pyrsistent, jsonschema, JsonForm, resource, typing-extensions, typing, numpy, onnx, skl2onnx, onnxconverter-common, scipy, keras, keras2onnx, onnxmltools, scikit-learn, lightgbm, boto, docutils, botocore, s3transfer, boto3, smart-open, gensim, pandas, sklearn-pandas, wheel, nimbusml, azureml-train-automl\n", + " Found existing installation: numpy 1.16.4\n", + " Uninstalling numpy-1.16.4:\n", + " Successfully uninstalled numpy-1.16.4\n", + " Found existing installation: scipy 1.3.0\n", + " Uninstalling scipy-1.3.0:\n", + " Successfully uninstalled scipy-1.3.0\n", + " Found existing installation: scikit-learn 0.21.2\n", + " Uninstalling scikit-learn-0.21.2:\n", + " Successfully uninstalled scikit-learn-0.21.2\n", + " Found existing installation: pandas 0.24.2\n", + " Uninstalling pandas-0.24.2:\n", + " Successfully uninstalled pandas-0.24.2\n", + " Found existing installation: wheel 0.33.4\n", + " Uninstalling wheel-0.33.4:\n", + " Successfully uninstalled wheel-0.33.4\n", + "Successfully installed JsonForm-0.0.2 JsonSir-0.0.2 PyJWT-1.7.1 PyYAML-5.1.1 SecretStorage-3.1.1 adal-1.2.1 applicationinsights-0.11.9 asn1crypto-0.24.0 attrs-19.1.0 azure-common-1.1.22 azure-graphrbac-0.61.1 azure-mgmt-authorization-0.52.0 azure-mgmt-containerregistry-2.8.0 azure-mgmt-keyvault-1.1.0 azure-mgmt-nspkg-3.0.2 azure-mgmt-resource-3.0.0 azure-mgmt-storage-4.0.0 azure-nspkg-3.0.2 azureml-core-1.0.43.1 azureml-dataprep-1.1.5 azureml-dataprep-native-13.0.0 azureml-pipeline-1.0.43 azureml-pipeline-core-1.0.33 azureml-pipeline-steps-1.0.43 azureml-sdk-1.0.43 azureml-telemetry-1.0.33 azureml-train-1.0.43 azureml-train-automl-1.0.33 azureml-train-core-1.0.43 azureml-train-restclients-hyperdrive-1.0.43 backports.tempfile-1.0 backports.weakref-1.0.post1 boto-2.49.0 boto3-1.9.171 botocore-1.12.171 cffi-1.12.3 chardet-3.0.4 contextlib2-0.5.5 cryptography-2.7 dill-0.2.9 distro-1.4.0 docker-4.0.1 docutils-0.14 dotnetcore2-2.1.8 gensim-3.7.3 idna-2.8 isodate-0.6.0 jeepney-0.4 jmespath-0.9.4 jsonpickle-1.2 jsonschema-3.0.1 keras-2.2.4 keras2onnx-1.5.0 lightgbm-2.2.1 msrest-0.6.7 msrestazure-0.6.1 ndg-httpsclient-0.5.1 nimbusml-0.6.5 numpy-1.16.2 oauthlib-3.0.1 onnx-1.5.0 onnxconverter-common-1.5.0 onnxmltools-1.4.0 pandas-0.23.4 pathspec-0.5.9 pyasn1-0.4.5 pycparser-2.19 pyopenssl-19.0.0 pyrsistent-0.15.2 python-easyconfig-0.1.7 requests-2.22.0 requests-oauthlib-1.2.0 resource-0.2.1 ruamel.yaml-0.15.89 s3transfer-0.2.1 scikit-learn-0.20.3 scipy-1.1.0 skl2onnx-1.4.5 sklearn-pandas-1.7.0 smart-open-1.8.4 typing-3.6.6 typing-extensions-3.7.2 urllib3-1.25.3 websocket-client-0.56.0 wheel-0.30.0\n", + "\n", + "#\n", + "# To activate this environment, use:\n", + "# > source activate /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b\n", + "#\n", + "# To deactivate an active 
environment, use:\n", + "# > source deactivate\n", + "#\n", + "\n", + "Removing intermediate container ace0ce861df6\n", + " ---> 357f156a89a0\n", + "Step 9/15 : ENV PATH /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/bin:$PATH\n", + " ---> Running in b46d6928a32a\n", + "Removing intermediate container b46d6928a32a\n", + " ---> e0cef77bd281\n", + "Step 10/15 : ENV AZUREML_CONDA_ENVIRONMENT_PATH /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b\n", + " ---> Running in 6785c898e6b1\n", + "Removing intermediate container 6785c898e6b1\n", + " ---> 22b08f45e3b2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Step 11/15 : ENV LD_LIBRARY_PATH /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib:$LD_LIBRARY_PATH\n", + " ---> Running in 1c2d773a5c25\n", + "Removing intermediate container 1c2d773a5c25\n", + " ---> b8a67309c446\n", + "Step 12/15 : COPY azureml-setup/spark_cache.py azureml-setup/log4j.properties /azureml-setup/\n", + " ---> 639fe2b42e43\n", + "Step 13/15 : RUN if [ $SPARK_HOME ]; then /bin/bash -c '$SPARK_HOME/bin/spark-submit \"--repositories\" \"[]\" /azureml-setup/spark_cache.py'; fi\n", + " ---> Running in 2b8e5e18802e\n", + "Removing intermediate container 2b8e5e18802e\n", + " ---> 76c3e4641f51\n", + "Step 14/15 : ENV AZUREML_ENVIRONMENT_IMAGE True\n", + " ---> Running in 39fa1899d8d0\n", + "Removing intermediate container 39fa1899d8d0\n", + " ---> 1f7288ce6b97\n", + "Step 15/15 : CMD [\"bash\"]\n", + " ---> Running in 4a3e210076e4\n", + "Removing intermediate container 4a3e210076e4\n", + " ---> a44c080ae64a\n", + "Successfully built a44c080ae64a\n", + "Successfully tagged maidaptestc9922809.azurecr.io/azureml/azureml_b2a8349416887710026a15e07f74a6a3:latest\n", + "2019/06/18 20:50:48 Successfully executed container: acb_step_0\n", + "2019/06/18 20:50:48 Executing step ID: acb_step_1. 
Timeout(sec): 1800, Working directory: '', Network: 'acb_default_network'\n", + "2019/06/18 20:50:48 Pushing image: maidaptestc9922809.azurecr.io/azureml/azureml_b2a8349416887710026a15e07f74a6a3:latest, attempt 1\n", + "The push refers to repository [maidaptestc9922809.azurecr.io/azureml/azureml_b2a8349416887710026a15e07f74a6a3]\n", + "9bdb266887f9: Preparing\n", + "a69cb7060f7a: Preparing\n", + "3cd6b95b801a: Preparing\n", + "21bc0f0f38b2: Preparing\n", + "a309c58722fe: Preparing\n", + "21b14872ab14: Preparing\n", + "3c9c46eff366: Preparing\n", + "5d7b56c93f07: Preparing\n", + "a93427c950fc: Preparing\n", + "94919f134c61: Preparing\n", + "be63f83f7ae7: Preparing\n", + "4c54072a5034: Preparing\n", + "49652298c779: Preparing\n", + "e15278fcccca: Preparing\n", + "739482a9723d: Preparing\n", + "94919f134c61: Waiting\n", + "be63f83f7ae7: Waiting\n", + "4c54072a5034: Waiting\n", + "49652298c779: Waiting\n", + "e15278fcccca: Waiting\n", + "739482a9723d: Waiting\n", + "21b14872ab14: Waiting\n", + "3c9c46eff366: Waiting\n", + "5d7b56c93f07: Waiting\n", + "a93427c950fc: Waiting\n", + "a309c58722fe: Pushed\n", + "3cd6b95b801a: Pushed\n", + "9bdb266887f9: Pushed\n", + "21bc0f0f38b2: Pushed\n", + "21b14872ab14: Pushed\n", + "3c9c46eff366: Pushed\n", + "5d7b56c93f07: Pushed\n", + "4c54072a5034: Pushed\n", + "49652298c779: Pushed\n", + "e15278fcccca: Pushed\n", + "a93427c950fc: Pushed\n", + "739482a9723d: Pushed\n", + "94919f134c61: Pushed\n", + "be63f83f7ae7: Pushed\n", + "a69cb7060f7a: Pushed\n", + "latest: digest: sha256:7c746d01dd80267b57a9b907bc60eb993269f63105e8f58080994d156448d2eb size: 3459\n", + "2019/06/18 20:53:02 Successfully pushed image: maidaptestc9922809.azurecr.io/azureml/azureml_b2a8349416887710026a15e07f74a6a3:latest\n", + "2019/06/18 20:53:02 Step ID: acb_step_0 marked as successful (elapsed time in seconds: 250.532526)\n", + "2019/06/18 20:53:02 Populating digests for step ID: acb_step_0...\n", + "2019/06/18 20:53:04 Successfully populated digests for step ID: acb_step_0\n", + "2019/06/18 20:53:04 Step ID: acb_step_1 marked as successful (elapsed time in seconds: 133.795459)\n", + "2019/06/18 20:53:04 The following dependencies were found:\n", + "2019/06/18 20:53:04 \n", + "- image:\n", + " registry: maidaptestc9922809.azurecr.io\n", + " repository: azureml/azureml_b2a8349416887710026a15e07f74a6a3\n", + " tag: latest\n", + " digest: sha256:7c746d01dd80267b57a9b907bc60eb993269f63105e8f58080994d156448d2eb\n", + " runtime-dependency:\n", + " registry: mcr.microsoft.com\n", + " repository: azureml/base\n", + " tag: intelmpi2018.3-ubuntu16.04\n", + " digest: sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b\n", + " git: {}\n", + "\n", + "Run ID: chq was successful after 6m31s\n" + ] + } + ], + "source": [ + "pipeline_run.wait_for_completion(show_output=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + 
"nbformat": 4, + "nbformat_minor": 2 +} From 6288e2580c9fa3a778a8b50b91e211647d358f17 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Tue, 18 Jun 2019 23:55:23 -0400 Subject: [PATCH 076/108] Add AutoML pipelines notebook --- .../automl_with_pipelines.ipynb | 1062 +++++++++++++++++ scenarios/sentence_similarity/pipelines.png | Bin 0 -> 18681 bytes 2 files changed, 1062 insertions(+) create mode 100644 scenarios/sentence_similarity/automl_with_pipelines.ipynb create mode 100644 scenarios/sentence_similarity/pipelines.png diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb new file mode 100644 index 000000000..20f3342bb --- /dev/null +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -0,0 +1,1062 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using Pipelines and AutoML for Predicting Sentence Similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrates how to use AzureML pipelines and AutoML to streamline the creation of a machine learning workflow for predicting sentence similarity. The pipeline contains two steps: \n", + "1. PythonScriptStep: uses a popular sentence embedding model from Google, Universal Sentence Encoder, to convert our sentence data into numerical data\n", + "2. AutoMLStep: demonstrates how to use AutoML to automate model selection for predicting sentence similarity scores (regression)\n", + "\n", + "An AmlCompute target is used to run the pipeline, Azure Datastores are used for storing of our data, and logging is utilized. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### What are AzureML Pipelines?\n", + "\n", + "AzureML Pipelines \"define reusable machine learning workflows that can be used as a template for your machine learning scenarios\" (https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). Pipelines allow you to optimize your workflow and spend time on machine learning rather than infrastructure. A Pipeline is defined by a series of steps; the following steps are available: AdlaStep, AutoMLStep, AzureBatchStep, DataTransferStep, DatabricksStep, EstimatorStep, HyperDriveStep, ModuleStep, MpiStep, and PythonScriptStep (see [here](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/?view=azure-ml-py) for details of each step). When the pipeline is run, cached results are used for all steps that have not changed, optimizing the run time. Data sources and intermediate data can be used across multiple steps in a pipeline, saving time and resources. Below we see an example of an AzureML pipeline." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](pipelines.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### What is Azure AutoML?\n", + "\n", + "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. 
All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", + "\n", + "[1] https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](automl.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in Python notebooks through the use of widgets. AutoML quickly selects a high-quality machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Modeling Problem\n", + "\n", + "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled score representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence).\n", + "\n", + "For each sentence in the sentence pair, we will use Google's pretrained Universal Sentence Encoder (details provided below) to generate a $512$-dimensional embedding. Both embeddings in the sentence pair will be concatenated and the resulting $1024$-dimensional vector will be used as features in our regression problem. Our target variable is the sentence similarity score." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Logging before flag parsing goes to stderr.\n", + "W0618 22:40:59.654601 10096 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Turning diagnostics collection on. 
\n", + "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", + "Azure ML SDK Version: 1.0.41\n", + "Pandas version: 0.23.4\n", + "Tensorflow Version: 1.13.1\n" + ] + } + ], + "source": [ + "# Set the environment path to find NLP\n", + "import sys\n", + "sys.path.append(\"../../\")\n", + "import time\n", + "import logging\n", + "import csv\n", + "import os\n", + "import pandas as pd\n", + "import shutil\n", + "import numpy as np\n", + "import torch\n", + "from scipy.stats import pearsonr\n", + "from scipy.spatial import distance\n", + "from sklearn.externals import joblib\n", + "\n", + "# Import utils\n", + "from utils_nlp.azureml import azureml_utils\n", + "from utils_nlp.dataset import stsbenchmark\n", + "from utils_nlp.dataset.preprocess import (\n", + " to_lowercase,\n", + " to_spacy_tokens,\n", + " rm_spacy_stopwords,\n", + ")\n", + "\n", + "# Tensorflow dependencies for Google Universal Sentence Encoder\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "# AzureML packages\n", + "import azureml as aml\n", + "from azureml.telemetry import set_diagnostics_collection\n", + "set_diagnostics_collection(send_diagnostics=True)\n", + "from azureml.train.automl import AutoMLConfig\n", + "from azureml.core import Datastore, Experiment\n", + "from azureml.data.data_reference import DataReference \n", + "from azureml.widgets import RunDetails\n", + "from azureml.core.compute import ComputeTarget, AmlCompute\n", + "from azureml.core.compute_target import ComputeTargetException\n", + "from azureml.core.runconfig import RunConfiguration\n", + "from azureml.core.conda_dependencies import CondaDependencies\n", + "from azureml.train.automl import AutoMLStep\n", + "from azureml.pipeline.core import Pipeline, PipelineData, TrainingOutput\n", + "from azureml.pipeline.steps import PythonScriptStep\n", + "\n", + "print(\"System version: {}\".format(sys.version))\n", + "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", + "print(\"Pandas version: {}\".format(pd.__version__))\n", + "print(\"Tensorflow Version:\", tf.VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "BASE_DATA_PATH = '../../data'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 1. Data Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**STS Benchmark Dataset**\n", + "\n", + "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair."
+ ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 232KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████| 401/401 [00:02<00:00, 140KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████| 401/401 [00:02<00:00, 165KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + } + ], + "source": [ + "# Load in the raw datasets as pandas dataframes\n", + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", + "# and renaming the remaining columns\n", + "train_clean = stsbenchmark.clean_sts(train_raw)\n", + "dev_clean = stsbenchmark.clean_sts(dev_raw)\n", + "test_clean = stsbenchmark.clean_sts(test_raw)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "# Convert all text to lowercase\n", + "train = to_lowercase(train_clean)\n", + "dev = to_lowercase(dev_clean)\n", + "test = to_lowercase(test_clean)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training set has 5749 sentences\n", + "Development set has 1500 sentences\n", + "Testing set has 1379 sentences\n" + ] + } + ], + "source": [ + "print(\"Training set has {} sentences\".format(len(train)))\n", + "print(\"Development set has {} sentences\".format(len(dev)))\n", + "print(\"Testing set has {} sentences\".format(len(test)))" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n",
+ "<table border=\"1\" class=\"dataframe\">\n",
+ "  <thead>\n",
+ "    <tr style=\"text-align: right;\">\n",
+ "      <th></th>\n",
+ "      <th>score</th>\n",
+ "      <th>sentence1</th>\n",
+ "      <th>sentence2</th>\n",
+ "    </tr>\n",
+ "  </thead>\n",
+ "  <tbody>\n",
+ "    <tr>\n",
+ "      <th>0</th>\n",
+ "      <td>5.00</td>\n",
+ "      <td>a plane is taking off.</td>\n",
+ "      <td>an air plane is taking off.</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>1</th>\n",
+ "      <td>3.80</td>\n",
+ "      <td>a man is playing a large flute.</td>\n",
+ "      <td>a man is playing a flute.</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>2</th>\n",
+ "      <td>3.80</td>\n",
+ "      <td>a man is spreading shreded cheese on a pizza.</td>\n",
+ "      <td>a man is spreading shredded cheese on an uncoo...</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>3</th>\n",
+ "      <td>2.60</td>\n",
+ "      <td>three men are playing chess.</td>\n",
+ "      <td>two men are playing chess.</td>\n",
+ "    </tr>\n",
+ "    <tr>\n",
+ "      <th>4</th>\n",
+ "      <td>4.25</td>\n",
+ "      <td>a man is playing the cello.</td>\n",
+ "      <td>a man seated is playing the cello.</td>\n",
+ "    </tr>\n",
+ "  </tbody>\n",
+ "</table>
" + ], + "text/plain": [ + " score sentence1 \\\n", + "0 5.00 a plane is taking off. \n", + "1 3.80 a man is playing a large flute. \n", + "2 3.80 a man is spreading shreded cheese on a pizza. \n", + "3 2.60 three men are playing chess. \n", + "4 4.25 a man is playing the cello. \n", + "\n", + " sentence2 \n", + "0 an air plane is taking off. \n", + "1 a man is playing a flute. \n", + "2 a man is spreading shredded cheese on an uncoo... \n", + "3 two men are playing chess. \n", + "4 a man seated is playing the cello. " + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train.head(5)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "#Save the cleaned data\n", + "if not os.path.isdir('data'):\n", + " os.mkdir('data')\n", + " \n", + "train.to_csv(\"data/train.csv\", index=False)\n", + "test.to_csv(\"data/test.csv\", index=False)\n", + "dev.to_csv(\"data/dev.csv\", index=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 2. Set up AzureML Workspace, Experiment, Compute & Datastore" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2a. Link to or create a workspace" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0618 22:55:15.048929 10096 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0618 22:55:15.432929 37988 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "W0618 22:55:30.586771 10096 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n", + "Workspace name: MAIDAPTest\n", + "Azure region: eastus2\n", + "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", + "Resource group: nlprg\n" + ] + } + ], + "source": [ + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")\n", + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2b. 
Set up an experiment and logging" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "# Make a folder for the project\n", + "project_folder = './automl-sentence-similarity'\n", + "if not os.path.exists(project_folder):\n", + " os.makedirs(project_folder)\n", + "\n", + "# Set up an experiment\n", + "experiment_name = 'automl-sentence-similarity'\n", + "experiment = Experiment(ws, experiment_name)\n", + "run = experiment.start_logging()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2c. Link compute target" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found existing compute target.\n", + "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-19T02:52:52.599000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + ] + } + ], + "source": [ + "# choose a name for your cluster\n", + "cluster_name = \"gpucluster\"\n", + "\n", + "try:\n", + " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", + " print('Found existing compute target.')\n", + "except ComputeTargetException:\n", + " print('Creating a new compute target...')\n", + " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", + " max_nodes=4)\n", + "\n", + " # create the cluster\n", + " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", + "\n", + " compute_target.wait_for_completion(show_output=True)\n", + "\n", + "# use get_status() to get a detailed status for the current AmlCompute. \n", + "print(compute_target.get_status().serialize())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2d. Upload data to datastore" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Uploading ./data\\dev.csv\n", + "Uploading ./data\\test.csv\n", + "Uploading ./data\\train.csv\n", + "Uploaded ./data\\dev.csv, 1 files out of an estimated total of 3\n", + "Uploaded ./data\\train.csv, 2 files out of an estimated total of 3\n", + "Uploaded ./data\\test.csv, 3 files out of an estimated total of 3\n" + ] + }, + { + "data": { + "text/plain": [ + "$AZUREML_DATAREFERENCE_e806155bf4c3452596bd2c3ffa76743d" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Select a specific datastore or can call ws.get_default_datastore()\n", + "datastore_name = 'workspacefilestore'\n", + "ds = ws.datastores[datastore_name]\n", + "\n", + "# Upload files in data folder\n", + "ds.upload(src_dir='./data', target_path='stsbenchmark_data', overwrite=True, show_progress=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Set up a **DataReference** object that points to the data we just uploaded into the stsbenchmark_data folder. 
DataReference objects point to data that is accessible from a datastore." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "input_data = DataReference(datastore=ds, \n", + " data_reference_name=\"stsbenchmark\",\n", + " path_on_datastore='stsbenchmark_data/',\n", + " overwrite=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3. Create Pipeline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3a. Set up run configuration file" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "run config is ready\n" + ] + } + ], + "source": [ + "# create a new RunConfig object\n", + "conda_run_config = RunConfiguration(framework=\"python\")\n", + "\n", + "# Set compute target to AmlCompute\n", + "conda_run_config.target = compute_target\n", + "\n", + "conda_run_config.environment.docker.enabled = True\n", + "conda_run_config.environment.docker.base_image = aml.core.runconfig.DEFAULT_CPU_IMAGE\n", + "\n", + "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n", + "conda_run_config.environment.python.user_managed_dependencies = False\n", + "\n", + "conda_run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33'], \n", + " conda_packages=['numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], \n", + " pin_sdk_version=False)\n", + "\n", + "print('run config is ready')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3b. PythonScriptStep" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this pipeline step, we will convert our sentences into a numerical representation in order to use them in our machine learning model. We will embed both sentences using the Google Universal Sentence Encoder and concatenate their representations into a $1024$-dimensional vector to use as features for AutoML.\n", + "\n", + "**Google Universal Sentence Encoder: Overview**\n", + "We'll use a popular sentence encoder called Google Universal Sentence Encoder (see [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). Google provides two pretrained models based on different design goals: a Transformer model (targets high accuracy even if this reduces model complexity) and a Deep Averaging Network model (DAN; targets efficient inference). Both models are trained on a variety of web sources (Wikipedia, news, question-answers pages, and discussion forums) and produced 512-dimensional embeddings. This notebook utilizes the Transformer-based encoding model which can be downloaded [here](https://tfhub.dev/google/universal-sentence-encoder-large/3) because of its better performance relative to the DAN model on the STS Benchmark dataset (see Table 2 in Google Research's [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Google Universal Sentence Encoder: Transformer Model** The Transformer model produces sentence embeddings using the \"encoding sub-graph of the transformer architecture\" (original architecture introduced [here](https://arxiv.org/abs/1706.03762)). 
\"This sub-graph uses attention to compute context aware representations of words in a sentence that take into account both the ordering and identity of all the other words. The context aware word representations are converted to a fixed length sentence encoding vector by computing the element-wise sum of the representations at each word position.\" The input to the model is lowercase PTB-tokenized strings and the model is designed to be useful for multiple different tasks by using multi-task learning. More details about the model can be found in the [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf) by Google Research." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Using the Pretrained Model**\n", + "\n", + "TensorFlow Hub provides the pretrained model for use by the public. We import the model from its URL and then feed the model our sentences for it to encode." + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting ./automl-sentence-similarity/embed.py\n" + ] + } + ], + "source": [ + "%%writefile $project_folder/embed.py\n", + "import argparse\n", + "import os\n", + "import azureml.core\n", + "import pandas as pd\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "def google_encoder(dataset):\n", + " \"\"\" Function that embeds sentences using the Google Universal\n", + " Sentence Encoder pretrained model\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " emb1: 512-dimensional representation of sentence1\n", + " emb2: 512-dimensional representation of sentence2\n", + " \"\"\"\n", + " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", + " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", + "\n", + " # Apply the embedding model and l2-normalize the resulting embeddings\n", + " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", + " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", + " \n", + " with tf.Session() as session:\n", + " session.run(tf.global_variables_initializer())\n", + " session.run(tf.tables_initializer())\n", + " emb1, emb2 = session.run(\n", + " [sts_encode1, sts_encode2],\n", + " feed_dict={\n", + " sts_input1: dataset['sentence1'],\n", + " sts_input2: dataset['sentence2']\n", + " })\n", + " return emb1, emb2\n", + "\n", + "def feature_engineering(dataset):\n", + " \"\"\"Extracts embedding features from the dataset and returns\n", + " features and target in a dataframe\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " df: pandas dataframe with embedding features\n", + " scores: list of target variables\n", + " \"\"\"\n", + " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", + " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", + " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", + " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", + " df = pd.DataFrame(df, columns=names)\n", + " return df, dataset['score']\n", + "\n", + "def write_output(df, path, name):\n", + " os.makedirs(path, exist_ok=True)\n", + " 
print(\"%s created\" % path)\n", + " df.to_csv(path + \"/\" + name, index=False)\n", + "\n", + "parser = argparse.ArgumentParser()\n", + "parser.add_argument(\"--sentence_data\", type=str)\n", + "parser.add_argument(\"--embedded_data\", type=str)\n", + "args = parser.parse_args()\n", + "\n", + "# Import the Universal Sentence Encoder's TF Hub module\n", + "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", + "embedding_model = hub.Module(module_url)\n", + "\n", + "train = pd.read_csv(args.sentence_data + \"/train.csv\")\n", + "dev = pd.read_csv(args.sentence_data + \"/dev.csv\")\n", + "\n", + "training_data, training_scores = feature_engineering(train)\n", + "validation_data, validation_scores = feature_engineering(dev)\n", + "\n", + "write_output(training_data, args.embedded_data, \"X_train.csv\")\n", + "write_output(pd.DataFrame(training_scores, columns=['score']), args.embedded_data, \"y_train.csv\")\n", + "\n", + "write_output(validation_data, args.embedded_data, \"X_dev.csv\")\n", + "write_output(pd.DataFrame(validation_scores, columns=['score']), args.embedded_data, \"y_dev.csv\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**PipelineData** objects represent a piece of intermediate data in a pipeline. Generally they are produced by one step (as an output) and then consumed by the next step (as an input), introducing an implicit order between steps in a pipeline. We create a PipelineData object that can represent the data produced by our first pipeline step that will be consumed by our second pipeline step." + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "embedded_data = PipelineData(\"embedded_data\", datastore=ds)" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "embedStep = PythonScriptStep(\n", + " name=\"Embed\",\n", + " script_name=\"embed.py\", \n", + " arguments=[\"--embedded_data\", embedded_data,\n", + " \"--sentence_data\", input_data],\n", + " inputs=[input_data],\n", + " outputs=[embedded_data],\n", + " compute_target=compute_target,\n", + " runconfig = conda_run_config,\n", + " hash_paths=[\"embed.py\"],\n", + " source_directory=project_folder,\n", + " allow_reuse=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3c. 
AutoMLStep" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting ./automl-sentence-similarity/get_data.py\n" + ] + } + ], + "source": [ + "%%writefile $project_folder/get_data.py\n", + "\n", + "import os\n", + "import pandas as pd\n", + "\n", + "def get_data():\n", + " X_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_train.csv\")\n", + " y_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_train.csv\")\n", + " X_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_dev.csv\")\n", + " y_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_dev.csv\")\n", + " return { \"X\" : X_train.values, \"y\" : y_train.values.flatten(), \"X_valid\": X_dev.values, \"y_valid\": y_dev.values.flatten()}" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "# Create PipelineData objects for tracking AutoML metrics \n", + "metrics_output_name = 'metrics_output'\n", + "best_model_output_name = 'best_model_output'\n", + "\n", + "metrics_data = PipelineData(name='metrics_data',\n", + " datastore=ds,\n", + " pipeline_output_name=metrics_output_name,\n", + " training_output=TrainingOutput(type='Metrics'))\n", + "model_data = PipelineData(name='model_data',\n", + " datastore=ds,\n", + " pipeline_output_name=best_model_output_name,\n", + " training_output=TrainingOutput(type='Model'))" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "automl_settings = {\n", + " \"iteration_timeout_minutes\": 5,\n", + " \"iterations\": 5,\n", + " \"primary_metric\": 'spearman_correlation',\n", + " \"preprocess\": True,\n", + " \"verbosity\": logging.INFO,\n", + "}\n", + "automl_config = AutoMLConfig(task = 'regression',\n", + " debug_log = 'automl_errors.log',\n", + " path = project_folder,\n", + " compute_target=compute_target,\n", + " run_configuration=conda_run_config,\n", + " data_script = project_folder + \"/get_data.py\",\n", + " **automl_settings\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "automl_step = AutoMLStep(\n", + " name='AutoML',\n", + " automl_config=automl_config,\n", + " inputs=[embedded_data],\n", + " outputs=[metrics_data, model_data],\n", + " hash_paths=[\"get_data.py\"],\n", + " allow_reuse=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 4. 
Run Pipeline" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "automl_step.run_after(embedStep)\n", + "pipeline = Pipeline(\n", + " description=\"pipeline_embed_automl\",\n", + " workspace=ws, \n", + " steps=[automl_step])" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Created step AutoML [320f0121][5913af95-9ebb-42c0-a650-7725b7fe0b54], (This step will run and generate new outputs)\n", + "Created step Embed [81087fb9][d271deed-bd3b-4e41-9814-29fc11e585b4], (This step is eligible to reuse a previous run's output)\n", + "Using data reference stsbenchmark for StepId [8ca56eac][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", + "Submitted pipeline run: 5549c561-26e2-4979-9f3f-0379e38de86a\n" + ] + } + ], + "source": [ + "pipeline_run = experiment.submit(pipeline)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "3361878269d34e4a9546fa54822e9e92", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_PipelineWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', '…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "RunDetails(pipeline_run).show()" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PipelineRunId: 5549c561-26e2-4979-9f3f-0379e38de86a\n", + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/5549c561-26e2-4979-9f3f-0379e38de86a\n", + "PipelineRun Status: Running\n", + "\n", + "\n", + "StepRunId: 3fffcae0-74f3-49c5-bb7d-f877bda582f7\n", + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/3fffcae0-74f3-49c5-bb7d-f877bda582f7\n", + "\n", + "StepRun(Embed) Execution Summary\n", + "=================================\n", + "StepRun( Embed ) Status: Finished\n", + "{'runId': '3fffcae0-74f3-49c5-bb7d-f877bda582f7', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T03:30:21.798384Z', 'endTimeUtc': '2019-06-19T03:30:21.864304Z', 'properties': {'azureml.reusedrunid': 'f78cb325-802a-4779-ada8-05db82c97835', 'azureml.reusednodeid': '70352e68', 'azureml.reusedpipeline': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.reusedpipelinerunid': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.runsource': 'azureml.StepRun', 'azureml.nodeid': '81087fb9', 'ContentSnapshotId': '8979e52a-3c38-432c-b9e3-a235b33b7d1e', 'StepType': 'PythonScriptStep', 'ComputeTargetType': 'AmlCompute', 'azureml.pipelinerunid': '5549c561-26e2-4979-9f3f-0379e38de86a', 'AzureML.DerivedImageName': 'azureml/azureml_b2a8349416887710026a15e07f74a6a3'}, 'runDefinition': {'script': 'embed.py', 'arguments': ['--embedded_data', '$AZUREML_DATAREFERENCE_embedded_data', '--sentence_data', '$AZUREML_DATAREFERENCE_stsbenchmark'], 'sourceDirectoryDataStore': None, 'framework': 'Python', 'communicator': 
'None', 'target': 'gpucluster', 'dataReferences': {'stsbenchmark': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'stsbenchmark_data/', 'pathOnCompute': None, 'overwrite': False}, 'embedded_data': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'azureml/f78cb325-802a-4779-ada8-05db82c97835/embedded_data', 'pathOnCompute': None, 'overwrite': False}}, 'jobName': None, 'maxRunDurationSeconds': None, 'nodeCount': 1, 'environment': {'name': 'Experiment automl-sentence-similarity Environment', 'version': 'Autosave_2019-06-18T20:46:30Z_9b3f4178', 'python': {'interpreterPath': 'python', 'userManagedDependencies': False, 'condaDependencies': {'name': 'project_environment', 'dependencies': ['python=3.6.2', {'pip': ['azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33']}, 'numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], 'channels': ['conda-forge']}, 'baseCondaEnvironment': None}, 'environmentVariables': {'EXAMPLE_ENV_VAR': 'EXAMPLE_VALUE'}, 'docker': {'baseImage': 'mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04', 'enabled': True, 'sharedVolumes': True, 'gpuSupport': False, 'shmSize': '1g', 'arguments': [], 'baseImageRegistry': {'address': None, 'username': None, 'password': None}}, 'spark': {'repositories': ['[]'], 'packages': [], 'precachePackages': True}}, 'history': {'outputCollection': True, 'directoriesToWatch': ['logs']}, 'spark': {'configuration': {'spark.app.name': 'Azure ML Experiment', 'spark.yarn.maxAppAttempts': '1'}}, 'amlCompute': {'name': None, 'vmSize': None, 'vmPriority': None, 'retainCluster': False, 'clusterMaxNodeCount': 1}, 'tensorflow': {'workerCount': 1, 'parameterServerCount': 1}, 'mpi': {'processCountPerNode': 1}, 'hdi': {'yarnDeployMode': 'Cluster'}, 'containerInstance': {'region': None, 'cpuCores': 2, 'memoryGb': 3.5}, 'exposedPorts': None}, 'logFiles': {'azureml-logs/20_image_build_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/20_image_build_log.txt?sv=2018-03-28&sr=b&sig=UnaDzx29AaHQ4bXCShTvUjr9zNTT%2B9u2uBDvfSdMMq8%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/70_driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/70_driver_log.txt?sv=2018-03-28&sr=b&sig=XYlg%2FyRm0SUVCXbObSl5DCwjp3Bl3B6on4blzUoFqlo%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/driver_log.txt?sv=2018-03-28&sr=b&sig=kprjz9j6n3Lzm%2FL8KxJkUBNvxW1BWMiS7hWfVsfpERw%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_stdout-job_post.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_post.txt?sv=2018-03-28&sr=b&sig=ioI%2Ff6h7kdlY11mw9PIgpLuPT%2FGMVXSXLRB34qHuJaA%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_execution.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_execution.txt?sv=2018-03-28&sr=b&sig=ROUL6b8kXIXxxUwuQouafOz3jRRpTEBkU3abanuluz8%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 
'azureml-logs/56_batchai_stderr.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/56_batchai_stderr.txt?sv=2018-03-28&sr=b&sig=Te%2FTg7zxGNobQjnHM5Nvzv%2BCQ4LuWrvb29KKunFFRgQ%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_stdout.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout.txt?sv=2018-03-28&sr=b&sig=AFfpczOOi6dGhkqDcREC4kscKdT%2FigM7OnLNAQZZthI%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_stdout-job_prep.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_prep.txt?sv=2018-03-28&sr=b&sig=PkUiUN6d9d9MCnCgxRUsdVBlvy2vq2sgyWTb%2FdeOf8g%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=wChazhC1Pscu1eFgEmq9nkZ3mxQl4J%2FmvKJoRD7GdBg%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=N9oo4pCAB4A9gAPsEwKWzv%2BB1UJ%2BS2pnNGMc0Hv%2B%2F9k%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=lXcVv6kNYKwT2tWKIjFXWASrQSBs2RfNYTEqhairYfQ%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/138_azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/138_azureml.log?sv=2018-03-28&sr=b&sig=bwyQxhxb8UkX%2FAvYDXGAczSBrXieqOqfshRlwsyYai0%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/azureml.log?sv=2018-03-28&sr=b&sig=N1CzC2OIPbDG6Ts9nzPVg1rN3%2BuTiXNHSM4A4UOv6YE%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r'}}\n", + "\n", + "\n", + "\n", + "\n", + "StepRunId: 297207dd-e830-4133-af2b-1efff54ee11a\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/297207dd-e830-4133-af2b-1efff54ee11a\n", + "StepRun( AutoML ) Status: NotStarted\n", + "StepRun( AutoML ) Status: Running\n", + "\n", + "StepRun(AutoML) Execution Summary\n", + "==================================\n", + "StepRun( AutoML ) Status: Finished\n", + "{'runId': '297207dd-e830-4133-af2b-1efff54ee11a', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T03:36:02.737145Z', 'endTimeUtc': '2019-06-19T03:44:50.348314Z', 'properties': {'azureml.runsource': 'azureml.StepRun', 'ContentSnapshotId': '81120654-2a16-4013-96f7-922eda5e4e1e', 'StepType': 'AutoMLStep', 
'azureml.pipelinerunid': '5549c561-26e2-4979-9f3f-0379e38de86a', 'num_iterations': '5', 'training_type': 'TrainFull', 'acquisition_function': 'EI', 'metrics': 'accuracy', 'primary_metric': 'spearman_correlation', 'train_split': '0', 'MaxTimeSeconds': '300', 'acquisition_parameter': '0', 'num_cross_validation': None, 'target': 'gpucluster', 'RawAMLSettingsString': \"{'name':'automl-sentence-similarity','subscription_id':'15ae9cb6-95c1-483d-a0e3-b1a1a3b06324','resource_group':'nlprg','workspace_name':'MAIDAPTest','path':'./automl-sentence-similarity','iterations':5,'data_script':'./automl-sentence-similarity/get_data.py','primary_metric':'spearman_correlation','task_type':'regression','compute_target':'gpucluster','spark_context':None,'validation_size':0.0,'n_cross_validations':None,'y_min':None,'y_max':None,'num_classes':None,'preprocess':True,'lag_length':0,'max_cores_per_iteration':1,'max_concurrent_iterations':1,'iteration_timeout_minutes':5,'mem_in_mb':None,'enforce_time_on_windows':True,'experiment_timeout_minutes':None,'experiment_exit_score':None,'blacklist_models':None,'whitelist_models':None,'auto_blacklist':True,'exclude_nan_labels':True,'verbosity':20,'debug_log':'automl_errors.log','debug_flag':None,'enable_ensembling':True,'ensemble_iterations':5,'model_explainability':False,'enable_tf':False,'enable_cache':True,'enable_subsampling':False,'subsample_seed':None,'cost_mode':0,'is_timeseries':False,'metric_operation':'maximize'}\", 'AMLSettingsJsonString': '{\"name\":\"automl-sentence-similarity\",\"subscription_id\":\"15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\",\"resource_group\":\"nlprg\",\"workspace_name\":\"MAIDAPTest\",\"path\":\"./automl-sentence-similarity\",\"iterations\":5,\"data_script\":\"./automl-sentence-similarity/get_data.py\",\"primary_metric\":\"spearman_correlation\",\"task_type\":\"regression\",\"compute_target\":\"gpucluster\",\"spark_context\":null,\"validation_size\":0.0,\"n_cross_validations\":null,\"y_min\":null,\"y_max\":null,\"num_classes\":null,\"preprocess\":true,\"lag_length\":0,\"max_cores_per_iteration\":1,\"max_concurrent_iterations\":1,\"iteration_timeout_minutes\":5,\"mem_in_mb\":null,\"enforce_time_on_windows\":true,\"experiment_timeout_minutes\":null,\"experiment_exit_score\":null,\"blacklist_models\":null,\"whitelist_models\":null,\"auto_blacklist\":true,\"exclude_nan_labels\":true,\"verbosity\":20,\"debug_log\":\"automl_errors.log\",\"debug_flag\":null,\"enable_ensembling\":true,\"ensemble_iterations\":5,\"model_explainability\":false,\"enable_tf\":false,\"enable_cache\":true,\"enable_subsampling\":false,\"subsample_seed\":null,\"cost_mode\":0,\"is_timeseries\":false,\"metric_operation\":\"maximize\"}', 'DataPrepJsonString': None, 'EnableSubsampling': 'False', 'runTemplate': 'AutoML', 'snapshotId': '81120654-2a16-4013-96f7-922eda5e4e1e', 'SetupRunId': '297207dd-e830-4133-af2b-1efff54ee11a_setup', 'ProblemInfoJsonString': '{\"dataset_num_categorical\": 0, \"dataset_classes\": 140, \"dataset_features\": 1024, \"dataset_samples\": 5749, \"is_sparse\": false, \"subsampling\": false}'}, 'logFiles': {'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.297207dd-e830-4133-af2b-1efff54ee11a/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=sc4OGsuRrBaBzm1%2F8U%2BXjdywdh00XNxmO9tISKxYRZM%3D&st=2019-06-19T03%3A37%3A03Z&se=2019-06-19T11%3A47%3A03Z&sp=r', 'logs/azureml/stderrlogs.txt': 
'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.297207dd-e830-4133-af2b-1efff54ee11a/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=M9aM0Xy%2FznTxVS1jkee1WL5GTKVblvYXOjaRKeh6Bp8%3D&st=2019-06-19T03%3A37%3A03Z&se=2019-06-19T11%3A47%3A03Z&sp=r', 'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.297207dd-e830-4133-af2b-1efff54ee11a/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=kzTStpejzK%2Fk0h6gtKOj4WwJBYz6tp5DG7YnAgPJtJQ%3D&st=2019-06-19T03%3A37%3A03Z&se=2019-06-19T11%3A47%3A03Z&sp=r'}}\n", + "\n", + "\n", + "\n", + "PipelineRun Execution Summary\n", + "==============================\n", + "PipelineRun Status: Finished\n", + "{'runId': '5549c561-26e2-4979-9f3f-0379e38de86a', 'status': 'Completed', 'startTimeUtc': '2019-06-19T03:30:19.54232Z', 'endTimeUtc': '2019-06-19T03:46:59.380629Z', 'properties': {'azureml.runsource': 'azureml.PipelineRun', 'runSource': None, 'runType': 'HTTP', 'azureml.parameters': '{}'}, 'logFiles': {'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.5549c561-26e2-4979-9f3f-0379e38de86a/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=xygOEA3uP72DELH6cKm1AtJ9wsQUrR6DhcGBIZC6Grc%3D&st=2019-06-19T03%3A37%3A06Z&se=2019-06-19T11%3A47%3A06Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.5549c561-26e2-4979-9f3f-0379e38de86a/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=mnwq58c19ZqTLxjJhnS99LTZAHISFgJua4XvFmVKxkE%3D&st=2019-06-19T03%3A37%3A06Z&se=2019-06-19T11%3A47%3A06Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.5549c561-26e2-4979-9f3f-0379e38de86a/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=2dFSQk1r7iNsd18URUCgWlsEp%2FNStDu8Y2d3Q1fkat8%3D&st=2019-06-19T03%3A37%3A06Z&se=2019-06-19T11%3A47%3A06Z&sp=r'}}\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "'Finished'" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pipeline_run.wait_for_completion(show_output=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "published_pipeline = pipeline.publish(\n", + " name=\"Sentence_Similarity_Pipeline\", \n", + " description=\"Sentence Similarity with Google USE Features\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/scenarios/sentence_similarity/pipelines.png b/scenarios/sentence_similarity/pipelines.png new file mode 100644 index 0000000000000000000000000000000000000000..38f1cd5aa89e2c5d0e18b9df39439462a6b638f5 GIT binary patch literal 18681 zcmafaQ*7Wm6ClPfgC0kP`R|5wV zFacv*LlYtiYXdVAMH2&Kcl%KjZZI(M3<+TYWw({HEI2Rp`OZr=D8UVI;;@4t1XP&c z;V>~!Rf^(PG;=o#Kvs__9x~OYpC(RdcfS{{J1Y{4|1r7mOWh_rW`M`HXrNxUYR^iC)1bf7hXOWd@ETj5yU?p z`&k8{0fA3HA)lc^pU9Lvpo3t^9q0g^3q<>OPEzjwJ&}|h`yuFHC4Pj=$NOz(UO42H z2^S4?C59v+rgjazziPZ23tP~O?aRZ_zz9DttY9Je0C*S)1mNL)OtMNkW^i~rSK>N1 zBzb*G8?dFY<bCpt zsOi)7PhBn5kmYehNkIwGA*Q#9PhY_k^6NXrX-tblQV5wSZI(-ZgnyD?g>+2qi|bFg 
z`TG=tb=B?FWp?9GJzG;FC9(lDTFUaxt10qq4Mlx0M^}{{ve$top&K2F0%Z>*Lu-Zm zhkWO;7Um}1xSVXSa{c3m3<61P1`gf=A9^?#e7*^)3k>J;ODj%ZGDc*LGRS}pJ+8_@Ne`V@@`lPIxA)W%f^KW$>>4n> zzgu~Aw>8a zGIct~n`VnI?PEjwAo>aspJoP7MpUGV*F&>PM2OV@!&11Wpf}fOJHNGB)Qkd9S5(%2 zIkMwu*v`_l7excWiY9m_E}itCt&ykPm^*q!R>&ak+oQ6A5jUGi+u1~zH098PDVcP z=S^Ob8Z-_u!(ej}(G!K$i-Mm3T$1UZLe^(b`~v)MhWZL_`>4tu(-6_h=AIgYioJo8 zPZD9q_B6LyHgTf31*hY;UCf7vgyVoExKsj;Kz;Z6o%vi)hQtTd5@JhXvsjS`jQ|Fc zzhe`TX=$dFozS%|J%?A&6-ucqDMz8%AGWNmUH2=H+Nmx1bk~ilU!4U!cS!At7$WtD z6fAE?qce?iR{-iTD$t$mRXidaXT7i&V8LT+6Nn+X)DD>rjE}&(evnHMLNR80%MMZC zZajZC0VfW3fEsKZ^LK-`rSAG7mgK<4Z4ae9fJJ8eAimb29Bc_m>dJSmLEe;e^HSbE46~l(VEr=^8%^EZDE2xgUo{X1{XYI znrd#Dm)mW`(Qs&u<&Ga^@Rb}YY5V+}KJe=kKk zai@K&J^zf=j?Q9dUjBPTuBN$K@C!g`pV5XoF+AY2z9{DLBtv}J3)%BcwYsFD%MpQQ zM3z{8Cy^*uCb}YQ28=H=Z*>!P9(jL3-#jUy@T8_`JxA2aZY3V?UVvy(qP0in!Y&;W z1uX7oQc$A^fRd8S)~Q{=OSoqLt9+th$WtLX;{3k*K(I@<5>U% zw;PgB*(n_{K|xC0&`8xt!@A9?4z*;9)*AXp1e8#w4mLn4r|X-*q*I};hLB=zSE+-# z&?$Oj)C&=CgZa1Uy)R+8H3Lid7+~&_U=iPDH&k^$Vti=tX{H9On`aZ>k};@?_4eyi zDX&!Gk2(4pu@@A1O}y+KLwdcusFuYp`SeZhj< z%52tJrqS@s?|M)|;KG?V461N*VGc)0&w6!jRP*p~<9^*nR?0`KRvi~~Is8hVw|PL> zfC$cJ<)T2*D*E}%%!0%Q?;%2zPq1Z^yIhjtVw6(c4?xtTzHz=(W8IM(U4mGQ=%zC<;&%Ki z5@)fP!{?B4sLxSRA8ZxDA)?E8Z11}KfVc{yT&|uIgJg2* z?TVvE9{vlDjOLx$gRhX4hH6e3=b=)RC8g<=Ff0C99&6!TY)aB!_?q7*$)i>!koIf- z#3Rj)IK`)sXjJ1XGWZm?Rr^_@F+(t+u2ewpMwk%Y`9qcu8q7v2mW+ANS=E|~vMLg` z-wyFZD#cWhkSr|k2cnY*g5kN^GM4Xi?t?$d6iw17_fBzX z?XZMom~VAd7al(C*RkA)NX0+jaj@aTertoLtXGDYp8l8GvxWAc*8%rutdtG=M<`72 zz`-PedC5CMN|mrVN@Z2|&r526xlpQ4q*#a=9Yff~7B=iMUI${TAZ;o@1pL2;sMX;J%r!3#g@(5A1^^)Hsx#n!_3 zoVqA&SQxf>%&g+Xxx<5K!YZ~6b~kY0%TF7{YQfNp@GIAAZ)K|03qyszxfQ;g~nRJ*MNL_*$!zqhMFwAT|=Ct^H0Gt<_VP97!~^ zCIgLR%}}1cd{atDcIy{7KGmr3vQ-JqOydT#SB+u=<3aL%o~;rK5q@1S_l*=pnsy!C zA9b(?BJ62R#8fPlmXe8VW88_-Djl)3bd51QqGNq)C8VcUBJ9S+D#x{`8xli8Uleq| z#W!c~{m8@SqwxO#G?&CF+0<@-`ux^PqLr)kbJ|2kHrAOvby@tpoJvqUwMK)l94%|> z)b~NfnOP_?wR7I5*@t4OfQ#KTprq0fD+uVp|Zu20)| zayBfDU9+P{5jd5@_T-BkFWXDet zrRTuk7eC|sSO;~&8Z>eoJ)$$1ucV)^2H{R}Mjo~CcQko-x+jxphRPncqeB+aA+$q~ z=!fu|S$>s|1$=^O!v#+>1touE@x!pPlOEQCw-_E!)c=muFzEM12BmDyu`V&n>PNzd1a4(m|p%E<{!>4{C5ZF zq-@aP|2`4>F-}B2e1X&J;V0!~cVO`-IzoQQB(Z$H#%ZO0MMRcpX5TibI`1_9?L*vZ zq-TV`s2J&h){K>8!WS&ZA*Y92uEKAE$m&!RQdN0#M!C#AF53hz!`Pj&2fWvj0Yqe1^gPsB?2?fGqWm&g}6UkXs ziM)XV;)&XeF$7dJ3iazdGCfjOJ%M8Y)76wAN5v9`Rl zl-m5jL2elHU3k>kdGeOSPQTlVpH87pu)ojaCA9C~KCgsZH-6@r!p`l}rhvwcI<%E{)x#;L#{mom! zcYiiXAG&!QGt;g1+}hqSeEuM?O?*)nd!TWk_U&7Qv^?Qd3Gvm=J|UOtnAy&~B+^hM zb~#nsLfxa?fuVWHXsSCK6ak@mQNVPdYFZOZQFBVhwbu+cfbEgQuHs&|Sd z^^Cy^As#eDj720w`OL8%vwC7&@WncgT z5w*f{#BC@H#g~T(KC@vXnFqgpL3_QQVllJ60p5l$$=A})({Mr+VY3BG?N*U~J{o^kJFsSkl^l46xAaBd;NlA@prBd0Y$)CW5%-)6p8ut! z4I_Yaxz?y~hh|cI8rggf-@B5**Yu^+&=@drRfw!!zB)O<-!`4brPnHKNUMW~7UEba z2(}2z>hT(#Rke9Vu&&eZJ*K0TTuMRt|B7-Mm*VKPIRu1F%Do{*lVSH)J4P$9X! 
z5|R15&x8{Jw;dZ*{8LYxB-q<60Dx(n9#7&svP+!gc=m@0X=-h+u|d;UwZ+N;9qu-k zZzs=1PAoY9sp#kZXVd}A&;B6-QpS}`LW16+9B(@fp1yjUo|b+bCqi{C?!uE2CA{#{ z;S?WQ%ErWTyv|)K>mN#YXuT>}<;J>fK%<8Q4;Lcrj3gs1!m;mUBj>H=280y9QN1B5 zmAK`)(vtD^9+ihc`_ZET6iqk)ge%fTt=Dqa$dUmVbritGnM~%)Du|_=kO?o7C2|IO zN@{An{#!Z(`tJR0M=@bm9#cWwc%pA<*;O;?2RoIhE>)-~#<*m2<=wEbd}%9F(;0aT z87=t~{f|?uSmrEs+DLY%H4d=#SnSM&acI<#io;XY1~)fy#scs;%N0KLfHXZH1YX9 z{b&D9`p7J}8#h4$SOKCSWaa`Dn0$ZVy{}O+H9MN;W~hhu2S8yC2tiFOdfGI(*tQnm zZ|+JCZ>Fx2_@3HJ3iD(mj$hg3bFL|iB#jQP$EY`m$rlo3aJ_DT*Hc$W zFSCeud$L#nuG~qq8c_DwRwkWD-^R_)Bi{lHcXF=WD@~?9%UZVx6juZskDCljUDS)x zJmj4s)SNy07LqpgBeHU8;bP6c@s!M%wU`UOd!g@=qET(NWUc5VwY!*VkaSh1=Ps}B z(IuV(raTtFS3c;EY-q2|Ho-yq?bWI9;b&Oai$`y=ljJjJr8`#bU3K2G^#)T0rJ~-< z*%-fGW7o#EvxUfmZ24cYS)27Tzm==SJER&0Dp#g%mB##Wr}PEoV2@}8N`T~ITHNre zISknM2Ae0x96$+`yfJBpcTTfk5yRJI_t^xl2IYX9gumDW5F)Pq?$k`TIDnmX)6z5x zD5r~-*}uaM>31QcuB$>uch2M22D*LL6?CqF^wZi$W6#4XZ+)#H3k$|~Rnh^p#(WdQ z<8bLF0MNJRXzr2~x;{u8!HjgG^Oa-YNQtX32v`nwaimt&^SVDTT0J1EoAbbHt)5Zc#)$yzWAf zJ7D8ymw^NVOF_`?@er|SS#jas94Lq^BlaXEtDQ0O6BPu@M0wn68}@t^mEG3?Glx9I zPB3^AGZ5`4ik)?H@sXp+ zTQ{}{@Tt7M9#&v)sxPy{0{uy!&IVtNO7$Co;6|96s4M#ATY fL23NG!Z%V`XY_>TKQmUqEF-C7P8d|BgMZ@Rc0<6r literal 0 HcmV?d00001 From 8aa53ab880c7c57eba92904c38b843f0c18b1cc8 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Wed, 19 Jun 2019 13:36:04 -0400 Subject: [PATCH 077/108] Added in descriptions for pipelines, table of contents, etc. --- .../automl_with_pipelines.ipynb | 500 ++++++++++++------ .../sentence_similarity/pipelineWidget.PNG | Bin 0 -> 109142 bytes 2 files changed, 327 insertions(+), 173 deletions(-) create mode 100644 scenarios/sentence_similarity/pipelineWidget.PNG diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 20f3342bb..17b95a874 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Using Pipelines and AutoML for Predicting Sentence Similarity" + "# Using AzureML Pipelines and AutoML for Predicting Sentence Similarity" ] }, { @@ -21,19 +21,68 @@ "metadata": {}, "source": [ "This notebook demonstrates how to use AzureML pipelines and AutoML to streamline the creation of a machine learning workflow for predicting sentence similarity. The pipeline contains two steps: \n", - "1. PythonScriptStep: uses a popular sentence embedding model from Google, Universal Sentence Encoder, to convert our sentence data into numerical data\n", - "2. AutoMLStep: demonstrates how to use AutoML to automate model selection for predicting sentence similarity scores (regression)\n", + "1. PythonScriptStep: embeds sentences using a popular sentence embedding model, Google Universal Sentence Encoder\n", + "2. AutoMLStep: demonstrates how to use AutoML to automate model selection for predicting sentence similarity (regression)\n", "\n", - "An AmlCompute target is used to run the pipeline, Azure Datastores are used for storing of our data, and logging is utilized. " + "This notebook showcases how to use the following AzureML features: \n", + "- AzureML Pipelines\n", + "- AutoML\n", + "- AmlCompute\n", + "- Datastore\n", + "- Logging" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "### What are AzureML Pipelines?\n", + "## Table of Contents\n", + "1. [Introduction](#1.-Introduction) \n", + " * 1.1 [What are AzureML Pipelines?](#1.1-What-are-AzureML-Pipelines?) 
\n", + " * 1.2 [What is Azure AutoML?](#1.2-What-is-Azure-AutoML?) \n", + " * 1.3 [Modeling Problem](#1.3-Modeling-Problem) \n", + " \n", + " \n", + "2. [Data Preparation](#2.-Data-Preparation) \n", + "\n", + "\n", + "3. [AzureML Setup](#3.-AzureML-Setup) \n", + " * 3.1 [Link to or create a `Workspace`](#3.1-Link-to-or-create-a-Workspace) \n", + " * 3.2 [Set up an `Experiment` and logging](#3.2-Set-up-an-Experiment-and-logging) \n", + " * 3.3 [Link `AmlCompute` compute target](#3.3-Link-AmlCompute-compute-target) \n", + " * 3.4 [Upload data to `Datastore`](#3.4-Upload-data-to-Datastore) \n", + " \n", + " \n", + "4. [Create AzureML Pipeline](#4.-Create-AzureML-Pipeline) \n", + " * 4.1 [Set up run configuration file](#4.1-Set-up-run-configuration-file) \n", + " * 4.2 [PythonScriptStep](#4.2-PythonScriptStep) \n", + " * 4.2.1 [Define python script to run](#4.2.1-Define-python-script-to-run)\n", + " * 4.2.2 [Create PipelineData object](#4.2.2-Create-PipelineData-object)\n", + " * 4.2.3 [Create PythonScriptStep](#4.2.3-Create-PythonScriptStep)\n", + " \n", + " * 4.3 [AutoMLStep](#4.3-AutoMLStep)\n", + " * 4.3.1 [Define get_data script to load data](#4.3.1-Define-get_data-script-to-load-data)\n", + " * 4.3.2 [Create AutoMLConfig object](#4.3.2-Create-AutoMLConfig-object)\n", + " * 4.3.3 [Create AutoMLStep](#4.3.3-Create-AutoMLStep)\n", + " \n", + " \n", + "5. [Run Pipeline](#5.-Run-Pipeline)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Introduction" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.1 What are AzureML Pipelines?\n", "\n", - "AzureML Pipelines \"define reusable machine learning workflows that can be used as a template for your machine learning scenarios\" (https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines). Pipelines allow you to optimize your workflow and spend time on machine learning rather than infrastructure. A Pipeline is defined by a series of steps; the following steps are available: AdlaStep, AutoMLStep, AzureBatchStep, DataTransferStep, DatabricksStep, EstimatorStep, HyperDriveStep, ModuleStep, MpiStep, and PythonScriptStep (see [here](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/?view=azure-ml-py) for details of each step). When the pipeline is run, cached results are used for all steps that have not changed, optimizing the run time. Data sources and intermediate data can be used across multiple steps in a pipeline, saving time and resources. Below we see an example of an AzureML pipeline." + "AzureML Pipelines \"define reusable machine learning workflows that can be used as a template for your machine learning scenarios\" ([pipeline information](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines)). Pipelines allow you to optimize your workflow and spend time on machine learning rather than infrastructure. A Pipeline is defined by a series of steps; the following steps are available: AdlaStep, AutoMLStep, AzureBatchStep, DataTransferStep, DatabricksStep, EstimatorStep, HyperDriveStep, ModuleStep, MpiStep, and PythonScriptStep (see [here](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/?view=azure-ml-py) for details of each step). When the pipeline is run, cached results are used for all steps that have not changed, optimizing the run time. Data sources and intermediate data can be used across multiple steps in a pipeline, saving time and resources. 
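To make the step-and-dependency model concrete, here is a minimal sketch (not one of this notebook's cells) of how two `PythonScriptStep`s could be chained through a `PipelineData` object. The workspace config file, the "gpucluster" compute target, the `./scripts` folder, and the `prepare.py`/`train.py` scripts are all assumed placeholders.

```python
# Minimal sketch: chain two PythonScriptSteps through a PipelineData object.
# Assumes a workspace config file, an existing compute target named "gpucluster",
# and two hypothetical scripts (prepare.py, train.py) under ./scripts that read
# and write the paths passed to them as arguments.
from azureml.core import Experiment, Workspace
from azureml.pipeline.core import Pipeline, PipelineData
from azureml.pipeline.steps import PythonScriptStep

ws = Workspace.from_config()
compute_target = ws.compute_targets["gpucluster"]
ds = ws.get_default_datastore()

# Intermediate data: produced by the first step, consumed by the second
prepared_data = PipelineData("prepared_data", datastore=ds)

prep_step = PythonScriptStep(
    name="Prepare",
    script_name="prepare.py",
    arguments=["--output_dir", prepared_data],
    outputs=[prepared_data],
    compute_target=compute_target,
    source_directory="./scripts",
)

train_step = PythonScriptStep(
    name="Train",
    script_name="train.py",
    arguments=["--input_dir", prepared_data],
    inputs=[prepared_data],
    compute_target=compute_target,
    source_directory="./scripts",
)

# Step ordering is inferred from the shared PipelineData dependency
pipeline = Pipeline(workspace=ws, steps=[prep_step, train_step])
run = Experiment(ws, "pipeline-sketch").submit(pipeline)
```

Because the second step declares the first step's output as an input, the SDK infers the execution order, and steps whose code and inputs are unchanged can be served from cache on later submissions.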
Below we see an example of an AzureML pipeline." ] }, { @@ -47,7 +96,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### What is Azure AutoML?\n", + "### 1.2 What is Azure AutoML?\n", "\n", "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", "\n", @@ -72,7 +121,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Modeling Problem\n", + "### 1.3 Modeling Problem\n", "\n", "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence).\n", "\n", @@ -81,17 +130,9 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 34, "metadata": {}, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: Logging before flag parsing goes to stderr.\n", - "W0618 22:40:59.654601 10096 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -159,7 +200,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 35, "metadata": {}, "outputs": [], "source": [ @@ -170,7 +211,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 1. Data Preparation" + "# 2. 
Data Preparation" ] }, { @@ -184,14 +225,14 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 36, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 232KB/s]\n" + "100%|████████████████████████████████████████████████| 401/401 [00:02<00:00, 191KB/s]\n" ] }, { @@ -205,7 +246,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:02<00:00, 140KB/s]\n" + "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 211KB/s]\n" ] }, { @@ -219,7 +260,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:02<00:00, 165KB/s]\n" + "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 210KB/s]\n" ] }, { @@ -239,7 +280,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 37, "metadata": {}, "outputs": [], "source": [ @@ -252,7 +293,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -264,7 +305,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 39, "metadata": {}, "outputs": [ { @@ -285,7 +326,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 40, "metadata": {}, "outputs": [ { @@ -365,7 +406,7 @@ "4 a man seated is playing the cello. " ] }, - "execution_count": 10, + "execution_count": 40, "metadata": {}, "output_type": "execute_result" } @@ -376,7 +417,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 41, "metadata": {}, "outputs": [], "source": [ @@ -393,57 +434,32 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 2. Set up AzureML Workspace, Experiment, Compute & Datastore" + "# 3. AzureML Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we set up the necessary components for running this as an AzureML experiment\n", + "1. Create or link to an existing `Workspace`\n", + "2. Set up an `Experiment` with `logging`\n", + "3. Create or attach existing `AmlCompute`\n", + "4. Upload our data to a `Datastore`" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 2a. Link to or create a workspace" + "## 3.1 Link to or create a Workspace" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0618 22:55:15.048929 10096 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", - "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", - "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0618 22:55:15.432929 37988 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "W0618 22:55:30.586771 10096 _profile.py:774] You have logged in. 
Now let us find all the subscriptions to which you have access...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Interactive authentication successfully completed.\n", - "Workspace name: MAIDAPTest\n", - "Azure region: eastus2\n", - "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", - "Resource group: nlprg\n" - ] - } - ], + "outputs": [], "source": [ "ws = azureml_utils.get_or_create_workspace(\n", " subscription_id=\"\",\n", @@ -461,12 +477,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 2b. Set up an experiment and logging" + "## 3.2 Set up an Experiment and logging" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 43, "metadata": {}, "outputs": [], "source": [ @@ -478,6 +494,8 @@ "# Set up an experiment\n", "experiment_name = 'automl-sentence-similarity'\n", "experiment = Experiment(ws, experiment_name)\n", + "\n", + "#Add logging to our experiment\n", "run = experiment.start_logging()" ] }, @@ -485,12 +503,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 2c. Link compute target" + "## 3.3 Link AmlCompute compute target" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To use AzureML Pipelines we need to link a compute target as they can not be run locally (see [compute options](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#supported-compute-targets) for explanation of the different options). We will use an AmlCompute target in this example." ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 44, "metadata": {}, "outputs": [ { @@ -498,7 +523,7 @@ "output_type": "stream", "text": [ "Found existing compute target.\n", - "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-19T02:52:52.599000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + "{'currentNodeCount': 1, 'targetNodeCount': 1, 'nodeStateCounts': {'preparingNodeCount': 1, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-19T17:10:43.157000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] } ], @@ -527,12 +552,19 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 2d. Upload data to datastore" + "## 3.4 Upload data to Datastore" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This step uploads our local data to a `Datastore` so that the data is accessible from the remote compute target and creates a `DataReference` to point to the location of the data on the Datastore. 
A DataStore is backed either by a Azure File Storage (default option) or Azure Blob Storage ([how to decide between these options](https://docs.microsoft.com/en-us/azure/storage/common/storage-decide-blobs-files-disks)) and data is made accessible by mounting or copying data to the compute target." ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 45, "metadata": {}, "outputs": [ { @@ -542,28 +574,28 @@ "Uploading ./data\\dev.csv\n", "Uploading ./data\\test.csv\n", "Uploading ./data\\train.csv\n", - "Uploaded ./data\\dev.csv, 1 files out of an estimated total of 3\n", - "Uploaded ./data\\train.csv, 2 files out of an estimated total of 3\n", + "Uploaded ./data\\train.csv, 1 files out of an estimated total of 3\n", + "Uploaded ./data\\dev.csv, 2 files out of an estimated total of 3\n", "Uploaded ./data\\test.csv, 3 files out of an estimated total of 3\n" ] }, { "data": { "text/plain": [ - "$AZUREML_DATAREFERENCE_e806155bf4c3452596bd2c3ffa76743d" + "$AZUREML_DATAREFERENCE_dbe7476178794853924424fdfbc4dcc1" ] }, - "execution_count": 16, + "execution_count": 45, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Select a specific datastore or can call ws.get_default_datastore()\n", + "# Select a specific datastore or you can call ws.get_default_datastore()\n", "datastore_name = 'workspacefilestore'\n", "ds = ws.datastores[datastore_name]\n", "\n", - "# Upload files in data folder\n", + "# Upload files in data folder to the datastore\n", "ds.upload(src_dir='./data', target_path='stsbenchmark_data', overwrite=True, show_progress=True)" ] }, @@ -571,12 +603,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Set up a **DataReference** object that points to the data we just uploaded into the stsbenchmark_data folder. DataReference objects point to data that is accessible from a datastore." + "We also set up a **DataReference** object that points to the data we just uploaded into the stsbenchmark_data folder. DataReference objects point to data that is accessible from a datastore and will be used an an input into our pipeline." ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 46, "metadata": {}, "outputs": [], "source": [ @@ -590,19 +622,35 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 3. Create Pipeline" + "# 4. Create AzureML Pipeline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 3a. Set up run configuration file" + "Now we set up our pipeline which is made of two steps: \n", + "1. `PythonScriptStep`: takes each sentence pair from the data in the `Datastore` and concatenates the Google USE embeddings for each sentence into one vector. This step saves the embedding feature matrix back to our `Datastore` and uses a `PipelineData` object to represent this intermediate data. \n", + "2. `AutoMLStep`: takes the intermediate data produced by the previous step and passes it to an `AutoMLConfig` which performs the automatic model selection" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.1 Set up run configuration file" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First we set up a `RunConguration` object which configures the execution environment for an experiment (sets up the conda dependencies, etc.)" ] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 47, "metadata": {}, "outputs": [ { @@ -637,38 +685,31 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 3b. 
PythonScriptStep" + "## 4.2 PythonScriptStep" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In this pipeline step, we will convert our sentences into a numerical representation in order to use them in our machine learning model. We will embed both sentences using the Google Universal Sentence Encoder and concatenate their representations into a $1024$-dimensional vector to use as features for AutoML.\n", + "`PythonScriptStep` is a step which runs a user-defined Python script ([documentation](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/azureml.pipeline.steps.python_script_step.pythonscriptstep?view=azure-ml-py) here). In this `PythonScriptStep`, we will convert our sentences into a numerical representation in order to use them in our machine learning model. We will embed both sentences using the Google Universal Sentence Encoder (provided by tensorflow-hub) and concatenate their representations into a $1024$-dimensional vector to use as features for AutoML.\n", "\n", - "**Google Universal Sentence Encoder: Overview**\n", - "We'll use a popular sentence encoder called Google Universal Sentence Encoder (see [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). Google provides two pretrained models based on different design goals: a Transformer model (targets high accuracy even if this reduces model complexity) and a Deep Averaging Network model (DAN; targets efficient inference). Both models are trained on a variety of web sources (Wikipedia, news, question-answers pages, and discussion forums) and produced 512-dimensional embeddings. This notebook utilizes the Transformer-based encoding model which can be downloaded [here](https://tfhub.dev/google/universal-sentence-encoder-large/3) because of its better performance relative to the DAN model on the STS Benchmark dataset (see Table 2 in Google Research's [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). " + "**Google Universal Sentence Encoder:**\n", + "We'll use a popular sentence encoder called Google Universal Sentence Encoder (see [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). Google provides two pretrained models based on different design goals: a Transformer model (targets high accuracy even if this reduces model complexity) and a Deep Averaging Network model (DAN; targets efficient inference). Both models are trained on a variety of web sources (Wikipedia, news, question-answers pages, and discussion forums) and produced 512-dimensional embeddings. This notebook utilizes the Transformer-based encoding model which can be downloaded [here](https://tfhub.dev/google/universal-sentence-encoder-large/3) because of its better performance relative to the DAN model on the STS Benchmark dataset (see Table 2 in Google Research's [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). The Transformer model produces sentence embeddings using the \"encoding sub-graph of the transformer architecture\" (original architecture introduced [here](https://arxiv.org/abs/1706.03762)). \"This sub-graph uses attention to compute context aware representations of words in a sentence that take into account both the ordering and identity of all the other workds. 
The context aware word representations are converted to a fixed length sentence encoding vector by computing the element-wise sum of the representations at each word position.\" The input to the model is lowercase PTB-tokenized strings and the model is designed to be useful for multiple different tasks by using multi-task learning. More details about the model can be found in the [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf) by Google Research." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Google Universal Sentence Encoder: Transformer Model** The Transformer model produces sentence embeddings using the \"encoding sub-graph of the transformer architecture\" (original architecture introduced [here](https://arxiv.org/abs/1706.03762)). \"This sub-graph uses attention to compute context aware representations of words in a sentence that take into account both the ordering and identity of all the other workds. The context aware word representations are converted to a fixed length sentence encoding vector by computing the element-wise sum of the representations at each word position.\" The input to the model is lowercase PTB-tokenized strings and the model is designed to be useful for multiple different tasks by using multi-task learning. More details about the model can be found in the [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf) by Google Research." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Using the Pretrained Model**\n", + "### 4.2.1 Define python script to run\n", "\n", - "Tensorflow-hub provides the pretrained model for use by the public. We import the model from its url and then feed the model our sentences for it to encode." + "Define the script (called embed.py) that the `PythonScriptStep` will execute:" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 48, "metadata": {}, "outputs": [ { @@ -742,10 +783,12 @@ " return df, dataset['score']\n", "\n", "def write_output(df, path, name):\n", + " \"\"\"Write dataframes to correct path\"\"\"\n", " os.makedirs(path, exist_ok=True)\n", " print(\"%s created\" % path)\n", " df.to_csv(path + \"/\" + name, index=False)\n", "\n", + "#Parse arguments\n", "parser = argparse.ArgumentParser()\n", "parser.add_argument(\"--sentence_data\", type=str)\n", "parser.add_argument(\"--embedded_data\", type=str)\n", @@ -755,15 +798,19 @@ "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", "embedding_model = hub.Module(module_url)\n", "\n", + "#Read data \n", "train = pd.read_csv(args.sentence_data + \"/train.csv\")\n", "dev = pd.read_csv(args.sentence_data + \"/dev.csv\")\n", "\n", + "#Get Google USE features\n", "training_data, training_scores = feature_engineering(train)\n", "validation_data, validation_scores = feature_engineering(dev)\n", "\n", + "#Write out training data to Datastore\n", "write_output(training_data, args.embedded_data, \"X_train.csv\")\n", "write_output(pd.DataFrame(training_scores, columns=['score']), args.embedded_data, \"y_train.csv\")\n", "\n", + "#Write out validation data to Datastore\n", "write_output(validation_data, args.embedded_data, \"X_dev.csv\")\n", "write_output(pd.DataFrame(validation_scores, columns=['score']), args.embedded_data, \"y_dev.csv\")" ] @@ -772,21 +819,44 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "**PipelineData** objects represent a piece of intermediate data in a pipeline. 
Generally they are produced by one step (as an output) and then consumed by the next step (as an input), introducing an implicit order between steps in a pipeline. We create a PipelineData object that can represent the data produced by our first pipeline step that will be consumed by our second pipeline step." + "### 4.2.2 Create PipelineData object" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`PipelineData` objects represent a piece of intermediate data in a pipeline. Generally they are produced by one step (as an output) and then consumed by the next step (as an input), introducing an implicit order between steps in a pipeline. We create a PipelineData object that can represent the data produced by our first pipeline step that will be consumed by our second pipeline step." ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 49, "metadata": {}, "outputs": [], "source": [ "embedded_data = PipelineData(\"embedded_data\", datastore=ds)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.2.3 Create PythonScriptStep" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This step defines the `PythonScriptStep`. We give the step a name, tell the step which python script to run (embed.py) and what directory that script is located in (source_directory). Note that the hash_paths parameter will be deprecated but currently is needed to check for any updates to the embed.py file.\n", + "\n", + "We also link the compute target and run configuration that we made previously. Our input is the `DataReference` object (input_data) where our raw sentence data was uploaded and our ouput is the `PipelineData` object (embedded_data) where the embedded data produced by this step will be stored. These are also passed in as arguments so that we have access to the correct data paths." + ] + }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 50, "metadata": {}, "outputs": [], "source": [ @@ -809,12 +879,34 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 3c. AutoMLStep" + "## 4.3 AutoMLStep" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`AutoMLStep` creates an AutoML step in a pipeline (see [documentation](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlstep?view=azure-ml-py) and [basic example](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-with-automated-machine-learning-step.ipynb)). When using AutoML on remote compute, rather than passing our data directly into the `AutoMLConfig` object as we did in the local example, we must define a get_data.py script with a get_data() function to pass as the data_script argument. This workflow can be used for both local and remote executions (see [details](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-auto-train-remote)). \n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.3.1 Define get_data script to load data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define the get_data.py file and get_data() function that the `AutoMLStep` will execute to collect data. Note that we can directly access the path of the intermediate data (called embedded_data) through `os.environ['AZUREML_DATAREFERENCE_embedded_data']`. 
This is necessary because the AutoMLStep does not accept additional parameters like the PythonScriptStep does with `arguments`." ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 51, "metadata": {}, "outputs": [ { @@ -832,6 +924,7 @@ "import pandas as pd\n", "\n", "def get_data():\n", + " \"\"\"Function needed to load data for use on remote AutoML experiments\"\"\"\n", " X_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_train.csv\")\n", " y_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_train.csv\")\n", " X_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_dev.csv\")\n", @@ -839,9 +932,91 @@ " return { \"X\" : X_train.values, \"y\" : y_train.values.flatten(), \"X_valid\": X_dev.values, \"y_valid\": y_dev.values.flatten()}" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.3.2 Create AutoMLConfig object" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, we specify the parameters for the `AutoMLConfig` class:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**task** \n", + "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**preprocess** \n", + "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**primary_metric** \n", + "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Constraints:** \n", + "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). 
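Before turning to the time-based constraints below, here is an illustrative sketch (distinct from the configuration defined in the next code cell) of how the model-selection, ensembling, preprocessing, and cost-mode options described above might be combined. The whitelisted model names, the get_data.py path, and the availability of the `enable_voting_ensemble`/`enable_stack_ensemble` parameter names are assumptions that may vary by SDK version; treat this purely as an example of the available knobs, not as the settings used in this notebook.

```python
# Illustrative sketch only: a regression AutoMLConfig that restricts the base
# learners, keeps both ensemble types enabled, applies advanced preprocessing,
# and optimizes Spearman correlation. The configuration actually used in this
# notebook is defined in the next code cell.
import logging

from azureml.train.automl import AutoMLConfig

sketch_config = AutoMLConfig(
    task="regression",
    primary_metric="spearman_correlation",
    preprocess=True,  # advanced preprocessing described above
    whitelist_models=["LightGBM", "ElasticNet", "RandomForest"],  # hypothetical shortlist
    enable_voting_ensemble=True,  # weighted average of base learners
    enable_stack_ensemble=True,   # second-level "metalearner"
    cost_mode=0,                  # cost prediction mode (see linked options)
    data_script="./automl-sentence-similarity/get_data.py",  # hypothetical path to a get_data() script
    debug_log="automl_sketch_errors.log",
    verbosity=logging.INFO,
)
```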
To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after acheiving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [], + "source": [ + "automl_settings = {\n", + " \"iteration_timeout_minutes\": 5, #How long each iteration can take before moving on\n", + " \"iterations\": 5, #Number of algorithm options to try\n", + " \"primary_metric\": 'spearman_correlation', #Metric to optimize\n", + " \"preprocess\": True, #Whether dataset preprocessing should be applied\n", + " \"verbosity\": logging.INFO,\n", + "}\n", + "automl_config = AutoMLConfig(task = 'regression', #type of task: classification, regression or forecasting\n", + " debug_log = 'automl_errors.log',\n", + " path = project_folder,\n", + " compute_target=compute_target,\n", + " run_configuration=conda_run_config,\n", + " data_script = project_folder + \"/get_data.py\", #local path to script with get_data() function\n", + " **automl_settings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4.3.3 Create AutoMLStep" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we create `PipelineData` objects for the model data (our outputs) and then create the `AutoMLStep`. The `AutoMLStep` requires a `AutoMLConfig` object and we pass our intermediate data (embedded_data) in as the inputs. Again, note that the hash_paths parameter will be deprecated but currently is needed to check for any updates to the get_data.py file." + ] + }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 53, "metadata": {}, "outputs": [], "source": [ @@ -861,75 +1036,59 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 54, "metadata": {}, "outputs": [], "source": [ - "automl_settings = {\n", - " \"iteration_timeout_minutes\": 5,\n", - " \"iterations\": 5,\n", - " \"primary_metric\": 'spearman_correlation',\n", - " \"preprocess\": True,\n", - " \"verbosity\": logging.INFO,\n", - "}\n", - "automl_config = AutoMLConfig(task = 'regression',\n", - " debug_log = 'automl_errors.log',\n", - " path = project_folder,\n", - " compute_target=compute_target,\n", - " run_configuration=conda_run_config,\n", - " data_script = project_folder + \"/get_data.py\",\n", - " **automl_settings\n", - " )" + "automl_step = AutoMLStep(\n", + " name='AutoML',\n", + " automl_config=automl_config, #the AutoMLConfig object created previously\n", + " inputs=[embedded_data], #inputs is the PipelineData that was the output of the previous pipeline step\n", + " outputs=[metrics_data, model_data], #PipelineData objects to reference metric and model information\n", + " hash_paths=[\"get_data.py\"],\n", + " allow_reuse=True)" ] }, { - "cell_type": "code", - "execution_count": 25, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "automl_step = AutoMLStep(\n", - " name='AutoML',\n", - " automl_config=automl_config,\n", - " inputs=[embedded_data],\n", - " outputs=[metrics_data, model_data],\n", - " hash_paths=[\"get_data.py\"],\n", - " allow_reuse=True)" + "# 5. Run Pipeline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "# 4. Run Pipeline" + "Now we set up our pipeline which requires specifying our `Workspace` and the ordering of the steps that we created (steps parameter). 
We submit the pipeline and inspect the run details using a RunDetails widget. For remote runs, the execution of iterations is asynchronous." ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 55, "metadata": {}, "outputs": [], "source": [ - "automl_step.run_after(embedStep)\n", + "#automl_step.run_after(embedStep)\n", "pipeline = Pipeline(\n", - " description=\"pipeline_embed_automl\",\n", + " description=\"pipeline_embed_automl\", #give a name for the pipeline\n", " workspace=ws, \n", - " steps=[automl_step])" + " steps=[embedStep, automl_step])" ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 56, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Created step AutoML [320f0121][5913af95-9ebb-42c0-a650-7725b7fe0b54], (This step will run and generate new outputs)\n", - "Created step Embed [81087fb9][d271deed-bd3b-4e41-9814-29fc11e585b4], (This step is eligible to reuse a previous run's output)\n", - "Using data reference stsbenchmark for StepId [8ca56eac][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", - "Submitted pipeline run: 5549c561-26e2-4979-9f3f-0379e38de86a\n" + "Created step Embed [ba6c6c5a][d271deed-bd3b-4e41-9814-29fc11e585b4], (This step is eligible to reuse a previous run's output)\n", + "Created step AutoML [4cc715de][cd685f96-c946-46df-a4c4-87a53b92c90e], (This step will run and generate new outputs)\n", + "Using data reference stsbenchmark for StepId [0c4ee4ad][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", + "Submitted pipeline run: 994a7673-8f48-42b9-9cfa-1a47bf70c304\n" ] } ], @@ -939,75 +1098,69 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "3361878269d34e4a9546fa54822e9e92", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_PipelineWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', '…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ + "# Inspect the run details using the provided widget\n", "RunDetails(pipeline_run).show()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](pipelineWidget.png)" + ] + }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 58, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "PipelineRunId: 5549c561-26e2-4979-9f3f-0379e38de86a\n", - "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/5549c561-26e2-4979-9f3f-0379e38de86a\n", + "PipelineRunId: 994a7673-8f48-42b9-9cfa-1a47bf70c304\n", + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/994a7673-8f48-42b9-9cfa-1a47bf70c304\n", + "PipelineRun Status: NotStarted\n", "PipelineRun Status: Running\n", "\n", "\n", - "StepRunId: 3fffcae0-74f3-49c5-bb7d-f877bda582f7\n", - "Link to Portal: 
https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/3fffcae0-74f3-49c5-bb7d-f877bda582f7\n", + "StepRunId: 9f99614d-6bc5-4c10-a121-b09afdc02c74\n", + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/9f99614d-6bc5-4c10-a121-b09afdc02c74\n", "\n", "StepRun(Embed) Execution Summary\n", "=================================\n", "StepRun( Embed ) Status: Finished\n", - "{'runId': '3fffcae0-74f3-49c5-bb7d-f877bda582f7', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T03:30:21.798384Z', 'endTimeUtc': '2019-06-19T03:30:21.864304Z', 'properties': {'azureml.reusedrunid': 'f78cb325-802a-4779-ada8-05db82c97835', 'azureml.reusednodeid': '70352e68', 'azureml.reusedpipeline': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.reusedpipelinerunid': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.runsource': 'azureml.StepRun', 'azureml.nodeid': '81087fb9', 'ContentSnapshotId': '8979e52a-3c38-432c-b9e3-a235b33b7d1e', 'StepType': 'PythonScriptStep', 'ComputeTargetType': 'AmlCompute', 'azureml.pipelinerunid': '5549c561-26e2-4979-9f3f-0379e38de86a', 'AzureML.DerivedImageName': 'azureml/azureml_b2a8349416887710026a15e07f74a6a3'}, 'runDefinition': {'script': 'embed.py', 'arguments': ['--embedded_data', '$AZUREML_DATAREFERENCE_embedded_data', '--sentence_data', '$AZUREML_DATAREFERENCE_stsbenchmark'], 'sourceDirectoryDataStore': None, 'framework': 'Python', 'communicator': 'None', 'target': 'gpucluster', 'dataReferences': {'stsbenchmark': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'stsbenchmark_data/', 'pathOnCompute': None, 'overwrite': False}, 'embedded_data': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'azureml/f78cb325-802a-4779-ada8-05db82c97835/embedded_data', 'pathOnCompute': None, 'overwrite': False}}, 'jobName': None, 'maxRunDurationSeconds': None, 'nodeCount': 1, 'environment': {'name': 'Experiment automl-sentence-similarity Environment', 'version': 'Autosave_2019-06-18T20:46:30Z_9b3f4178', 'python': {'interpreterPath': 'python', 'userManagedDependencies': False, 'condaDependencies': {'name': 'project_environment', 'dependencies': ['python=3.6.2', {'pip': ['azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33']}, 'numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], 'channels': ['conda-forge']}, 'baseCondaEnvironment': None}, 'environmentVariables': {'EXAMPLE_ENV_VAR': 'EXAMPLE_VALUE'}, 'docker': {'baseImage': 'mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04', 'enabled': True, 'sharedVolumes': True, 'gpuSupport': False, 'shmSize': '1g', 'arguments': [], 'baseImageRegistry': {'address': None, 'username': None, 'password': None}}, 'spark': {'repositories': ['[]'], 'packages': [], 'precachePackages': True}}, 'history': {'outputCollection': True, 'directoriesToWatch': ['logs']}, 'spark': {'configuration': {'spark.app.name': 'Azure ML Experiment', 'spark.yarn.maxAppAttempts': '1'}}, 'amlCompute': {'name': None, 'vmSize': None, 'vmPriority': None, 'retainCluster': False, 'clusterMaxNodeCount': 1}, 'tensorflow': {'workerCount': 1, 'parameterServerCount': 1}, 'mpi': {'processCountPerNode': 1}, 'hdi': {'yarnDeployMode': 'Cluster'}, 
'containerInstance': {'region': None, 'cpuCores': 2, 'memoryGb': 3.5}, 'exposedPorts': None}, 'logFiles': {'azureml-logs/20_image_build_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/20_image_build_log.txt?sv=2018-03-28&sr=b&sig=UnaDzx29AaHQ4bXCShTvUjr9zNTT%2B9u2uBDvfSdMMq8%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/70_driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/70_driver_log.txt?sv=2018-03-28&sr=b&sig=XYlg%2FyRm0SUVCXbObSl5DCwjp3Bl3B6on4blzUoFqlo%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/driver_log.txt?sv=2018-03-28&sr=b&sig=kprjz9j6n3Lzm%2FL8KxJkUBNvxW1BWMiS7hWfVsfpERw%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_stdout-job_post.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_post.txt?sv=2018-03-28&sr=b&sig=ioI%2Ff6h7kdlY11mw9PIgpLuPT%2FGMVXSXLRB34qHuJaA%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_execution.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_execution.txt?sv=2018-03-28&sr=b&sig=ROUL6b8kXIXxxUwuQouafOz3jRRpTEBkU3abanuluz8%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/56_batchai_stderr.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/56_batchai_stderr.txt?sv=2018-03-28&sr=b&sig=Te%2FTg7zxGNobQjnHM5Nvzv%2BCQ4LuWrvb29KKunFFRgQ%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_stdout.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout.txt?sv=2018-03-28&sr=b&sig=AFfpczOOi6dGhkqDcREC4kscKdT%2FigM7OnLNAQZZthI%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'azureml-logs/55_batchai_stdout-job_prep.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_prep.txt?sv=2018-03-28&sr=b&sig=PkUiUN6d9d9MCnCgxRUsdVBlvy2vq2sgyWTb%2FdeOf8g%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=wChazhC1Pscu1eFgEmq9nkZ3mxQl4J%2FmvKJoRD7GdBg%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=N9oo4pCAB4A9gAPsEwKWzv%2BB1UJ%2BS2pnNGMc0Hv%2B%2F9k%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/executionlogs.txt': 
'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=lXcVv6kNYKwT2tWKIjFXWASrQSBs2RfNYTEqhairYfQ%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/138_azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/138_azureml.log?sv=2018-03-28&sr=b&sig=bwyQxhxb8UkX%2FAvYDXGAczSBrXieqOqfshRlwsyYai0%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r', 'logs/azureml/azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/azureml.log?sv=2018-03-28&sr=b&sig=N1CzC2OIPbDG6Ts9nzPVg1rN3%2BuTiXNHSM4A4UOv6YE%3D&st=2019-06-19T03%3A20%3A31Z&se=2019-06-19T11%3A30%3A31Z&sp=r'}}\n", + "{'runId': '9f99614d-6bc5-4c10-a121-b09afdc02c74', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T17:11:05.931189Z', 'endTimeUtc': '2019-06-19T17:11:06.005469Z', 'properties': {'azureml.reusedrunid': 'f78cb325-802a-4779-ada8-05db82c97835', 'azureml.reusednodeid': '70352e68', 'azureml.reusedpipeline': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.reusedpipelinerunid': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.runsource': 'azureml.StepRun', 'azureml.nodeid': 'ba6c6c5a', 'ContentSnapshotId': '8979e52a-3c38-432c-b9e3-a235b33b7d1e', 'StepType': 'PythonScriptStep', 'ComputeTargetType': 'AmlCompute', 'azureml.pipelinerunid': '994a7673-8f48-42b9-9cfa-1a47bf70c304', 'AzureML.DerivedImageName': 'azureml/azureml_b2a8349416887710026a15e07f74a6a3'}, 'runDefinition': {'script': 'embed.py', 'arguments': ['--embedded_data', '$AZUREML_DATAREFERENCE_embedded_data', '--sentence_data', '$AZUREML_DATAREFERENCE_stsbenchmark'], 'sourceDirectoryDataStore': None, 'framework': 'Python', 'communicator': 'None', 'target': 'gpucluster', 'dataReferences': {'stsbenchmark': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'stsbenchmark_data/', 'pathOnCompute': None, 'overwrite': False}, 'embedded_data': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'azureml/f78cb325-802a-4779-ada8-05db82c97835/embedded_data', 'pathOnCompute': None, 'overwrite': False}}, 'jobName': None, 'maxRunDurationSeconds': None, 'nodeCount': 1, 'environment': {'name': 'Experiment automl-sentence-similarity Environment', 'version': 'Autosave_2019-06-18T20:46:30Z_9b3f4178', 'python': {'interpreterPath': 'python', 'userManagedDependencies': False, 'condaDependencies': {'name': 'project_environment', 'dependencies': ['python=3.6.2', {'pip': ['azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33']}, 'numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], 'channels': ['conda-forge']}, 'baseCondaEnvironment': None}, 'environmentVariables': {'EXAMPLE_ENV_VAR': 'EXAMPLE_VALUE'}, 'docker': {'baseImage': 'mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04', 'enabled': True, 'sharedVolumes': True, 'gpuSupport': False, 'shmSize': '1g', 'arguments': [], 'baseImageRegistry': {'address': None, 'username': None, 'password': None}}, 'spark': {'repositories': ['[]'], 'packages': [], 'precachePackages': True}}, 'history': {'outputCollection': True, 'directoriesToWatch': ['logs']}, 'spark': {'configuration': {'spark.app.name': 'Azure ML Experiment', 'spark.yarn.maxAppAttempts': '1'}}, 'amlCompute': {'name': None, 'vmSize': None, 'vmPriority': None, 
'retainCluster': False, 'clusterMaxNodeCount': 1}, 'tensorflow': {'workerCount': 1, 'parameterServerCount': 1}, 'mpi': {'processCountPerNode': 1}, 'hdi': {'yarnDeployMode': 'Cluster'}, 'containerInstance': {'region': None, 'cpuCores': 2, 'memoryGb': 3.5}, 'exposedPorts': None}, 'logFiles': {'azureml-logs/20_image_build_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/20_image_build_log.txt?sv=2018-03-28&sr=b&sig=IuiLro7VKrTsmPEJbfhRGYus2BolDMlvECS3bZ3BFvo%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/70_driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/70_driver_log.txt?sv=2018-03-28&sr=b&sig=sE6YQBEIcxO1nlrQ6DrJsxEmG2CNm8LT20qK2RohIkM%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/driver_log.txt?sv=2018-03-28&sr=b&sig=N4Ygz1QllDgRPhcRMZVs3zK%2BYYOh738B3kzpgdw60j4%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_stdout-job_post.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_post.txt?sv=2018-03-28&sr=b&sig=8IEjHjHLfGsriil0ZaaHkMizZyhGyVl1VzBD5gwwTvw%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_execution.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_execution.txt?sv=2018-03-28&sr=b&sig=CC4n2r9mjciD7h%2FMA2Y9NF2exyW4V%2BteCp9q1QBhsTw%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/56_batchai_stderr.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/56_batchai_stderr.txt?sv=2018-03-28&sr=b&sig=70mUUIdwHi1SJ0s7XyOBviDk%2F9cBh%2BH%2FwV%2FO84%2FQ5xk%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_stdout.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout.txt?sv=2018-03-28&sr=b&sig=JHPzCDmads8HeyP5HovD6eZa8mSFDfl7l8IwhQXb0d0%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_stdout-job_prep.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_prep.txt?sv=2018-03-28&sr=b&sig=W8EnL2oERfRojlbZ%2Fk7KutncWvF6IVSQInSuvUPbdvU%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=b%2ByKyJj1%2Bt8XLmJYOOkvEKSu5glJeyp7lbRaruwGmgo%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=9nwwElqLPKA10YapK0r4t3DP1b0T4BM293%2FKkc3u0%2BQ%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 
'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=OUVWL%2FjXqUFM9ymDdIeRMUSxUOQzlU%2BMFRT90qqJflw%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/138_azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/138_azureml.log?sv=2018-03-28&sr=b&sig=GgFEfWuyG9Q3VjDZI8Mr%2FxkwJ6XW2%2FSPwtHurNrpX54%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/azureml.log?sv=2018-03-28&sr=b&sig=a%2BKsFAoMIlVigIYCutZSa%2FmlqqeLFTCiI3VzFg9GPSo%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r'}}\n", "\n", "\n", "\n", "\n", - "StepRunId: 297207dd-e830-4133-af2b-1efff54ee11a\n" + "StepRunId: a3a6fa4b-d2dc-479d-be93-eea52e5c597c\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/297207dd-e830-4133-af2b-1efff54ee11a\n", + "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/a3a6fa4b-d2dc-479d-be93-eea52e5c597c\n", "StepRun( AutoML ) Status: NotStarted\n", "StepRun( AutoML ) Status: Running\n", "\n", "StepRun(AutoML) Execution Summary\n", "==================================\n", "StepRun( AutoML ) Status: Finished\n", - "{'runId': '297207dd-e830-4133-af2b-1efff54ee11a', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T03:36:02.737145Z', 'endTimeUtc': '2019-06-19T03:44:50.348314Z', 'properties': {'azureml.runsource': 'azureml.StepRun', 'ContentSnapshotId': '81120654-2a16-4013-96f7-922eda5e4e1e', 'StepType': 'AutoMLStep', 'azureml.pipelinerunid': '5549c561-26e2-4979-9f3f-0379e38de86a', 'num_iterations': '5', 'training_type': 'TrainFull', 'acquisition_function': 'EI', 'metrics': 'accuracy', 'primary_metric': 'spearman_correlation', 'train_split': '0', 'MaxTimeSeconds': '300', 'acquisition_parameter': '0', 'num_cross_validation': None, 'target': 'gpucluster', 'RawAMLSettingsString': 
\"{'name':'automl-sentence-similarity','subscription_id':'15ae9cb6-95c1-483d-a0e3-b1a1a3b06324','resource_group':'nlprg','workspace_name':'MAIDAPTest','path':'./automl-sentence-similarity','iterations':5,'data_script':'./automl-sentence-similarity/get_data.py','primary_metric':'spearman_correlation','task_type':'regression','compute_target':'gpucluster','spark_context':None,'validation_size':0.0,'n_cross_validations':None,'y_min':None,'y_max':None,'num_classes':None,'preprocess':True,'lag_length':0,'max_cores_per_iteration':1,'max_concurrent_iterations':1,'iteration_timeout_minutes':5,'mem_in_mb':None,'enforce_time_on_windows':True,'experiment_timeout_minutes':None,'experiment_exit_score':None,'blacklist_models':None,'whitelist_models':None,'auto_blacklist':True,'exclude_nan_labels':True,'verbosity':20,'debug_log':'automl_errors.log','debug_flag':None,'enable_ensembling':True,'ensemble_iterations':5,'model_explainability':False,'enable_tf':False,'enable_cache':True,'enable_subsampling':False,'subsample_seed':None,'cost_mode':0,'is_timeseries':False,'metric_operation':'maximize'}\", 'AMLSettingsJsonString': '{\"name\":\"automl-sentence-similarity\",\"subscription_id\":\"15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\",\"resource_group\":\"nlprg\",\"workspace_name\":\"MAIDAPTest\",\"path\":\"./automl-sentence-similarity\",\"iterations\":5,\"data_script\":\"./automl-sentence-similarity/get_data.py\",\"primary_metric\":\"spearman_correlation\",\"task_type\":\"regression\",\"compute_target\":\"gpucluster\",\"spark_context\":null,\"validation_size\":0.0,\"n_cross_validations\":null,\"y_min\":null,\"y_max\":null,\"num_classes\":null,\"preprocess\":true,\"lag_length\":0,\"max_cores_per_iteration\":1,\"max_concurrent_iterations\":1,\"iteration_timeout_minutes\":5,\"mem_in_mb\":null,\"enforce_time_on_windows\":true,\"experiment_timeout_minutes\":null,\"experiment_exit_score\":null,\"blacklist_models\":null,\"whitelist_models\":null,\"auto_blacklist\":true,\"exclude_nan_labels\":true,\"verbosity\":20,\"debug_log\":\"automl_errors.log\",\"debug_flag\":null,\"enable_ensembling\":true,\"ensemble_iterations\":5,\"model_explainability\":false,\"enable_tf\":false,\"enable_cache\":true,\"enable_subsampling\":false,\"subsample_seed\":null,\"cost_mode\":0,\"is_timeseries\":false,\"metric_operation\":\"maximize\"}', 'DataPrepJsonString': None, 'EnableSubsampling': 'False', 'runTemplate': 'AutoML', 'snapshotId': '81120654-2a16-4013-96f7-922eda5e4e1e', 'SetupRunId': '297207dd-e830-4133-af2b-1efff54ee11a_setup', 'ProblemInfoJsonString': '{\"dataset_num_categorical\": 0, \"dataset_classes\": 140, \"dataset_features\": 1024, \"dataset_samples\": 5749, \"is_sparse\": false, \"subsampling\": false}'}, 'logFiles': {'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.297207dd-e830-4133-af2b-1efff54ee11a/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=sc4OGsuRrBaBzm1%2F8U%2BXjdywdh00XNxmO9tISKxYRZM%3D&st=2019-06-19T03%3A37%3A03Z&se=2019-06-19T11%3A47%3A03Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.297207dd-e830-4133-af2b-1efff54ee11a/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=M9aM0Xy%2FznTxVS1jkee1WL5GTKVblvYXOjaRKeh6Bp8%3D&st=2019-06-19T03%3A37%3A03Z&se=2019-06-19T11%3A47%3A03Z&sp=r', 'logs/azureml/executionlogs.txt': 
'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.297207dd-e830-4133-af2b-1efff54ee11a/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=kzTStpejzK%2Fk0h6gtKOj4WwJBYz6tp5DG7YnAgPJtJQ%3D&st=2019-06-19T03%3A37%3A03Z&se=2019-06-19T11%3A47%3A03Z&sp=r'}}\n", + "{'runId': 'a3a6fa4b-d2dc-479d-be93-eea52e5c597c', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T17:17:00.446785Z', 'endTimeUtc': '2019-06-19T17:25:31.355471Z', 'properties': {'azureml.runsource': 'azureml.StepRun', 'ContentSnapshotId': 'bac07baf-2af3-4a06-aadf-aef17548794d', 'StepType': 'AutoMLStep', 'azureml.pipelinerunid': '994a7673-8f48-42b9-9cfa-1a47bf70c304', 'num_iterations': '5', 'training_type': 'TrainFull', 'acquisition_function': 'EI', 'metrics': 'accuracy', 'primary_metric': 'spearman_correlation', 'train_split': '0', 'MaxTimeSeconds': '300', 'acquisition_parameter': '0', 'num_cross_validation': None, 'target': 'gpucluster', 'RawAMLSettingsString': \"{'name':'automl-sentence-similarity','subscription_id':'15ae9cb6-95c1-483d-a0e3-b1a1a3b06324','resource_group':'nlprg','workspace_name':'MAIDAPTest','path':'./automl-sentence-similarity','iterations':5,'data_script':'./automl-sentence-similarity/get_data.py','primary_metric':'spearman_correlation','task_type':'regression','compute_target':'gpucluster','spark_context':None,'validation_size':0.0,'n_cross_validations':None,'y_min':None,'y_max':None,'num_classes':None,'preprocess':True,'lag_length':0,'max_cores_per_iteration':1,'max_concurrent_iterations':1,'iteration_timeout_minutes':5,'mem_in_mb':None,'enforce_time_on_windows':True,'experiment_timeout_minutes':None,'experiment_exit_score':None,'blacklist_models':None,'whitelist_models':None,'auto_blacklist':True,'exclude_nan_labels':True,'verbosity':20,'debug_log':'automl_errors.log','debug_flag':None,'enable_ensembling':True,'ensemble_iterations':5,'model_explainability':False,'enable_tf':False,'enable_cache':True,'enable_subsampling':False,'subsample_seed':None,'cost_mode':0,'is_timeseries':False,'metric_operation':'maximize'}\", 'AMLSettingsJsonString': '{\"name\":\"automl-sentence-similarity\",\"subscription_id\":\"15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\",\"resource_group\":\"nlprg\",\"workspace_name\":\"MAIDAPTest\",\"path\":\"./automl-sentence-similarity\",\"iterations\":5,\"data_script\":\"./automl-sentence-similarity/get_data.py\",\"primary_metric\":\"spearman_correlation\",\"task_type\":\"regression\",\"compute_target\":\"gpucluster\",\"spark_context\":null,\"validation_size\":0.0,\"n_cross_validations\":null,\"y_min\":null,\"y_max\":null,\"num_classes\":null,\"preprocess\":true,\"lag_length\":0,\"max_cores_per_iteration\":1,\"max_concurrent_iterations\":1,\"iteration_timeout_minutes\":5,\"mem_in_mb\":null,\"enforce_time_on_windows\":true,\"experiment_timeout_minutes\":null,\"experiment_exit_score\":null,\"blacklist_models\":null,\"whitelist_models\":null,\"auto_blacklist\":true,\"exclude_nan_labels\":true,\"verbosity\":20,\"debug_log\":\"automl_errors.log\",\"debug_flag\":null,\"enable_ensembling\":true,\"ensemble_iterations\":5,\"model_explainability\":false,\"enable_tf\":false,\"enable_cache\":true,\"enable_subsampling\":false,\"subsample_seed\":null,\"cost_mode\":0,\"is_timeseries\":false,\"metric_operation\":\"maximize\"}', 'DataPrepJsonString': None, 'EnableSubsampling': 'False', 'runTemplate': 'AutoML', 'snapshotId': 'bac07baf-2af3-4a06-aadf-aef17548794d', 'SetupRunId': 'a3a6fa4b-d2dc-479d-be93-eea52e5c597c_setup', 'ProblemInfoJsonString': 
'{\"dataset_num_categorical\": 0, \"dataset_classes\": 140, \"dataset_features\": 1024, \"dataset_samples\": 5749, \"is_sparse\": false, \"subsampling\": false}'}, 'logFiles': {'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.a3a6fa4b-d2dc-479d-be93-eea52e5c597c/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=acaxB1G7%2BLXjlhmZ7re%2BwzFYKUIS4pLZSqX%2F7Wx5yHk%3D&st=2019-06-19T17%3A17%3A40Z&se=2019-06-20T01%3A27%3A40Z&sp=r', 'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.a3a6fa4b-d2dc-479d-be93-eea52e5c597c/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=wYI6C3wu4V8nM6jJLrp6dI1m5G4lU9ELOuoIiucV27g%3D&st=2019-06-19T17%3A17%3A40Z&se=2019-06-20T01%3A27%3A40Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.a3a6fa4b-d2dc-479d-be93-eea52e5c597c/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=wPHCRzEYZ1GQ3UbXpsfSk7xFu5SMDzJTsLYkt%2FRHWpM%3D&st=2019-06-19T17%3A17%3A40Z&se=2019-06-20T01%3A27%3A40Z&sp=r'}}\n", "\n", "\n", "\n", "PipelineRun Execution Summary\n", "==============================\n", "PipelineRun Status: Finished\n", - "{'runId': '5549c561-26e2-4979-9f3f-0379e38de86a', 'status': 'Completed', 'startTimeUtc': '2019-06-19T03:30:19.54232Z', 'endTimeUtc': '2019-06-19T03:46:59.380629Z', 'properties': {'azureml.runsource': 'azureml.PipelineRun', 'runSource': None, 'runType': 'HTTP', 'azureml.parameters': '{}'}, 'logFiles': {'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.5549c561-26e2-4979-9f3f-0379e38de86a/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=xygOEA3uP72DELH6cKm1AtJ9wsQUrR6DhcGBIZC6Grc%3D&st=2019-06-19T03%3A37%3A06Z&se=2019-06-19T11%3A47%3A06Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.5549c561-26e2-4979-9f3f-0379e38de86a/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=mnwq58c19ZqTLxjJhnS99LTZAHISFgJua4XvFmVKxkE%3D&st=2019-06-19T03%3A37%3A06Z&se=2019-06-19T11%3A47%3A06Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.5549c561-26e2-4979-9f3f-0379e38de86a/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=2dFSQk1r7iNsd18URUCgWlsEp%2FNStDu8Y2d3Q1fkat8%3D&st=2019-06-19T03%3A37%3A06Z&se=2019-06-19T11%3A47%3A06Z&sp=r'}}\n", + "{'runId': '994a7673-8f48-42b9-9cfa-1a47bf70c304', 'status': 'Completed', 'startTimeUtc': '2019-06-19T17:11:01.903161Z', 'endTimeUtc': '2019-06-19T17:27:39.802989Z', 'properties': {'azureml.runsource': 'azureml.PipelineRun', 'runSource': None, 'runType': 'HTTP', 'azureml.parameters': '{}'}, 'logFiles': {'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.994a7673-8f48-42b9-9cfa-1a47bf70c304/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=YB7H6UmXpGnLZ0ir%2FK2eQuYfwb4pQ5CKXlJhcRnPTqE%3D&st=2019-06-19T17%3A17%3A42Z&se=2019-06-20T01%3A27%3A42Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.994a7673-8f48-42b9-9cfa-1a47bf70c304/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=PPsWsEXEsUhc%2F8BPXh5KnX6Ze5fUbSohjcqgeghWaIk%3D&st=2019-06-19T17%3A17%3A42Z&se=2019-06-20T01%3A27%3A42Z&sp=r', 'logs/azureml/stderrlogs.txt': 
'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.994a7673-8f48-42b9-9cfa-1a47bf70c304/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=UQ7xiAJdha6PiJENVKM0OiuduPNfgQbSNrZdEHddO7c%3D&st=2019-06-19T17%3A17%3A42Z&se=2019-06-20T01%3A27%3A42Z&sp=r'}}\n", "\n" ] }, @@ -1017,21 +1170,22 @@ "'Finished'" ] }, - "execution_count": 29, + "execution_count": 58, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "pipeline_run.wait_for_completion(show_output=True)" + "pipeline_run.wait_for_completion(show_output=True) #show console output while run is in progress" ] }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 59, "metadata": {}, "outputs": [], "source": [ + "#Publish the pipeline\n", "published_pipeline = pipeline.publish(\n", " name=\"Sentence_Similarity_Pipeline\", \n", " description=\"Sentence Similarity with Google USE Features\")" diff --git a/scenarios/sentence_similarity/pipelineWidget.PNG b/scenarios/sentence_similarity/pipelineWidget.PNG new file mode 100644 index 0000000000000000000000000000000000000000..45cd68b02311b6fa123d6253236be015a2f671c8 GIT binary patch literal 109142 zcmc$`d03KJA2;qy(=s)aEtZuFO-*BE=9sCWB2#I$jgFH`=E9gM=0=G^hCtIYR;E-| z=7N+~mK!OGxd4ro<_3`=0wR?PDw~RcfWQxH=2_n7d9Lesz5l%Tr5D~e_kGTNKIglg z@8^5G@{6DEw~N;;HZwE(_P~DcU(L)GUNkdXFn`ek(-r#O6VaxRFW|rW?lG&TTaTGe zzJl)ldAFHa4Q|P7)O^$V*QfS}!p+Q9q|W_*F&GOtV`gT2c))vi;F(B?9z;wFx!IpS zHIlnO1uERweeG4|w4HPI%_p{D*YgjjHneA2|MX_z{ztviwCJMU*-ySCWd4%<9q0V9 z{YSnW9;KB3^6>azz>m&%|NPCn@~ER|04>uS7yo{Ikfc6V6RGkNN#0Xt-wX83She@E zuXg|EPfN?pdxD;XRsHX;8OB)#VbS^Z|M_z{&k`{E3Nrg@<$q~;*;kWOA1}~myJ#63 z{_|1OmB`49i4d$Z#Q(p&*Rq#gS>^^U(Wt%hUpiiN{!IGhpMu#Qf#>r7`n02?*9^=k zrGfs}*P9ou{BIAoyqS3qGpbNR*=0|;+_REUhyVo4`-M!}1!|&HSEKN2VcAVfd;(VPh z7@z%R*@ANkpU^o)KfHS;m_VD!$n-__o`3psBly;_EsGBQQdtxY7&vIU(d@l;R)@5E zFq+=AAxY%?DyaYXQ~I~_qs-0#&-}K?%jXEIKstW3~K-?ckSY z=O~QKhS{H^7Pn?Bn18O~{qNt+p=xpQcXaRk59QD8+qFi_fCE6wKPLL3(BLMRg}wge zouqW`wTC7y9sX8xZAQj#22e763Eyr?R%))F|IyUzv&FV+OoCCgclVlpWZn90C4x1R z=8xRXtM{2tCt4L8nSOIx&hgqf8flHL-g_AAuWfAp=;B3kjJ%qNM)1oc z%)mS3qaE=gxdJON)>^$;fsr+HCr&`)>8LqshkbL5n? zyTKdbWoAaaWXyPnhE0n!>M6RYI?UqY!|d^axD0#3q3XzGvW%RYkNcXno~zI*Mbc#Y zH_s34**$|(EAw`deWSpnK*Dl~=p2i&f|xH;jsp3sh?6qvnJwi?&glxo7cXTI%kIeW zSh-wJDZ^_0jbr9&IbKx({i@+5Wn5jZriOJhg9Hh<)fY<_9$-#-n9sf&-CcmYZ61mD zjOh+=;tIv0H<^Msj3mgrF!de80)uHbpTDUjGo@5{1MZ$aUW8M9@S{2_9F}KWnw?{> zKTDHho+{-H21Nle{Fhwgqh&nN`R%pKh%>XyQ;YL0qUrag^^6uhi9^;bx zsU^T}*O1goo8}f<{aP?DxQ~$3&cMN5;P|57c(}F|JrQ-5FXO5%$$PQa2RqosVo2`7 z2Rj5)(58`wV>8Z|Ohc?xtX=P{z-+6T(wrJ`zI1W;IL-#`w+_91dC=~o*COTP-Mq`N z7s3tc`{1!6`>mUKz$=k?Pm?C*rf%=<8Fh3R8vA0c3e=*#cCX!sfdBCk!3GdRL#kgL zI$fbmuS;8K%cm@Eor+wu>cXpdoyw!8j%dulF|Y#_ZOKl)A!s7yNJBfbK) zVK*z2ye<992v@;0D=&83Qc`#Zfs;8IpU5}NT8o_wvzr%Uufb>}ulTyYRyCBz>tPE! 
[remaining GIT binary patch literal data for scenarios/sentence_similarity/pipelineWidget.PNG (base85-encoded binary image contents) omitted]
zf`wQw%b-A{Na8uyV#hFoP#{1#Yf!m|0G~K;IKQG!<+be9ZEg?Y;qp;o*fQvIao>kz zJ1RFVaFVCE4CqY2HLHiKz}9-5KDyar$N=rGsKh7?;>@jifNy+dqAqZ99cct-wEMppM3A{1w zY8)kdI+~Zu4g82j}_+73F z94EN9|4=kJ~&ajAFo<&&w3WP$qbr>hiuaNG?6mIi>dE76N z`OY*G5lH%W?QiU+{;uf+%YrVVW*JF0)>+9-RpQNFcKtZdQC($BY$3Sby`C99AQM?2 z@PJV8{T+Ac$Vu`9t*4fEV3FaJ9_7))wB8p|oqEn=c}~n4^B!IZHG(G($MU_2Q><## z7XRI?+GhD|YtiStMoI}h6whoSF(%+(;ZGQuMV~7fH3E#|cINBF0j(q?XH&sFicy9~ z;{>D4lv&T%wkj@_o3RhJaWpy-4V@p-7)}9Je3522c_0q(6vFD%N4w<_?4IFF&*?oz zfF$V90xFA&_M^(^`8pULz6jbRO?rl0h!X{zXCg(A=b+9M^3s94@LzdL&D-p1xpf2I zBnb#*Kdtv8RFj^55-dWDOb&z71|Q?lcY?D+qcD|WHN|; zbqul1k3WOxU6x-&&~0jvFAV03-7#4uV&U&1<d@v(0c4{FXwt1M*)p=Dfn0i z6=E*6*9!QLccV+kPC$0&g^K>K1?e681gT%iEUFM;8AzPiuPFL_pa<<`+&&nyCSlvM zFcBQc^*6E7I^b3Loz*ZDvspfG%MYB@pz-K3%2It)l?uN>s9XB=22DFl`aS^z?R4u` zx2oM7-Uvur+qa+)4)0Dxh1ojd;8B(D$fy8-WJ zA~tW+{9)UW!N&A3jD= zMGb0NW}(1*Zo3NHq%De5*?MJ+GEGXPqkYF*G3njOQamPJDz<-bbT{642wWg(gx;%g zYB*Ui=TBtx$E=Gkt}(ha6|%%1Tyzn=#$OXi8$BeFjHUqhC$X|-;Vu7~n6AV679>A^ zR<0?XoN1JVTL7c2%i=JrQC0f0jxeU;3{ZmK>S!UF@Zy=%#M%ZU*hdzK|- ziz82x`Z?1AieP9)yYNz+51Zc8$n4f7gB*j~H;J|%q*>TX2?EcTp{UJw{E1|F9$c{Y z2w9<~WS1URuR&yH8Ok^4)DUPr4{7N2U{*ru+6z4Z0wcC?%<~*ffG; zINx0km9BU*FtE4M9T!gq$`y-?Xizoo)v>wBa^5+9Wdn;-D@`5IXdFM>Eu)_W`#Dq%AQRHPpbSs7YF|;;|Ky$?MQh?&7j7ji~CCEl}LchMjKA2C9AJ7!Z z08qqNYva3VxN0aIp_J-6(mF`JXl!@O=aCjg(aKCpJZlwoPxeUp!W|S4ZX==mbR~`d z;XrDUGauyJ0^|&S!m5+LMNHOiG++jaWVnL@qk#4)mY+%ELPz+->f%?|sacJWX7{NC zb0ajc48#?_y@6ffJak$ex-Cbai12qMMAP9wG4H0#_NTN7WOzV#`9wP!N|ty{`YCu6 z$*DxX`Y4}|y`T9ho5UvsN@uAeilULH^6&@LK#jOxVv;uIFRtJ<^z}}5(HY5B9mHeh z?V8oq318)wzQdg#ZcK3;Pv#-SSF@&t>Q8_PmBCb!$Gq;y|6cjrq8Wl|s02Ep&SbjA zRi)35g^;!NWvW3sQ*NpfJt82Gb1N-`SL5w>uPM`8*E&5!M|J}9cgH0bDaE?utuiWdGjHO`icCoMp7JS1&6-grGw$U^a{@h3n*WV9g++LnS z9w|}`Pt2aghgRxwFnp9uV+AA$-$9>}LdR$2;td13`E-A_P+rQUg~*#m_lsf#2hXaH zmVklH%BdkrlT;_?Y{+)=cfb{YuS6+5sGt)e`fGsM-j56y-`wV582xS zY+!2ZXu9P+5oc%l5)HPscd8ZJf-(?|?s~%y-$zqJ7+<|A^C9b_eOEqFx$|oo{F(%j zfZ~W-ndzM7jfs%~MO=~VXLd)dDp^NgoV8jJ7HCneE-$jaUP#H9a>m3U+L%sqzgFqPIQz#0{wqGV<&K6>Y8D>HoZBy6X;fY4sy6u}dQKH$QxV9B< zN;It%;qOkz2(6jE)b%LrT%AhJmNT5nCbEb5SL8VOAUVQE1k5@gB^p}e;!C7TQ`hQ+ z7F9Dgpym0lTkONLjbfwBEp0e<9#tW6*}iQ$Q0bkgs5U>UtdZ%$R1Ug${@ITofv5(} z5kQtdnyif@r>T3;$25q+Yli?^c8$vxW5~0^En)gHHlu?mQrv}i5%%f4UDrmg8f*Uy zku(`3Lg;%f_smf^6?FpyjX(AS>y?qbUAu0>m{&A&fO>qwXqghY(iPEHyK><#n-ke4 zwrLs{TE$NY)cPs~=SnHtn9(JAgsM#&eVJrc z$0Prd!2iE;6ht59lioVPM>M@1-`ZXKs5|@p1~+xxAF}2DGj}2~mRJ^Y1O*66vG7Yn zB?|}skmLUU6cM8l)DmoORhO%x%G1X(pQyY%o)Q093jZbrzbAU?$C*As3d^b;;IO>-;G{~upzs1izy=c|4K6ZKnmsiuoi8|wT4Puq^)+Ks<_ zimVGh{}8Z|e*06;XG%43-)B$22K@C;Ri8g3jJfT5zwCGR)sA2W7uvM%IGEz!2wAc3 z=`z`G*Cqbm#s00&;i{6iFQJodJJMO|@_KczuN%}(`@Ln55)iz;|5`byQgEHWrDO!e zE_VLnueihe{h5>%*0>{ze^HK#iLuG`pa*! 
zdwbuJWHeAwEOmXr&tW%9X&>v)(ucp58~piekeF-nk4ZmRD*9M|oZkJ zZ+f*wo_||<`xXAq@gou>tn~(7wFg0Gy!?voJsX?bVU`j~S9Tpe#&L(WOv3b{8Q!2}W_~^M%hq_|y3kU1AWZTwPpb${8DacY|>gQ-e zFZnDuKGqEi;7OmRo<+S5UcARLlwlwO1MZ3aPVnx5(WP8n0_g5^mt{F;ttR@;7pfkR zTHXV1RdzHF#JRn&d|Y6uX7yEjCR=0&+8kl%vhQM-zhv`4g{4H4cJi7IdBwQj(hH+4 zd((6}c+1*H)&^&1!%)|ZA6Y)?gr+tdFN3#;4_k!b27k)k1tj>!BXS}5->nv*P2clH zq%jAcc~?M3){!l}{`K3>;CDdl{#$$QmiFM5UKSj+cIQ6fTZ@!tty8+f+WlS1KugyPxt6iVEDrM&>{t+} zft79P!P-u%P`-y&Sp*^mSr=`7Ui*Jv*QE2QS^@mi_7}op>_Y-c+CVVNok!H)7e)Sy zkosrkT?o_mK@Uo%LY7ZWZjd#r=2Ntu!OC2OHoSh!j87o>SBb5w@DKBuV$hpltbLYj zc&t|8bJvLW*c{t)=7pu$J2E4cuH^^%DF1cVb~6I{#!{51V~T_YVY5G3G<4s8 zSx@oa?b>oOrI#q!>`Aio0pWFo7teI?rUezU>h$) z7AI>X5sezbUComF{5EkvBEZ>JXJ5P;OJ3u$c{-f05bHWvgSt8vLh}=6{H{@~lLKhn zXm9o>wCy*HJvP0~-C&Tkn9-u(N6vXOZTduCp{qfcApmJ^lAI<8Jc(O8;Fp3u`iDgh z%D~P0KiyDVkC~4_97KZBDc+A$>#3#@DwQ}9M~ZKnpC7Je-EJLfrbZ$)CnH2#opLG3 zJqg%Xbj#_>^9yUsHC?h2qQY~KZN7{Z`R$%Jr4v;42kZ&myrMOpye0RWxc=-`F%ov0 zL1;k_Pr8|84fT?8BjwT7Gr3D1vWfj~4u%;GZZL(|3uP9#wj1lm7^zSK5(;)5rNfL5 zdUPFdLXUm{SUldTpMW2Iun*Jgc8*{+A1{nQ5sBS;GV z!3c)zpcmbE84DHl#N?Q>!AMlGzDKj9e|?4>)4BrtZq}o|zI?d%5@d%T-VH~xa z{n+(U&+n?eGo3mV{Y%EnMh!!IAX4=fWTsk^B$#RI=Y;Ac^=z%9r@+3L^zbYdCB17l ze@E4q$^d(aG02ZnIW~!Gp*65ATYK}CjMk%!wqAmj9Peq#8`O;IUNq@>1WNU+krk8* ziP&=G)sdW0CFiD`7$dO@$pDSC%d50Q)Pvj4GO-rF$Ro%Qhv9wJ(eWkIhx~h|h=)uQbz= z4qr2JwxigY1r8=2bTiLyvaU>YzcBxz^siqYp3d&OZmRT=H%rj8bRNpkyt+8~VeDhA z-Dd5kF^Ri5X(;yV54WBlQw-#}D8wRv3^5KtW8(UHO=LhwYGO_85M6;cpr0xO#fVQ(U)L;RzChA!3GY#@hOP0v$tH{hdvWEL z%!bAu9evH2u(sZ?tY6AJZK3f5^Z3i6PyNOB%-M5?&tES2my34?shIHa^RgV_?e+6N z=b3sYMWrkA$w&Y4q3gOXOu7U*Fizj?m7lkn6yvEq?FE{DYl02gi;lD#t`)l&Y4L|Y zu`7NPTjKf=e4c8ay{mIYwymHO79!!FAhFLP9x;`8> zccn+Zmgc;sP^=KsEY66l)Em)}P zFV(8&xuQcM4sU=n`1ZN0@ZD2%)l6QQDvxuPRI~7Y1ao9kJ09kpCt4QD)H`I4$BH_! z_8b9cSD(rRZryI&x8O{P6e~zNjQvi0^wABT?Sys(DKI0;Y@T~HY-~DsK3c!I4oHU& z2GC$!E(&9pxbWt(5ZcuUQVM&<+}WXc3HD-McA3r$PVIyl>ttCbk-2pNH!@DE+yftH zlswU<6IJ#Y<7CoSfwpa853eMCtOd z*ZD3RQv|-F(1(I=^Gi`#?bzY1_dVZm_&}^+>y~0NuI}m);`Xw&vJ324!GYmWp2@7a z(7V%;cI@G#ww9~wmuPj|jN_VSlx}4iqSCkvm0j4w_BI$1hK+-ymS0Fd$%0^Ay^0nn zmA|@%-)T0FI~wOFNvrm=8>*gLmD@ikquZ{}s76`O3Sl=*G8H+(NENk>J`k;iA8(pk zyCz>eaZK<{($pK4Ey)gm_UK6Sv0r-|Xwrj}Q-dyW1~7~Dk1 z)W(^}o3maSVcxSl$F<|!z&z4+ujvHJ{LVm!GgvdE^Vg4DrJ4*AsT2^VzX!3<-M)1h zN0q~YCgmh377`ha=4yReZ97;(%km_qzCDQ48OvajIn0XI%pMB3#181P`eHl*O)%@< z9mltm)|%Uew{6y%kYl`FeCOkd31`z&)}H))DcFu!qk(OkEhV@ay-uTa6mFu4Ah^Bv zO``A4t)9`+pa`EM!Vrh`VOb`5v5T+4yd7m!%7WR`TRcU~$Hm5h$A6s!r&T{mr*ky- zumy&zlNsa;_Dt|{f0v>WaH2&?rmLv7g-OziRm4t+Xixk^3qODP!hbp9j($J;7yC@%WZ2*G zc8x`pPhrX0pRVWhFojS67}>aJ3Cr++D$)!rMd8VGWa|gn#Y^GYYV6B-5V*{Cjl-Y^ zWDyvlx|ILGby*&XuT_PTq-O=h?51UeP8d|zlh!#g-RY99f1IY$a7fJyy0e`hKMykr z&3=q>wO0;gga{u>m0MxJj*U} ziR)c5Udx`^HVCfRdp&&1iFs7*_~a-Irg^sgCRHz!qggKNnCqP?xJ}Von(Sqf3Axz> zJ+F5&KcrGj+Pm@#OmfxnFD*9GAJ%J+D29CMP-|7eA6?Ks@aG+TyQv%)MdHvK)dCjptRRtd&t_K;rO zC9tbnWe2IW1;x|IQ6*-NEx34}(M=o`Jee#{bdls!GaF<<5v+%?^^QKE-iRP1Kf?ViKhmmT^K^^dy?Tah0P@wJ!<-`Xi*b3bs_&YE_Ql;*U zE)QxrQX`8%9OIu(}Ybu|7w5kAl0&GdIm#?hMGakUF2z7369ypdO1lzx=EDsvElxj z1?-LJlC6Ks-Srqk%qI`>Nn-Zs*1fsJ=ZDgTTi-I7+D%pZ%UE-7&s$0JxK4{wi7`#9 z>qy!wJ><8Fif2246nofPoRnE3pJQ(lCmk@vIZ1#)fay2R3J7sV+cSE*$DcD-ku^ZGy=?ru1r{38J*B7}R?6_>u;w|EW z7nZIubihZg+R_)sUmQ)tk4-f?d;iJz;?NbO`RLY$;%p-dKc5&GFLZqwI@8ke3O&k`44bzt)fp!|gstb5;v! 
z-s>Blpf(3M_g{6P63O#MkfB6g8F5Yh__~VEb^t0Yd2S&`kptZoR6&3v@VZGcKHm;( z0lt`j3fexGHGAbAf6Fv#XIBi(`6M2G^lLw&f8s{3HE;iyN&XMW@E`mGKxi38rW*F8 z1rdt+1K2U!rF(|BZvmoO{s2Tx0Cex7alI8h%X*aM=9hVCiH%L?BCa%M`h|NO(QWz8 z%$WY`{Ltv*0O5%F50X#EG-52lMK~TqaUty zh5xZxS4P#%v6Qi#C-xo|e30$Ml6)h3Oa2RJ!+NB*XY%umPfh^f3;7D$*-&J!$36ZB zKFE0bWAIIW9DEq`>N9o+Dr(_bTv&7z=+`o9zv^`)0KUpaim*KZ`CGXyDzII3xB0+) z?M|ED5WvBEB?kr>u-g{T(T=K_+8@meMu$TVMCG$xZ~UVV(Dbg71K3PePMN(Y4weC= zDOKkr)OeMr2WX_WhLb=EW?I3uw>#JbU6(FOUu%zpWm}%>PpI}{0U%~0xow#I-ODyV z0G=Q~Yv0QLVd1K+TU{@KZ9)I5@ZFYRU=YH_G_<&)vM+fS&Oc4hjLC|&t8)CuZyR4|mDKfL=CLH(6ra>k=`|;zvAAsod zqT7_qNo#ye+t*?JRNu%NU@bTF0BnUP7k;@3T%0egtDr33l~vVhk!f+Z?7Yk$wjiAB z?4!4NWq*W!!F?yF->zMWB-celyd{PddZ^l+SphmVe_kF-)u_^)GRV>U&Zjt+^=!pv`d&@Y{Mq z#vR~^!y6Zyh>Rl>w6UKKlTTX)w%ZSs1p zV&Pm~d{MdeR}rYS1=@D<`L%nsa82Fds4|T%doL_ANc;`;j}P1R$H1UrnHN#)b#o6w z|9Ek{z+Y>bpmSYYm$BtY)lC=EOdgiV9Dd6kG@SSAT*P;OGqWXn_B$D!D5oD$S$t#( z$E>|OJ#%+6ywOr`ow1O98m7IScd$6)?nc+9_1PowwFh-NDw4oqLlrT z-X?agN0~J!Dp8V7j8hxu%R1Z$(%^c8k%#ba6_E`O$Oo`^UYTd{a`2vDkR%64VbC`J zm!q%h?9DUwUbf7mv)Um#OB0*ygi2 zd9G^nLHq2dFL~)0(5Q-)d9CHJ;ptLD3w|mP*2)N(*Ee8N>t(L#jNsJSdk$C^2k_F% zi{8O{?c-wNoz@;SCVbSSrBmVazt|rFGUR{_{cPjlX_vVl9s#jE--dmXZ4M{v4`{~i zNf~NMWoJk7;bDKXL0u1uHpZwU?~T**JH@$zB@&Rbd6G?fae*upBDTJhCs^2K*%Fr* zC%1l$Y30+~I~|EQFIU6MAo0QEanqM`YOvH~$;DZ~1PKy>d<-Mmh}TIx-6prp2gjAG z&!EqUKDN_k{rr;4?a%kx<@bzhr3%pG7x9bB`eaCc%PgHB=6AAT*?GRgqw2Uy^30jd ziKfJq`G$$xw*?@@$_O7IN9`i0T?Wd)6no}B;AT&5FE-to%a5LW%euCB~h6<<_a!SQb{9s@ArHY?`aBU+O;<0D!|u zPx_Osudv0=s$oAZ)~v;1U3@{p3)Q;UJ^RQn zEu+nzFcny4c3E={i0X?6i@P(ioS=O&2#%Ha+0Sjk3GvI~dGWt}P*@!`Hak=P=?Iwm z^<9fcQs4ipC)0mk2oC%IVx_u?P#^$#T)0jqT5|V5*uk_T$Ymo-_4g zW>_d*+HA-C8~D-Wymqk|tT}0zVC&12XF)?=jFF!#=anZ;`pg9JsJ?k)Oeap-bvD+R zOj%$$Z`*bw<~qrN0fg~Ar6%4VKFT})dfI@RK5KDOKQJ&Yk?0|5J0|@O@A%n@K)!A& zDTqwc3doEV#8lNrbV}=}bm+?o&>!AE{z*V`V`p?DNe>jqle(R|_%X?uIrNF6zDsJP ze1jP(gST!$$NY8d*wr^#0Fs7PW^Qw-`(%wpTw`@SH6k{z*cpSdA17Pz4M>sMQ^U^gLN zKxXm#dAqz-&1LGi}i8x6s^C91rLqwgU|e8K7e=J5C$2P3R~Ib z^ph(#G@TXMOvR>;I~rt;rA!Tt$=z^_F}%+4J>Jowkq3-G889~{(Nl388m|hnNtMAr zWG^WkWH6ODW=-%9W@IU(4tolw;!E287Cq-N$bc}MJ9+tv5;RG}g$;a_JhYrrn=%3E zn|<8yenoW+9Gm%wKj>x<8nl#g0|?X7-cyqEm~*AfKK#ahM{AHvdsJ&xW0M`=;c&>% z6b(#e$X?QnFDZUTS33<|*HD9PiwfHVRab_fK;?umKd${4cW~dia8L}fz;m7w?W++v8_Apl=67EKZmO=wXaS(-_l})$#v2yGRL@44CWi){H#}p^mZQrNdMPlZWB7~n@29y8 z(czeV@mXAAzrYqUL8r}X)X-71eK}bm!3)O$GEUX#?6)Vqmp2KHZUl#*|MRPA|v=ofiR5{-N8BUkO{#1@Otco|JilDKKp zV)wl;WvW+A!B(US2t!|Z<6g5nK0N&fN*dj?AwGSQlGqp7qmz&fZE=~Je6PgKTI{}E=FHvNH-L$bdx6zm zhTNkCr*}7=jaA7z4qb8AwnXilF=f_SSvbXIi+m$I3YR{D(tkM6IuI3%DP z$p*={w!xZ9O}fTLG@y1{A6R4)=P+gNZHb#wRJqz3X1AfMT9-Ec?&HCJPUDx!s{Pu@ z;S%^{WFFGO2E6;bBAySTg)!Z<(~DrHg=`ctG}CHuZnvPx-EHl=oj@FbNoRF4in@ahd^#T zldxfNx^5`|BO$Lae3H0^7}arh8u%)^3NS4z^E3mq6-njo05Dyv0c$@UJukNFv^Qf^ zWgvVY(@0(@D8j+t17!h(Tz-Z>lOPXDQsK}!y;z6t-aQ(0Z0tZtp=iUgMOCE*S;DD zaac?vDVsE&LRr#?#&NlB8d$ZJ0)%(Rscsqd;AVc1tVjQlj&vtvg!KPf&w@`q4ju-+ z)aiLADDxlsevB$>;L)>{dILazjPb@Lj^1F=bHjGXU^*xT@5dvxh7aJqPp(#}s+zXk ztzr*~ji?^DbZ}pC9`CEgqr zINJ7b`TE3p*Td*5c_cc7zWuL^5Ix&qBQwA?o@}sb3v+5A@R)xF>f`Brh^DuavNsg} zs`*us25Cq-Xh@QgrkC-p0Q)pSb!rQi9#9=L42{S2d`!{&Jo_L6@e^U#S@HlOd!G~p z)6Q@on&Zb0hz^4IV%ToFqTF%APswHdZpA+98|=<0)5rt`p~uca+c63kblp9a(F6)2 z6QNCi_`do4hw0~f6Xb05xQ?=^WRqk$`XNt#kwY1yfa(!Lf*rPe?RGotz$ksNA)sOK z2Xuc!#JTfB)3&ENn=STPE&>rDBXBes8}jpa_&aGXx(A0;x3rn))E_U{el>TAfyn`C4qMwg>KFjnN^A@#EPc7ff>b@%QNrI% z7cNJ?B;Ik{-u14La(yW&WvDR&87B(`f;I0z#;$HeFEWTDczfwMxn!Xb`6g#=WKCF& zy5@Wn4uFqGOO5+naNr;M9Wx(GDlIo`&%MKm*5g4m=3k%TWxtwhoI9@;zxpr3!s39? z8LA!sJXr2J$h{%#(u8HgE%4< ze1#)^NJdTTuSvE7C+#Q5gF1af^Q@%34ZjU+M30l$@qZ3I^=h3R{|YpVktp_w&xMF! 
zIu~u?clDS!p{K-gPW38j9gvKRkYqf&R6u%e;tXr|yXq1OQ%~Uh8TP<4^n;?g3j!xl zNV(&CbK$z}xev{WZXeE3NcKp^j-^lzvZ)$cyGa5+??)rn4zd`v+!`kh8jDfD3MsQKK zZ^{Q+u+e;OFczHbd)%K6?uy-Mgx32UAQ5|XxXye8?AfUD9~mR@2b)j9UW0`t7+3k% z!v3PzUp)4gRQ|717hMdjhRHb5?S;T5Dm0p@P#CGL6LHRvPiw=(OmXZ^CbdsS@&2Sq zsZ9={pGAL@#QcY@hQioBYf!(k+MA?G<>wyVtjG+~`OFHC34%6+x8z1ZJMZ4lKG|`# z(fj^R5VFGPvz9(M4$Cq0XR|C%MjR8t#BunA37TAEIOcb%Q~b!!VoP3M!l5S*_W0NR zGoh|Ev;vf@%IG-m){p?AqUfRhfx#|9a-*~DW6l=V7cBL5HugvY3t78&){c}SDNu-Z z3hA}=V#3s}fk7oYm_9}+yh+~~0_H^GizHO&;$L<(p$+tz0Tce;YCPfH-HcxkeKdU? zOuIv(y8rQDsMV=_l6n*wLJ;MI*37vZrmVngjBKK4`9r8exr{OFN8JOT{zYl2)oxfE z)&3u-bzghlFx%Z@BRN&t`jmRUOHjYMKOrBD$uoq8ie3jjWXN$$LWw}u5aeAq_nNA| z1j^H;TY8#c21DiQvb{#@^gDxOlPLP<2ucT@C!?llR`x{kF{6QXnj2%a4&a4Ep);HK zA5Rv>xW+8%;MDXr+Nb%33Mk9}@BvQm|c$~<|6T`ozWW%>|#N9L24ulCd z-%V@i*GKd9Pc(-%tCIh0eQgqbR(u1K){uUJxD5mGR?&E!Bk8BvTXx9eIX+Wr7|<#W zvtmOL_FXuvHu%kwj58#u3fWF{Bs9c7mr{5MfeCyhJ!CeHIdd&U`ANn_5#gVMQi`gK z`7kDhhVsO4A0gcqFyk`#`hqt>0yJj7r3SdgQr~>lQy7S+Qa5e z*!lO5cCqL>2_N(X)Uo09hEB1(NzEx?&{gk5(xqgZY77U*XJDU4QLwj#blv{2xqQhf z33o3wxwWzs#u!uvFGIBak7En1-=(ezz$MRCn=@r&?yf$ffA$whFxHu_&c25uU!+4K7NJdu%U#Ckbyed%YqZw_*O9sgU`lOSBvjT&riN8nJ zS5AB|*J2QgOG`s_CwkTgdEcEo>)0@ZQ0kdU@B7lHDNTn3ByEM&&z!ocs)wLr%aI#D zx!mB{uU9Zht>s+ibV5KWDEUGkAPl(UcP4<8!QJ|y8x^(hsc20AhZ>WhL3v^kdyH(W z%6mLmedxIh5pGL0s0!Tg#}G^N_VPa?b1sJCvzD_(G=`8{Dr}h8-owOepK!I{R>OiK zq4Grp{K*^xrmYw+WsKUpDb1;SJseQ_&r$me5=XPO|w;9Q5rB%OO0wSGXt{}nr3 zG!-3|bJL-_1zpC!~%e`#rw{o zmppwViCTfbVUGqc7Cr`bse9bHfbRWILCGCq%n`2*>Rk+)2`HntZ}=Txivix zNQpIVai3%~NgtEMOPTxW;$ie8xU@6<eUu}P`fu(-s~9=)HV`zJ7XfUY@sQ9>zTwOUY* zfgKqYJK_Y+rD)YXKZ_3L2v@J`9E$8dJKa-3>;ZL=YEZs>+|Yzprm<3*XsG1dlRYR( z?8K0QA>tEtor=ar?cNIPjq!Tase*A>p_~I6@$e$)CsZL_uTqw_q6WL1rhg@EBE)G> zb2sv03WYhu_c$Yv*_P0;y zKC3=i`^9;e#a~<>zL%q<@bhTqcJ5wTYx97YZ_A0;?$5LA3aHxok*N4 z8gysMZ{WA0ak{>=F2&|rQ1DyrKqeBO#gvPO!izYG13T9QWu4jW~y? zMK2?=XuO-O_auC`xSMBB@ggdAoAYX0!YjlQVIE(7ipWrzFSsPMcgWx@ZJXZ@ zO7{=VS!M{T8nibYTgiqIDk&yQn7vz7MI|LLc&q!poPp>3Jz*tqSns6Xu(=*o83i*7 zAI_>I3d(rDT8>`gR+~4%P2QT(D=ZfZWQSu9a6c$JNg)Y~&=`UyDob*opg}V4K1Cb& zoFE=ib%N~`(>tDeEPik=VL!R%i`40)B>l0uk|cUUmXLP%U7eUNncrY5kE);~5K^aa zTTm_|6c5Z3Na*QS*)#13mEdNsZf1G zN6b34Mn#45gd5?DS?@VPp`T&BD-d{#@G_}V*QmDEFyf15x zvY+A*)5N>&bjL>6)#EAQ7dC8dt_{e%#h=qZlsuYL*H{0LuryhK&=Z*!oS5d@KnV2d)hrWWnrh& z@RT8<#`aN&SW2zab@IAxqIA)#C!B`5((cv$ zLT$KAz>rhD*TF%2y)}oBB&1TcNs&F8&<3Hj zL8u!U?iH(g%^5dQ{N51ioW@b_AdpDADTZXR)a*ZV+cZV5E*F6%C1ow*y>)DUpBHjs z8S#ChW2m|xz1^U=ZPRVRm=kI9xs2oD8yuwFWP_%r``r7ar?&R(GlV7sZYNgpTEm!~ zME^+9H?rgehN`EaD;-ho)2F||y}N#3;Zf6<$T?9`H;qy~N3)!)G#unj9rP zZbUpzNR8FPB{Sy(iY1dWR3wL@L+gdnsHZ!LOcH76Oq5o4a!lU5l4Vc|46h_gK4Tay zOIpB!c8$gf^SVDumkeOF_Ecoj2!AB1JCM!$#$4<4Hx^%zj%4`E4M!xMgflkRCLMz! 
zb#vcAO2fq7>uc&U3x2Crcw}aBlz6CAGx{mo;G=vUak?56=-u93H}$dOzS#x|!9eS# zl4_D{zY(xInN!p3NH%KfYD`_y_hQVimMH-U<=D&(G#(Lcc%4%d1B=iwggVJ83x}2I4$I&CGk~)2KiA@N#l(5W!N43nvEiImuU@#Pg zuwUYit1fhIf0C^fHTb z^O{j4HLN43qbPo)m5Ki1gwIGhFx{|8cLu5keX;jfoBfc}vbOoEs)D$u^LYzBx-hotf1N*7{3d zIN4UBnS%W_WKtNHFlCKgThMJN?y<$qq`FtgUC~p0eBmrShLI1KvcbC5ED+MKPnbo`ss> zxa`+88JvHB|LQCL=N59-J{bR#Ft6MGaMPbu?Dne<+aZlLdlce^rGbf1xboqjsPWfO zMsxHRtsq$O7c>0D4FATMe=)=Vo6PX;X-d(L<{;Db^HB(Z-FJkPb^N3BQGO8iAf%Ub zI};?Y`P+;i+RuL&EVsLEn-9+6p=}|Fq(p-p+MR4j?{_>O-H^W@3njcB=F($-`r}6q zg`vNFL{nXv;{OP$xNPQ6bz>;QV`rY%!Q3A`fuzzF5*d;lLVC-cu>Bje9H(d?QvruE zQhn*@0FwZQsy7amx*_DTOIxURTW+W8y_xSBYqtl~s7{`)SIW?x%HY{LCl(u0H*uIruqw!>wD)+-J9+1G=XeslO{DXXCaw z3-*uZNm=Zo>4%5jd2HCepCt7PpX=Wpd5oh|mwnjMS}!BTg8P2rZK1<-*=OB=s*R7y ztvQm2JvsQ+B}%_g{Q;C+6ossL{C4;r!t+bzhviM~en;6~xc;J|FKe4vbq$$V#>_{L5N2F4j-tNO`_Fb;- zVYjWY^3(l~LKckjk?U`nBttwKt}GsGN*8X;w7g>FdFse2*AqWG#NZ@!O? zcIEpHtp?SCcPcHRg6QLAmPz4n=!0oiU0rwmx@9A+aZVF4W1#Mi%<`ChyT=T$@Pc@2 zXfF%0_T~>*rA~L-#0dZdvZLUiK(5y5?C|*owPf4mvHHbSCh=NJ|KVlXmFL0ucdq?5 z4RCOf)!+$pr<^A)fp`Jj=>GrqZ;rq#$v>?KuJN!tZ}B z6|jVxlgB!K$#Yq(oaDNjH~QV&AYy<+Td*nBxQv$EZx8*`_8hftG}{sF!-W(qOSf>lvNFD|Amv;FSK%dgBDq1XLv&sjJect?P${o@zS@6t6zHtfCF ze3i@bYl{Xe+c(*pH!fMRVeF4{eRuRnFc)KP9GVNEneblBrXZPrm`nWJ1okF%`q`O< z?vcw7pg2*sy1%EV=Z|wGjK2kQS$=yE%mrOyXWkfqP#;kO^|7F1CB(rCTza2IcFR`d zd-euJcdf>QClZG@t1j9V{%Ju@;akCiYKJa?1vxfMH+nBd>Q#b)95BN75#K_679IPX zBig}&nk`WI_jcUA%G~2|iB#yFaw^v)@~2!6%wg<@GbUZNK`Mb2q}MG8Igqi4h-_IvjX(I;X4>yd2GZsW%$YgQyLjD zcN`&bM|?S_;G)?uD;xf&)^O>I<#m8vzjIHT>hC}M@sd9*By1xiLRCSV`qBB=n|Ye% zHW~Smx`OX)ojMIDUq6MMx&?P$ z84qygMCGwIK&`hA_WXXJ>C8H?%dSU$eCsiW3;o7pqHU*yIsL` za8_M}SKV;~37ipO9yl-MI;#jDlp`R+NmJu0*tbFx->fQx8?~U{p_Lrhz0)0DPWjUamZ{Tw3fotEN3`eT)2b>jfi3Q{njieP zkE~(NbinBzrI5^5LH_E3M`7j&)xlNY9oeRD=Z{r$WBlZ!ZDNMoU8Z)#QSB8k0eUBX zwkJr)?N1hDsogN|KMy)^#W92Xluxxh$0Eb{^(d_Ya9ln5@#`WoA{RzY)+guRc8*9y z(O!op4OI2X^FfhdlxZ1;G9)BpYwchwCra>=ySXyzyEXQd=58AYL*cHV8v}yc)Q5R^RxlNR2poY0Gj`~_-e&|c$ z6S~cBdq5#s|9tIyxl;`33NKir?shD`Lc5`ABf+h;J8gS>>0q-CR zg0O#0bY`gu3?Iwz4nuhd_9GVc9X^<~Tm7`#-w&wmBw}ImP8N)WXY6#r4873qvy#cf zi0Tn_-BOPar|~)`mi0AKwVBO1_u<(zk0_4{`!L@I_mV>F2?hhrXQF{9(QxQQ$5jx6$CTR1j;)AX~2@&)@dtDPpMz!9dnWQiT?hOgpB zPxOz1+OqZ>Gh1&Eck?;ww4+6LxGd8-Q{aY4i^{y^bZSKt##;hJC)WWe92cN4@NSs? zQ5I5zy;Kyl zmxZV?Z~Mg(mvN>lIp6Ze5^3%wXI#eSWC9ZH)Y#H7pvN~~rZYCH^co5O4##S!+Gz#L z_eXP}+$L-IM73uOQ>Vb^g~Zey&C;Bk9WXA}sA!&T`n)LyUlzwcoV+1sIsfB+gj>r| zsnbiiV}|oyuE}7b(tc}ulR45GWeZEO4nKy^jAIL7ZYTHEt4H(vd^=Uy1B%uR*7`!) 
zvIs-VNV9SURU*tHhi9`UJ++{Z>-OCo<oJ8&o`YVK{@5RWr!oAW@nTu_nc__QP3itxlsv!}(gZ zJcP_-bh;vrVSU2TQp5}2edLwSUm;1E!;|dYZi~?`b&$1z8}(<|9_j8@DgBxB%B$UY zYiV@NK<_FG_oK_Q$r0J)qYk#p%RWoDgPuaNEOwQ0hn z@FLn46Yi2NKCIe+7_GDP6A13&5dN4zbbp2FYtgK(K}v7N+>FDUPnXy5a;ao8j@Kc9 zcrV114cRcU8=O+4J2?*e2J8lac&Zg*8UWi1=;=o2k+8}u@#H4i%Qf<($(DUq&8$yZ z2ZJ*+^1fTlTG=S=Y)Q9C+^4@F0xH0c^ag|JjWxXw)CNT^XU#UtVR3Jq%2~GF$+7yv z{@R`1?Vs4?cWer;vD_!v@!rGUJ?&j`zDhV$)?XXyv=}#e2EIzA?_>P|@I{pik17Mutd^f_c`EOI`tS&1a#S9!!9WH{)p*Tm8+5dzN)A(`_X zkhosKi)~gB(=7>74@~vR129qyGt@gk8uvl~pXu;P`2jw1eb0V=x^=Ho-XgXS3+Q*S zbq`C@ri{ORFi^9f2G$ElFVD$=`{sXaowG%{$!&%RmN&E12-2`sb#cPcN=%0HIrsEW z*K8doKX-_FeSQ5v4FHR$H?rrAbD(J>CUJ7~r~Ju~o0ajvT;0EFJd3bsUPDUCvKYh5 z<6y|+|Mi=h4hk~nGfuY{4Od5L8wH6pZwzN;PyWGfgHgjo%VrcV~o$BFV7=dYW4BXU*nYnNmfS4u=cmLI4vgI z!uyQF35@7?tY;qS8B-Uh!=j}_%>bmjp13{_Y>kS$qjY2NleOlJYkad^jMIDkHvL_{ zoYBq6FD`sKk8oMPE%W62d4!S$MHCm@39?@_2poj@h0McwdaiN!uOJ6(0T>>|@HTH$ z_Q#R`Fl+N->(tmI#xWx2;SAO|QLfRE{GMtNJ`D`@{3MwF^aXdkY8oK%km@ocIzB#+ zvgy1FpRSqT^>60}5J8SZXrB=tyUauSu=%szV?y~*q>^x8O|1sXbJ%N8skDeLpoTuumd8)b}dvoh| zNdI4ZDKswW-aKoGvuwQF%o{^PL;o*k`~Aj}TXy8RZ8Wp|b~U70BAYLLK9CcGSZK2J z4Vej^L*&FN-ba)`zCl}oSB<8tB*Lc<9U4QQ?Zq1&>?uh!x=?v7&;p-1kKPQ4+3qE2 zhy&QKvijxfhE@k!F@*mj)v}R+oU#YWX3V)Bs$0r3-eqA(4rr`MQtRCZ;=5O*Knh}v zV5G5k__!OmyJ+~@Kn?=VezDpcctr6{X70tJ?vfK|Zoz%!*ge$F&3uHr>A%lt8 zs<*4oQIdx)PK87>gdus!sHs{(Hs0_xk6)M?+dm4>M-1IjYQWHj5)J?tL-eR=X{*vx zRD4YqJ>+%8r-3&#>nl}$N78HYHemn1>GRooMPHPB+A?)yx3A3&BDf3mo`8&GxFQPU z+C5WDb=?&Gb$^e+bMXMY+#5*Z!)=oKF)5keqqo9kM`@GQA$p-_+~~08^VC@HQQ)fG zf{}nH6`+}@3?RNatjfg5AuBuwp;DN0%yO#T*5TSVfJ<)rj!+C*p*jU@u4=~mcnR>* zvf+w%cp4vEi=cMO2JjJ|r`Vhv2j|znxMfAQYL2?d`5c*~S?x{Ks`LXEQ_*xV|7LxtI@dtDJ4I5F;%F3bn(_C=L5cEO&?R$~@!L z9+>5PNHZ(WVX^%j>eiZPHm|mBI-Y-VRRlvn$6@+IhPDe>RbKrBu)D7WBS4O2x=Wiq zTHaU|gulsb9b!0r&H7*+QPmRuB6{e}U^1M8A`uvftzF6ahnmvv!VD^w+O;LFS#SV! zbVivzC1@0FIZ%_a+D)2!_}eV*i-W6t&>DGj)cfnunZ8lo5)MUN(y;c?z&=Jb7OB(k z9*4_EineCGmf#wulCu$nxnXg-Ijqn`RU4o!cp>nf8!J+K4oSY!A8616_2kN=T74G? zsX1$-Jom0=6uE7E2?6wSGR$Gohr_Ek>hbE493e8@a--l$92;Jf@cwp0;cdr^%5cFG z5Y5{caZZbCkSOVa0HwP3UUC4Sj-gHf&j$O6ZD@zWBjy}w*ypK< zroGL&OG<_?X)(*!k98WRv;q#6?W6EJ?vSFMahTu11KkAQ@}FGVhqFs$i-X;MT#-{n z+JV8mPZ1X6I09v_QjI8_Ds_pNMbX;0{GD96x~LY_Q2pc!H^e{dbh3XrjHU3P{7+^6Yzgeh#!AYGD@!0 z18=8(GZi!&o<8=Qa{+n_ciUPE1efdr3iFX?YmbBIliKZQ(Qv(kJ)rWk?B|t$j101S z`^hZwmTXdX_hXZ4_A>Q#iaJ<0! 
zD?pO5>f#shLILP@^AiiR4{c(|-KS6u5ujVxQMR&8`uHv8t2q2jTt6CQaHY`cC#b_q zip&VnK)_uxjH)`}(sLKYH=YcGJL0Sd;w#TfWT!#|0n2E+x3p0+S%b2LXRtP=btG$5 z{{1RB=+>;nYlhT<1Q4NeKu)QJ6bj5PtwTi|s%6xc5&jOJc+@1L2#W3l{Pp-X%Q|ic zqYTtaP$az}=z(Iu!>W}6F9Mg}t+JdJhU%Vyb0vJtRO^Tr2Du=o-S`vnojf%upf-XYwPhd^+TS}-TW1;2 z1Ui1-WAl?UK~S&+JhB2Okmj*`IQ` zrEhcsr7E=+D7t>z(|S_9;BLLo}K>a0SSanUfWZ51fd8C z4i5I0T8r!}Y8zQULOOT%JaF5e{|Wx}`jTSMN$eUL`~2fN(7gOD6f6vl@7z-Ez)%gv zzgm}5?B_^-x;OL17V!837IIYO!B7;mWBqS~;CIBEHbxXA1nReqeU11M3m}B(jEU{G z<#zsWcILXQ-&yGGx%2jS>y#f-&F=wxG7>}x#4y`E)Uj``wBXrzFn#X<7Q~h-*%b;sT+#;7{Oa76)*`pn_pDj4;Z}gS# zbdI*=<(LctdyxbJxMvocjBfTo_^$#JZ8~q%G+V{v;~@42+FL8Pr%vD5oGM#w&36VU zX+c5b{7N?Nd$igd+EFhENvE2ax2H}+KAAD(I5&^Yk1w%s0D)UzkLti6m%s3lt@Z{{ zp?O}=&l9Ha*DVaM}l)?5KE6a*w#My5t^Lq-ZJ{fF#PBSBd_5@@GPypDk`RMn3q>+KM zR;&SW(aw9Ojqd%iK46S&8}9GNXz>Ec1^F!+UM-vQbK_;X^KQYy-`sMV$IQQRTtl1F zVt#!0PPNk%fHZ&DZWx{vn^`9%G#(u)D$Y3UlFi-Xm*da|HdyQm!AR^i1Qt4mhep@s ziFX>fL&0^11h&4@j`Cz&P$=*^UE~zb8b-2KGM!{w0hsu4jc?YDHCr=Zn!ym>1J2sQ zcqKfkM4vOuCV$FtL_Kbp?9{Oht5nQh(#!`Wa9B_KM*zR*blCAN`wKyuDu2aB@sIKx z?<=E@gbi&W+T*!2)u#`xgvNF6h4MYyIXv&x2Ki+x>JCoX$!W8klFc0I$&m!HczntL zb3b}S6t`{XH$=M^k~Cws#~uuMHT^J!ei+KJJj1AULA|Y9(n%4rBcke zk+mJ9-G1C7%)Y#Ky}|zi-^`p1%Q@CYxeU^dH`9lj?MdkyP`mY=Vj#$HNRaPXTp-tM zO#9%w!uz21##(s!VQb>yPM;Krt>MN4~!rx&SzqSk08;Iu%K<>=X zd>AC@9yHqpyzt$|D%lOrW)?b|(~G`Ae1$#6dDpd21yY$6hd-OzFS}1BgHLjeMNtbk z?ss#9_q$fiJOi1~Jeo>1*4!#gGOPgLb=)Y;X3HZ?ko5;o<+>#u5p4#k$m$;|gY;9u zjKjf7^ct`h2jg14#UxJ8)Pc-yFIdRkONM0+qF#j$Ftflx)0=eqTvs8+&2bEia33j(~#W^*<9q&KUIBH3Hv$xSDe zD^F?eMxNlRY!(VJCFS)oahc^-Az%UG!i%L_*`P#wo5g{YAM1~Fh$0&&yn2)KvqYnv zK_jeH$=x4Y>6|4>F1$N1B>F}nxJ3ulBGw-}G1W5_vZa~5occ@I;Tdn5VY{eYlDr42#PP4hSYoz_!lRFCY-@2? zF_ONXFdhf79FrQA1VXy{A|9BrGEiScP3f$fQbYXOH`Ah#Ko# zye1p@t*&Q@;hSj9uhCu4@*Lje+<^1aW@gcZ_2h>nLvGN$S7dZQ`{c5~y?-)%Jq@5=0mk#Y`_1qloPDb?4oXsi|8o|qqR!XX$nraOcGj7)mb3+NBWsz^tz(j^b z9mOTFr7g<4y(PY7i(N4luZt6XSfdP;_{J#TtS)R`kG?q$MJQM3{n-xJ({QoSzOzvfsd$U#y|BPJ!C^$w_6;m{MA5))0{d%!|M z!VuW3G0(-U=sq+`f!_j$;X+C5-3NmDtCMX&FUGY$A(6Hys7yV*i$vffh0aL^4Gz_| zhELX1%3Km@5>dio&9o6B*!)PX2D}M=%=Y3e^MSu*b%2C<+oYsWOpdlTN}jbhINy5 ze20`si7+HlMBZya5{9GYlB72kpk5wzU zcgq7AWJkh#{)l9k?I~_^`Q{oS;SxoC=6zD}H8$cKorXkoqbH6qVW1afXJ6t`E?s+9 zq@n5AuQ`?D>OK?;bk%&NJI=uHKWVlwXGV;+4`XEZ8HNB%!Aky(}j2PvF+I z^zheNyCtu!+FMimrZ4(*^XW6|QjT!LDX=okR#D5+#uB9$f*?X7uYvZmR@n%bK6E!l zr5PekT%QAa`Jpi1#f2cJp3~n*3X(KVl#=8S)0|u>xG|am2{M2iKvTSxof4ylVN;~c^s_kCd8WCdAyt1}A zYT)u_)e@9Cey)Z+af{DV#f(@qeE3)`qY~xvNZD)Uo#gf>o0C86w7ADfAUVpj4T>`} zOr%~iqq{*kH6kDgyihR%OkDzgEZnO&N#Dwq$(tv`WJ#rrQcpc^>kivqEW@KowT6;R z0vGIIpPwMpo6-%gWI8%UYzUbQXPSGSU_PC#cH99~v~<(>ns%+K)5|b2DMIm-?KjxW zaiq()i`~8cY|!>PU1yBLs@W06D{@G4aj@1L)-CsAed7__n2FT~4g`T`qSPu)5Uyg( zoV3YhffmK z_SJsSiuM;NZ(5(@vd1^}b^P)qIaIm|TP?6dbYS^zM?mcKPBIK_hvmmiGHVxx%q^iX-6)mF`G1F-yW1>L%BDQ(G$PA5- zW;lHJ9|+Su_Q3sVOSOkjTvvVx;#XA?Yl1)bj$a4}a}iWP^4(irgQr#CBAzH9>&6y? 
zZeTAzsz2_{gy%#{vUSrby!Rz$fnJsFeI1Vh8I0Ct zK_AZnhj*t8Sl4g?8&zO@fXsMwyR|>d7B3o@d+U29$E;B*WwTSJ9+D61br(%WoYQ>a z$+}fYAC1^!NVPDKhF~CTRY`+)nv}0N&6?$~Pi=At!gLf$uwkr*F4(DF(~Q)q$s^2C zkJN_bOt~ZSXqJ#Kd^YeS-^A5eAN_md-Tg{v-Lr~|&5mPwB$KsLIwn-}zPj_K<++S6 z;>m<0|7K-(wZ$57C5>sHI9;yW&p*SBdza<^<0WZLMDzRN$p~;) z#i?aH<>>)GU%Kk+p&<;_5_CMd9c`LQB2CX~y0Nh199}Cmc}xPPK1#bM;!Vth z%_@MrMxEd--HO8<4p#NJ>S|6;CFqLu#RN_(Nm`_uMnEak_VNYVn>BPN-CPG<)?r%{ z%vo(ct4--cHMui%BR3>%F;NXclae5fARs@E0~))k47fx8K@%2vvCJX~@e(iIZ0}4n zb?WI>FFn^JB)osUQ1{rix{4fMbPqABYs3&JH)=w--$Medf44Nsw2@RZ;3p$6qy^OC zS7dmC+o{jUpRPHCKbL@KP0ytb5EuIUk#ro=4P@Ay2~r)%;Dc5~=Tf-v=BjhnHmNIOzzqsW~k~!DTDXzkPJTn)UQ(qQd!4t%dyc3`J-ezG?VaFZ<%hbd&@e& z_ZYg=1E5QVN_jY%O&v7`jedU=n`Gw}?!i-IOe0J3<9-%hUJ*fKjBVkLa`B=l%Fb&v zR$~*{>khaSf}%@znHuEE9woKX7j^|P^y;dm=O}tDOP+uxa;>_jw>QZMm&8nIf#XQ! zRl&*O4C+)~%MRvx6msy2SJw>=C6A}ib|29Y=i*8kA=Sxmo_yqrc<8Ye6b`nu*7DUk zl!m^j_m_cmr-H;%eMEC~%?}AH&k2y07bsOKuS*6pQ;av17v7jN_jE=v9)k|Ufrh~> zDrlOVE|U29#L$vQ_|i$Wv?#fK3D-8Fr`EtxE1ahI@WH-nl4H9cnk3AVCq&_hu6BRY z!<(2Uw)Dyb1)5F%X(|}tf|`t96xEM|gsnrFojHt&b!u4>T}bH~Wa&4NUPuf&6|-Ax zkh(_Mj3w$tNwP63PV?;oyL@8uD$oF@B|MYfGN@WOBbA%^I$l!oLb57_p}!z7-{D0Z zTBs&caZRQ2ef|1dQC7RmJx)q`Z{LMfQ8&Fi(4c>Bw(=ZlE~B8kP&4@i2t|BZ>uP-C zYbKjAsVvea#ex>$QAwVjDj~X($Z>d69p(&6cA`_Pg6P#RW+Qe=OIFmk)EVAcN$;WDc~@qN_-D0)C##R=^~@;y;bCs^N;3 zzykPlOlrAY-IM=^_+hGSTzmLnhGen6F$hu6PM;`F$Ug2=wxT8)b|n05#z#skzWx+r z3P;lh)sLsiyC2aUq6Ua#1vwScWP+o2MFNFN68nj+ZZ4rJv`tft4L0~;Mmg)kQV+aD z`|c=J&Ns-N)ovw)hPO%976cN<(9XFrkQA-V;BfT)N6VwW=fCZpl|SX^H*xtw%VCaU zlYll7N1t2DyJW>9c0059XMv!%IU|Snkf> z%;56)=m9rwBgN5=^&p&x}e}yyu3j8kkD?|?EA^wWnFX&kPSB7K3|IS>{ z%eQrFI3E8aBjsG*a?|z})O2~T&8kY~ZY-4;H|EvNWYb#dT@!FQ>BEP&LzM}JKqy`K z3`)71eTREqJfB$WS+m4;OkWPV$ljhYX4Zx;ZZW^{TOTLNWlj0?74SmUF01+M8t4=3 zGA_geq{dvcb}Y$h7ytgpUomKpwX30a&Fb5s;oX;>HfY>I^Vu#FLwB*> zJlWT9MRakCGmT^grTdHlptRt}hf{Jr(I<1~j;KE}CNa$&Epu*e2zfi-mbnB%d@dc@ zyX3nf`PLlUN8sAiIQMAY{kK&|{FUf0K%Udzn5f$Yl9)R|Qg{PA7ECt!4dR=*&hzG@ z0rx@5b=+7UZ1%ayXoTaf>-X3;d$9{Z?sn%!eNz!=VJ$vte>2gb0E-%*PkUA$0clSh zNHw>8yDSlJASwV-1cbaK$&=`>?;FZ53rV^ucvuRFg$^fvyP84IAHe;)rD rnh^T?-Uosk#=lknH$(pPqXG6D=41Bg?zJ<}54Lad|Doca2haW=`!3&} literal 0 HcmV?d00001 From 0fc93765332ff8ea5199cd212c0e519a33b917df Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Wed, 19 Jun 2019 13:38:09 -0400 Subject: [PATCH 078/108] Fix image and clear long output cell --- .../automl_with_pipelines.ipynb | 63 +------------------ 1 file changed, 3 insertions(+), 60 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 17b95a874..f17aec991 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -1110,71 +1110,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![](pipelineWidget.png)" + "![](pipelineWidget.PNG)" ] }, { "cell_type": "code", - "execution_count": 58, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "PipelineRunId: 994a7673-8f48-42b9-9cfa-1a47bf70c304\n", - "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/994a7673-8f48-42b9-9cfa-1a47bf70c304\n", - "PipelineRun Status: NotStarted\n", - "PipelineRun Status: Running\n", - "\n", - "\n", - "StepRunId: 9f99614d-6bc5-4c10-a121-b09afdc02c74\n", - "Link to Portal: 
https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/9f99614d-6bc5-4c10-a121-b09afdc02c74\n", - "\n", - "StepRun(Embed) Execution Summary\n", - "=================================\n", - "StepRun( Embed ) Status: Finished\n", - "{'runId': '9f99614d-6bc5-4c10-a121-b09afdc02c74', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T17:11:05.931189Z', 'endTimeUtc': '2019-06-19T17:11:06.005469Z', 'properties': {'azureml.reusedrunid': 'f78cb325-802a-4779-ada8-05db82c97835', 'azureml.reusednodeid': '70352e68', 'azureml.reusedpipeline': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.reusedpipelinerunid': '50a80cb2-8adb-4cd5-a337-c493404b7549', 'azureml.runsource': 'azureml.StepRun', 'azureml.nodeid': 'ba6c6c5a', 'ContentSnapshotId': '8979e52a-3c38-432c-b9e3-a235b33b7d1e', 'StepType': 'PythonScriptStep', 'ComputeTargetType': 'AmlCompute', 'azureml.pipelinerunid': '994a7673-8f48-42b9-9cfa-1a47bf70c304', 'AzureML.DerivedImageName': 'azureml/azureml_b2a8349416887710026a15e07f74a6a3'}, 'runDefinition': {'script': 'embed.py', 'arguments': ['--embedded_data', '$AZUREML_DATAREFERENCE_embedded_data', '--sentence_data', '$AZUREML_DATAREFERENCE_stsbenchmark'], 'sourceDirectoryDataStore': None, 'framework': 'Python', 'communicator': 'None', 'target': 'gpucluster', 'dataReferences': {'stsbenchmark': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'stsbenchmark_data/', 'pathOnCompute': None, 'overwrite': False}, 'embedded_data': {'dataStoreName': 'workspacefilestore', 'mode': 'Mount', 'pathOnDataStore': 'azureml/f78cb325-802a-4779-ada8-05db82c97835/embedded_data', 'pathOnCompute': None, 'overwrite': False}}, 'jobName': None, 'maxRunDurationSeconds': None, 'nodeCount': 1, 'environment': {'name': 'Experiment automl-sentence-similarity Environment', 'version': 'Autosave_2019-06-18T20:46:30Z_9b3f4178', 'python': {'interpreterPath': 'python', 'userManagedDependencies': False, 'condaDependencies': {'name': 'project_environment', 'dependencies': ['python=3.6.2', {'pip': ['azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33']}, 'numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], 'channels': ['conda-forge']}, 'baseCondaEnvironment': None}, 'environmentVariables': {'EXAMPLE_ENV_VAR': 'EXAMPLE_VALUE'}, 'docker': {'baseImage': 'mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04', 'enabled': True, 'sharedVolumes': True, 'gpuSupport': False, 'shmSize': '1g', 'arguments': [], 'baseImageRegistry': {'address': None, 'username': None, 'password': None}}, 'spark': {'repositories': ['[]'], 'packages': [], 'precachePackages': True}}, 'history': {'outputCollection': True, 'directoriesToWatch': ['logs']}, 'spark': {'configuration': {'spark.app.name': 'Azure ML Experiment', 'spark.yarn.maxAppAttempts': '1'}}, 'amlCompute': {'name': None, 'vmSize': None, 'vmPriority': None, 'retainCluster': False, 'clusterMaxNodeCount': 1}, 'tensorflow': {'workerCount': 1, 'parameterServerCount': 1}, 'mpi': {'processCountPerNode': 1}, 'hdi': {'yarnDeployMode': 'Cluster'}, 'containerInstance': {'region': None, 'cpuCores': 2, 'memoryGb': 3.5}, 'exposedPorts': None}, 'logFiles': {'azureml-logs/20_image_build_log.txt': 
'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/20_image_build_log.txt?sv=2018-03-28&sr=b&sig=IuiLro7VKrTsmPEJbfhRGYus2BolDMlvECS3bZ3BFvo%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/70_driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/70_driver_log.txt?sv=2018-03-28&sr=b&sig=sE6YQBEIcxO1nlrQ6DrJsxEmG2CNm8LT20qK2RohIkM%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/driver_log.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/driver_log.txt?sv=2018-03-28&sr=b&sig=N4Ygz1QllDgRPhcRMZVs3zK%2BYYOh738B3kzpgdw60j4%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_stdout-job_post.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_post.txt?sv=2018-03-28&sr=b&sig=8IEjHjHLfGsriil0ZaaHkMizZyhGyVl1VzBD5gwwTvw%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_execution.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_execution.txt?sv=2018-03-28&sr=b&sig=CC4n2r9mjciD7h%2FMA2Y9NF2exyW4V%2BteCp9q1QBhsTw%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/56_batchai_stderr.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/56_batchai_stderr.txt?sv=2018-03-28&sr=b&sig=70mUUIdwHi1SJ0s7XyOBviDk%2F9cBh%2BH%2FwV%2FO84%2FQ5xk%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_stdout.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout.txt?sv=2018-03-28&sr=b&sig=JHPzCDmads8HeyP5HovD6eZa8mSFDfl7l8IwhQXb0d0%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'azureml-logs/55_batchai_stdout-job_prep.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/azureml-logs/55_batchai_stdout-job_prep.txt?sv=2018-03-28&sr=b&sig=W8EnL2oERfRojlbZ%2Fk7KutncWvF6IVSQInSuvUPbdvU%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=b%2ByKyJj1%2Bt8XLmJYOOkvEKSu5glJeyp7lbRaruwGmgo%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=9nwwElqLPKA10YapK0r4t3DP1b0T4BM293%2FKkc3u0%2BQ%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=OUVWL%2FjXqUFM9ymDdIeRMUSxUOQzlU%2BMFRT90qqJflw%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/138_azureml.log': 
'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/138_azureml.log?sv=2018-03-28&sr=b&sig=GgFEfWuyG9Q3VjDZI8Mr%2FxkwJ6XW2%2FSPwtHurNrpX54%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r', 'logs/azureml/azureml.log': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.f78cb325-802a-4779-ada8-05db82c97835/logs/azureml/azureml.log?sv=2018-03-28&sr=b&sig=a%2BKsFAoMIlVigIYCutZSa%2FmlqqeLFTCiI3VzFg9GPSo%3D&st=2019-06-19T17%3A01%3A07Z&se=2019-06-20T01%3A11%3A07Z&sp=r'}}\n", - "\n", - "\n", - "\n", - "\n", - "StepRunId: a3a6fa4b-d2dc-479d-be93-eea52e5c597c\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/a3a6fa4b-d2dc-479d-be93-eea52e5c597c\n", - "StepRun( AutoML ) Status: NotStarted\n", - "StepRun( AutoML ) Status: Running\n", - "\n", - "StepRun(AutoML) Execution Summary\n", - "==================================\n", - "StepRun( AutoML ) Status: Finished\n", - "{'runId': 'a3a6fa4b-d2dc-479d-be93-eea52e5c597c', 'target': 'gpucluster', 'status': 'Completed', 'startTimeUtc': '2019-06-19T17:17:00.446785Z', 'endTimeUtc': '2019-06-19T17:25:31.355471Z', 'properties': {'azureml.runsource': 'azureml.StepRun', 'ContentSnapshotId': 'bac07baf-2af3-4a06-aadf-aef17548794d', 'StepType': 'AutoMLStep', 'azureml.pipelinerunid': '994a7673-8f48-42b9-9cfa-1a47bf70c304', 'num_iterations': '5', 'training_type': 'TrainFull', 'acquisition_function': 'EI', 'metrics': 'accuracy', 'primary_metric': 'spearman_correlation', 'train_split': '0', 'MaxTimeSeconds': '300', 'acquisition_parameter': '0', 'num_cross_validation': None, 'target': 'gpucluster', 'RawAMLSettingsString': \"{'name':'automl-sentence-similarity','subscription_id':'15ae9cb6-95c1-483d-a0e3-b1a1a3b06324','resource_group':'nlprg','workspace_name':'MAIDAPTest','path':'./automl-sentence-similarity','iterations':5,'data_script':'./automl-sentence-similarity/get_data.py','primary_metric':'spearman_correlation','task_type':'regression','compute_target':'gpucluster','spark_context':None,'validation_size':0.0,'n_cross_validations':None,'y_min':None,'y_max':None,'num_classes':None,'preprocess':True,'lag_length':0,'max_cores_per_iteration':1,'max_concurrent_iterations':1,'iteration_timeout_minutes':5,'mem_in_mb':None,'enforce_time_on_windows':True,'experiment_timeout_minutes':None,'experiment_exit_score':None,'blacklist_models':None,'whitelist_models':None,'auto_blacklist':True,'exclude_nan_labels':True,'verbosity':20,'debug_log':'automl_errors.log','debug_flag':None,'enable_ensembling':True,'ensemble_iterations':5,'model_explainability':False,'enable_tf':False,'enable_cache':True,'enable_subsampling':False,'subsample_seed':None,'cost_mode':0,'is_timeseries':False,'metric_operation':'maximize'}\", 'AMLSettingsJsonString': 
'{\"name\":\"automl-sentence-similarity\",\"subscription_id\":\"15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\",\"resource_group\":\"nlprg\",\"workspace_name\":\"MAIDAPTest\",\"path\":\"./automl-sentence-similarity\",\"iterations\":5,\"data_script\":\"./automl-sentence-similarity/get_data.py\",\"primary_metric\":\"spearman_correlation\",\"task_type\":\"regression\",\"compute_target\":\"gpucluster\",\"spark_context\":null,\"validation_size\":0.0,\"n_cross_validations\":null,\"y_min\":null,\"y_max\":null,\"num_classes\":null,\"preprocess\":true,\"lag_length\":0,\"max_cores_per_iteration\":1,\"max_concurrent_iterations\":1,\"iteration_timeout_minutes\":5,\"mem_in_mb\":null,\"enforce_time_on_windows\":true,\"experiment_timeout_minutes\":null,\"experiment_exit_score\":null,\"blacklist_models\":null,\"whitelist_models\":null,\"auto_blacklist\":true,\"exclude_nan_labels\":true,\"verbosity\":20,\"debug_log\":\"automl_errors.log\",\"debug_flag\":null,\"enable_ensembling\":true,\"ensemble_iterations\":5,\"model_explainability\":false,\"enable_tf\":false,\"enable_cache\":true,\"enable_subsampling\":false,\"subsample_seed\":null,\"cost_mode\":0,\"is_timeseries\":false,\"metric_operation\":\"maximize\"}', 'DataPrepJsonString': None, 'EnableSubsampling': 'False', 'runTemplate': 'AutoML', 'snapshotId': 'bac07baf-2af3-4a06-aadf-aef17548794d', 'SetupRunId': 'a3a6fa4b-d2dc-479d-be93-eea52e5c597c_setup', 'ProblemInfoJsonString': '{\"dataset_num_categorical\": 0, \"dataset_classes\": 140, \"dataset_features\": 1024, \"dataset_samples\": 5749, \"is_sparse\": false, \"subsampling\": false}'}, 'logFiles': {'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.a3a6fa4b-d2dc-479d-be93-eea52e5c597c/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=acaxB1G7%2BLXjlhmZ7re%2BwzFYKUIS4pLZSqX%2F7Wx5yHk%3D&st=2019-06-19T17%3A17%3A40Z&se=2019-06-20T01%3A27%3A40Z&sp=r', 'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.a3a6fa4b-d2dc-479d-be93-eea52e5c597c/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=wYI6C3wu4V8nM6jJLrp6dI1m5G4lU9ELOuoIiucV27g%3D&st=2019-06-19T17%3A17%3A40Z&se=2019-06-20T01%3A27%3A40Z&sp=r', 'logs/azureml/stdoutlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.a3a6fa4b-d2dc-479d-be93-eea52e5c597c/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=wPHCRzEYZ1GQ3UbXpsfSk7xFu5SMDzJTsLYkt%2FRHWpM%3D&st=2019-06-19T17%3A17%3A40Z&se=2019-06-20T01%3A27%3A40Z&sp=r'}}\n", - "\n", - "\n", - "\n", - "PipelineRun Execution Summary\n", - "==============================\n", - "PipelineRun Status: Finished\n", - "{'runId': '994a7673-8f48-42b9-9cfa-1a47bf70c304', 'status': 'Completed', 'startTimeUtc': '2019-06-19T17:11:01.903161Z', 'endTimeUtc': '2019-06-19T17:27:39.802989Z', 'properties': {'azureml.runsource': 'azureml.PipelineRun', 'runSource': None, 'runType': 'HTTP', 'azureml.parameters': '{}'}, 'logFiles': {'logs/azureml/executionlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.994a7673-8f48-42b9-9cfa-1a47bf70c304/logs/azureml/executionlogs.txt?sv=2018-03-28&sr=b&sig=YB7H6UmXpGnLZ0ir%2FK2eQuYfwb4pQ5CKXlJhcRnPTqE%3D&st=2019-06-19T17%3A17%3A42Z&se=2019-06-20T01%3A27%3A42Z&sp=r', 'logs/azureml/stdoutlogs.txt': 
'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.994a7673-8f48-42b9-9cfa-1a47bf70c304/logs/azureml/stdoutlogs.txt?sv=2018-03-28&sr=b&sig=PPsWsEXEsUhc%2F8BPXh5KnX6Ze5fUbSohjcqgeghWaIk%3D&st=2019-06-19T17%3A17%3A42Z&se=2019-06-20T01%3A27%3A42Z&sp=r', 'logs/azureml/stderrlogs.txt': 'https://maidaptest3334372853.blob.core.windows.net/azureml/ExperimentRun/dcid.994a7673-8f48-42b9-9cfa-1a47bf70c304/logs/azureml/stderrlogs.txt?sv=2018-03-28&sr=b&sig=UQ7xiAJdha6PiJENVKM0OiuduPNfgQbSNrZdEHddO7c%3D&st=2019-06-19T17%3A17%3A42Z&se=2019-06-20T01%3A27%3A42Z&sp=r'}}\n", - "\n" - ] - }, - { - "data": { - "text/plain": [ - "'Finished'" - ] - }, - "execution_count": 58, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "pipeline_run.wait_for_completion(show_output=True) #show console output while run is in progress" ] From 26cfbbcb7cf1e3ecbec133f86c5b7eb332df09d7 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sun, 23 Jun 2019 21:22:14 -0400 Subject: [PATCH 079/108] Added ACI deployment of both pipeline steps --- .../automl_with_pipelines.ipynb | 559 ++++++++++++++++-- 1 file changed, 506 insertions(+), 53 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index f17aec991..8fe674295 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -130,9 +130,19 @@ }, { "cell_type": "code", - "execution_count": 34, - "metadata": {}, + "execution_count": 1, + "metadata": { + "scrolled": false + }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING: Logging before flag parsing goes to stderr.\n", + "W0623 20:14:33.703469 29968 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -161,6 +171,7 @@ "from scipy.stats import pearsonr\n", "from scipy.spatial import distance\n", "from sklearn.externals import joblib\n", + "import json\n", "\n", "# Import utils\n", "from utils_nlp.azureml import azureml_utils\n", @@ -170,6 +181,7 @@ " to_spacy_tokens,\n", " rm_spacy_stopwords,\n", ")\n", + "from utils_nlp.common.timer import Timer\n", "\n", "# Tensorflow dependencies for Google Universal Sentence Encoder\n", "import tensorflow as tf\n", @@ -181,16 +193,18 @@ "import logging\n", "from azureml.telemetry import set_diagnostics_collection\n", "set_diagnostics_collection(send_diagnostics=True)\n", - "from azureml.train.automl import AutoMLConfig\n", "from azureml.core import Datastore, Experiment\n", - "from azureml.data.data_reference import DataReference \n", - "from azureml.widgets import RunDetails\n", "from azureml.core.compute import ComputeTarget, AmlCompute\n", "from azureml.core.runconfig import RunConfiguration\n", "from azureml.core.conda_dependencies import CondaDependencies\n", - "from azureml.train.automl import AutoMLStep\n", + "from azureml.core.webservice import AciWebservice, Webservice\n", + "from azureml.core.image import ContainerImage\n", + "from azureml.core.model import Model\n", + "from azureml.train.automl import AutoMLStep, AutoMLStepRun, AutoMLConfig\n", "from azureml.pipeline.core import Pipeline, PipelineData, TrainingOutput\n", "from azureml.pipeline.steps import PythonScriptStep\n", + "from azureml.data.data_reference import DataReference \n", + "from azureml.widgets import RunDetails\n", "\n", "print(\"System 
version: {}\".format(sys.version))\n", "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", @@ -200,7 +214,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -225,14 +239,14 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:02<00:00, 191KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 227KB/s]\n" ] }, { @@ -246,7 +260,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 211KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 246KB/s]\n" ] }, { @@ -260,7 +274,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 210KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 227KB/s]\n" ] }, { @@ -280,7 +294,7 @@ }, { "cell_type": "code", - "execution_count": 37, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -293,7 +307,7 @@ }, { "cell_type": "code", - "execution_count": 38, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -305,7 +319,7 @@ }, { "cell_type": "code", - "execution_count": 39, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -326,7 +340,7 @@ }, { "cell_type": "code", - "execution_count": 40, + "execution_count": 7, "metadata": {}, "outputs": [ { @@ -406,7 +420,7 @@ "4 a man seated is playing the cello. " ] }, - "execution_count": 40, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -417,7 +431,7 @@ }, { "cell_type": "code", - "execution_count": 41, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -457,16 +471,56 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0623 20:14:53.197878 29968 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0623 20:14:53.476237 28700 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "W0623 20:15:00.695510 29968 _profile.py:774] You have logged in. 
Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n" + ] + } + ], "source": [ "ws = azureml_utils.get_or_create_workspace(\n", " subscription_id=\"\",\n", " resource_group=\"\",\n", " workspace_name=\"\",\n", " workspace_region=\"\"\n", - ")\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "print('Workspace name: ' + ws.name, \n", " 'Azure region: ' + ws.location, \n", " 'Subscription id: ' + ws.subscription_id, \n", @@ -482,7 +536,7 @@ }, { "cell_type": "code", - "execution_count": 43, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -515,7 +569,7 @@ }, { "cell_type": "code", - "execution_count": 44, + "execution_count": 11, "metadata": {}, "outputs": [ { @@ -523,7 +577,7 @@ "output_type": "stream", "text": [ "Found existing compute target.\n", - "{'currentNodeCount': 1, 'targetNodeCount': 1, 'nodeStateCounts': {'preparingNodeCount': 1, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-19T17:10:43.157000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-20T02:00:52.824000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] } ], @@ -559,12 +613,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This step uploads our local data to a `Datastore` so that the data is accessible from the remote compute target and creates a `DataReference` to point to the location of the data on the Datastore. A DataStore is backed either by a Azure File Storage (default option) or Azure Blob Storage ([how to decide between these options](https://docs.microsoft.com/en-us/azure/storage/common/storage-decide-blobs-files-disks)) and data is made accessible by mounting or copying data to the compute target." + "This step uploads our local data to a `Datastore` so that the data is accessible from the remote compute target and creates a `DataReference` to point to the location of the data on the Datastore. A DataStore is backed either by a Azure File Storage (default option) or Azure Blob Storage ([how to decide between these options](https://docs.microsoft.com/en-us/azure/storage/common/storage-decide-blobs-files-disks)) and data is made accessible by mounting or copying data to the compute target. `ws.datastores` lists all options for datastores and `ds.account_name` gets the name of the datastore that can be used to find it in the Azure portal." 
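A minimal sketch, assuming the `ws` and `ds` objects and the `stsbenchmark_data` target path defined in the surrounding cells, of how the uploaded folder can then be handed to a remote pipeline step as a `DataReference` (the notebook's own data-reference code later in the pipeline section is authoritative):

from azureml.data.data_reference import DataReference

# Mount the uploaded STS Benchmark folder on the compute target at run time;
# downstream steps resolve the mounted path through the reference name.
input_data = DataReference(
    datastore=ds,                           # Datastore that received the upload
    data_reference_name="stsbenchmark",     # name used by the consuming step
    path_on_datastore="stsbenchmark_data/", # folder created by ds.upload(...)
)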
] }, { "cell_type": "code", - "execution_count": 45, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -574,18 +628,18 @@ "Uploading ./data\\dev.csv\n", "Uploading ./data\\test.csv\n", "Uploading ./data\\train.csv\n", - "Uploaded ./data\\train.csv, 1 files out of an estimated total of 3\n", + "Uploaded ./data\\test.csv, 1 files out of an estimated total of 3\n", "Uploaded ./data\\dev.csv, 2 files out of an estimated total of 3\n", - "Uploaded ./data\\test.csv, 3 files out of an estimated total of 3\n" + "Uploaded ./data\\train.csv, 3 files out of an estimated total of 3\n" ] }, { "data": { "text/plain": [ - "$AZUREML_DATAREFERENCE_dbe7476178794853924424fdfbc4dcc1" + "$AZUREML_DATAREFERENCE_5ae3e36f570d4293ad895beac8538e18" ] }, - "execution_count": 45, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -593,7 +647,7 @@ "source": [ "# Select a specific datastore or you can call ws.get_default_datastore()\n", "datastore_name = 'workspacefilestore'\n", - "ds = ws.datastores[datastore_name]\n", + "ds = ws.datastores[datastore_name] \n", "\n", "# Upload files in data folder to the datastore\n", "ds.upload(src_dir='./data', target_path='stsbenchmark_data', overwrite=True, show_progress=True)" @@ -608,7 +662,7 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -650,7 +704,7 @@ }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -709,7 +763,7 @@ }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 15, "metadata": {}, "outputs": [ { @@ -831,7 +885,7 @@ }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -856,7 +910,7 @@ }, { "cell_type": "code", - "execution_count": 50, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -906,7 +960,7 @@ }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 18, "metadata": {}, "outputs": [ { @@ -980,13 +1034,13 @@ }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ "automl_settings = {\n", - " \"iteration_timeout_minutes\": 5, #How long each iteration can take before moving on\n", - " \"iterations\": 5, #Number of algorithm options to try\n", + " \"iteration_timeout_minutes\": 15, #How long each iteration can take before moving on\n", + " \"iterations\": 50, #Number of algorithm options to try\n", " \"primary_metric\": 'spearman_correlation', #Metric to optimize\n", " \"preprocess\": True, #Whether dataset preprocessing should be applied\n", " \"verbosity\": logging.INFO,\n", @@ -1016,7 +1070,7 @@ }, { "cell_type": "code", - "execution_count": 53, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -1036,7 +1090,7 @@ }, { "cell_type": "code", - "execution_count": 54, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -1065,7 +1119,7 @@ }, { "cell_type": "code", - "execution_count": 55, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -1078,17 +1132,17 @@ }, { "cell_type": "code", - "execution_count": 56, + "execution_count": 23, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Created step Embed [ba6c6c5a][d271deed-bd3b-4e41-9814-29fc11e585b4], (This step is eligible to reuse a previous run's output)\n", - "Created step AutoML [4cc715de][cd685f96-c946-46df-a4c4-87a53b92c90e], (This step will run and generate new 
outputs)\n", - "Using data reference stsbenchmark for StepId [0c4ee4ad][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", - "Submitted pipeline run: 994a7673-8f48-42b9-9cfa-1a47bf70c304\n" + "Created step Embed [8da9e158][d4cc417b-9bee-4f25-9980-8d490c2716d5], (This step is eligible to reuse a previous run's output)\n", + "Created step AutoML [505d087e][eb49a173-bec7-4812-b873-7d5a0d2a051e], (This step is eligible to reuse a previous run's output)\n", + "Using data reference stsbenchmark for StepId [fe0b62e2][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", + "Submitted pipeline run: 0db34daa-1d3d-4531-bbf0-6b647864f4ca\n" ] } ], @@ -1113,25 +1167,424 @@ "![](pipelineWidget.PNG)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "pipeline_run.wait_for_completion(show_output=True) #show console output while run is in progress" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Cancel the Run**\n", + "\n", + "Interupting/Restarting the jupyter kernel will not properly cancel the run, which can lead to wasting compute resources. To avoid this, we recommend explicitly canceling a run with the following code:\n", + "\n", + "`pipeline_run.cancel()`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Deployment" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Register the best AutoML model based on the pipeline results or load the saved model" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Found model with name 4c242e457281488best\n" + ] + } + ], + "source": [ + "automl_model_name = '4c242e457281488best'\n", + "\n", + "try:\n", + " model = Model(ws, name= automl_model_name)\n", + " print(\"Found model with name\", automl_model_name)\n", + "except:\n", + " automl_step_run = AutoMLStepRun(step_run=pipeline_run.find_step_run('AutoML')[0])\n", + " # to get the outputs\n", + " best_run, fitted_model = automl_step_run.get_output()\n", + "\n", + " # to register the fitted_mode\n", + " description = 'AutoML Model'\n", + " tags = None\n", + " model = automl_step_run.register_model(model_name= automl_model_name, description = description, tags = tags)\n", + " print(\"Registered model with name\", automl_model_name)\n", + " print(automl_step_run.model_id) # Use this id to deploy the model as a web service in Azure." 
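As a quick check before building the scoring image, the registrations can be listed and re-loaded by name; a minimal sketch, assuming the `ws` workspace object and the model names registered above:

from azureml.core.model import Model

# Print every registered model version in the workspace.
for m in Model.list(ws):
    print(m.name, m.version)

# Loading a registration by name returns its latest version.
automl_model = Model(ws, name="4c242e457281488best")
use_model = Model(ws, name="googleUSEmodel")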
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Register the Google Universal Sentence Encoder model if not already registered in your workspace" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Registering model googleUSEmodel\n", + "Registered googleUSEembeddings model\n" + ] + } + ], + "source": [ + "try:\n", + " embedding_model = Model(ws, name= 'googleUSEmodel')\n", + " print(\"Found model with name googleUSEembeddings\")\n", + "\n", + "except:\n", + " #set location for where to download google tensorflow model\n", + " os.environ['TFHUB_CACHE_DIR'] = './googleUSE' \n", + " # download model\n", + " hub.Module(\"https://tfhub.dev/google/universal-sentence-encoder-large/3\")\n", + " # register model\n", + " embedding_model = Model.register(\n", + " model_path = \"googleUSE\",\n", + " model_name = \"googleUSEmodel\",\n", + " tags = {\"Model\": \"GoogleUSE\"},\n", + " description = \"Google Universal Sentence Embedding pretrained model\",\n", + " workspace = ws\n", + " )\n", + " print('Registered googleUSEembeddings model')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Write scoring script" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting score.py\n" + ] + } + ], + "source": [ + "%%writefile score.py\n", + "import pickle\n", + "import json\n", + "import numpy as np\n", + "import azureml.train.automl\n", + "from sklearn.externals import joblib\n", + "from azureml.core.model import Model\n", + "import pandas as pd\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "import os\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "def google_encoder(dataset):\n", + " \"\"\" Function that embeds sentences using the Google Universal\n", + " Sentence Encoder pretrained model\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " emb1: 512-dimensional representation of sentence1\n", + " emb2: 512-dimensional representation of sentence2\n", + " \"\"\"\n", + " global embedding_model, sess\n", + " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", + " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", + "\n", + " # Apply embedding model and normalize the input\n", + " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", + " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", + " \n", + " sess.run(tf.global_variables_initializer())\n", + " sess.run(tf.tables_initializer())\n", + " emb1, emb2 = sess.run(\n", + " [sts_encode1, sts_encode2],\n", + " feed_dict={\n", + " sts_input1: dataset['sentence1'],\n", + " sts_input2: dataset['sentence2']\n", + " })\n", + " return emb1, emb2\n", + " \n", + "def feature_engineering(dataset):\n", + " \"\"\"Extracts embedding features from the dataset and returns\n", + " features and target in a dataframe\n", + " \n", + " Parameters:\n", + " ----------\n", + " dataset: pandas dataframe with sentences and scores\n", + " \n", + " Returns:\n", + " -------\n", + " df: pandas dataframe with embedding features\n", + " scores: list of target variables\n", + " \"\"\"\n", + " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", + " n_google = google_USE_emb1.shape[1] 
#length of the embeddings \n", + " return np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", + "\n", + "def init():\n", + " global model, googleUSE_dir_path\n", + " model_path = Model.get_model_path(model_name = '<>') # this name is model.id of model that we want to deploy\n", + " # deserialize the model file back into a sklearn model\n", + " model = joblib.load(model_path)\n", + " \n", + " #load the path for google USE embedding model\n", + " googleUSE_dir_path = Model.get_model_path(model_name = 'googleUSEmodel')\n", + " os.environ['TFHUB_CACHE_DIR'] = googleUSE_dir_path\n", + "\n", + "def run(rawdata):\n", + " global embedding_model, sess, googleUSE_dir_path, model\n", + " try:\n", + " #load data and convert to dataframe\n", + " data = json.loads(rawdata)['data']\n", + " data_df = pd.DataFrame(data, columns=['sentence1','sentence2'])\n", + " \n", + " #begin a tensorflow session and load tensorhub module\n", + " sess = tf.Session()\n", + " embedding_model = hub.Module(googleUSE_dir_path+\"/96e8f1d3d4d90ce86b2db128249eb8143a91db73\")\n", + " \n", + " #Embed sentences using Google USE model\n", + " embedded_data = feature_engineering(data_df)\n", + " #Predict using AutoML saved model\n", + " result = model.predict(embedded_data)\n", + " \n", + " except Exception as e:\n", + " result = str(e)\n", + " sess.close()\n", + " return json.dumps({\"error\": result})\n", + " \n", + " sess.close()\n", + " return json.dumps({\"result\":result.tolist()})" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "# Substitute the actual model id in the script file.\n", + "script_file_name = 'score.py'\n", + "\n", + "with open(script_file_name, 'r') as cefr:\n", + " content = cefr.read()\n", + "\n", + "with open(script_file_name, 'w') as cefw:\n", + " cefw.write(content.replace('<>', automl_model_name))" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'autoenv.yml'" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80', 'pandas', 'tensorflow', 'tensorflow-hub'],\n", + " pip_packages=['azureml-sdk[automl]'], python_version = '3.6.8')\n", + "\n", + "conda_env_file_name = 'autoenv.yml'\n", + "myenv.save_to_file('.', conda_env_file_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating image\n", + "Running................................................................................\n", + "Succeeded\n", + "Image creation operation finished for image pipeline-image:48, operation \"Succeeded\"\n" + ] + } + ], + "source": [ + "#trying to add dependencies\n", + "image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n", + " runtime = \"python\",\n", + " conda_file = \"autoenv.yml\",\n", + " description = \"Image with aml pipeline model\",\n", + " tags = {'area': \"nlp\", 'type': \"sentencesimilarity pipeline\"})\n", + "\n", + "image = ContainerImage.create(name = \"pipeline-image\",\n", + " # this is the model object\n", + " models = [model, embedding_model], #add both embedding and autoML models\n", + " image_config = image_config,\n", + " workspace = ws)\n", + "\n", + "image.wait_for_creation(show_output = True)" + ] + }, + { + "cell_type": 
"code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "#Set the web service configuration (using default here)\n", + "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", + " memory_gb = 8)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating service\n", + "Running..............................................\n", + "SucceededACI service creation operation finished, operation \"Succeeded\"\n", + "Healthy\n" + ] + } + ], + "source": [ + "# deploy image as web service\n", + "aci_service_name ='aci-sentence-sim'\n", + "aci_service = Webservice.deploy_from_image(workspace = ws, \n", + " name = aci_service_name,\n", + " image = image,\n", + " deployment_config = aci_config)\n", + "\n", + "aci_service.wait_for_deployment(show_output = True)\n", + "print(aci_service.state)" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "pipeline_run.wait_for_completion(show_output=True) #show console output while run is in progress" + "aci_service.get_logs() " + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "# load test set sentences\n", + "data = pd.read_csv(\"data/test.csv\")\n", + "train_y = data['score'].values.flatten()\n", + "train_x = data.drop(\"score\", axis=1).values.tolist()[:500]\n", + "data = {'data': train_x}\n", + "data = json.dumps(data)" ] }, { "cell_type": "code", - "execution_count": 59, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ - "#Publish the pipeline\n", - "published_pipeline = pipeline.publish(\n", - " name=\"Sentence_Similarity_Pipeline\", \n", - " description=\"Sentence Similarity with Google USE Features\")" + "# Set up a Timer to see how long the model takes to train\n", + "t = Timer()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time elapsed: 44.0914\n", + "Number of sentences encoded : 500\n" + ] + } + ], + "source": [ + "#print time here\n", + "t.start()\n", + "score = aci_service.run(input_data = data)\n", + "t.stop()\n", + "print(\"Time elapsed: {}\".format(t))\n", + "\n", + "result = json.loads(score)\n", + "output = result[\"result\"]\n", + "\n", + "# embeddings will print the error message incase error occurs.\n", + "print('Number of sentences encoded : {0}'.format(len(output)))" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.8352580253296595\n" + ] + } + ], + "source": [ + "#get Pearson Correlation\n", + "print(pearsonr(output, train_y[:500])[0])" ] } ], From 9b12d60373c2d80f52f9e5e50a43e2068206429c Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sun, 23 Jun 2019 23:00:20 -0400 Subject: [PATCH 080/108] edits to automl pipelines notebook --- .../automl_with_pipelines.ipynb | 23 +++++++++++-------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 8fe674295..1298e4fb1 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -24,6 +24,8 @@ "1. 
PythonScriptStep: embeds sentences using a popular sentence embedding model, Google Universal Sentence Encoder\n", "2. AutoMLStep: demonstrates how to use AutoML to automate model selection for predicting sentence similarity (regression)\n", "\n", + "After creating the pipeline, the notebook demonstrates the deployment of our sentence similarity model using Azure Container Instances (ACI).\n", + "\n", "This notebook showcases how to use the following AzureML features: \n", "- AzureML Pipelines\n", "- AutoML\n", @@ -66,7 +68,8 @@ " * 4.3.3 [Create AutoMLStep](#4.3.3-Create-AutoMLStep)\n", " \n", " \n", - "5. [Run Pipeline](#5.-Run-Pipeline)" + "5. [Run Pipeline](#5.-Run-Pipeline)\n", + "6. [Deploy Sentence Similarity Model](#6.-Deploy-Sentence-Similarity-Model)" ] }, { @@ -98,7 +101,7 @@ "source": [ "### 1.2 What is Azure AutoML?\n", "\n", - "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", + "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To achieve this goal, AutoML automates the process of selecting a ML model and tuning the model. AutoML even has preprocessing capabilities to engineer features from raw data. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", "\n", "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" ] @@ -114,7 +117,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in python notebooks through the use of widgets. AutoML quickly selects a high quilty machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). " + "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in python notebooks through the use of widgets. AutoML quickly selects a high quality machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. 
More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). " ] }, { @@ -234,7 +237,7 @@ "source": [ "**STS Benchmark Dataset**\n", "\n", - "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." + "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." ] }, { @@ -657,7 +660,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We also set up a **DataReference** object that points to the data we just uploaded into the stsbenchmark_data folder. DataReference objects point to data that is accessible from a datastore and will be used an an input into our pipeline." + "We also set up a `DataReference` object that points to the data we just uploaded into the stsbenchmark_data folder. DataReference objects point to data that is accessible from a datastore and will be used an an input into our pipeline." ] }, { @@ -1029,7 +1032,7 @@ "metadata": {}, "source": [ "**Constraints:** \n", - "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after acheiving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." + "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after achieving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." 
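As a concrete illustration of the constraint options just described, a minimal AutoMLConfig sketch follows. The values are placeholders chosen for illustration only (they are not the settings used in this notebook's pipeline), and it assumes in-memory arrays X_train/y_train rather than the get_data script that the pipeline version supplies to AutoML.

    from azureml.train.automl import AutoMLConfig

    # Illustrative constraint settings only -- not the configuration used in this pipeline.
    constrained_config = AutoMLConfig(
        task='regression',
        primary_metric='spearman_correlation',
        iterations=25,                    # total algorithm/parameter combinations to try
        iteration_timeout_minutes=10,     # cap on each individual iteration
        experiment_timeout_minutes=120,   # cap on all iterations combined
        experiment_exit_score=0.7,        # stop early once this spearman_correlation is reached
        X=X_train,                        # assumed in-memory training features
        y=y_train,                        # assumed training targets
    )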
] }, { @@ -1168,8 +1171,10 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ "pipeline_run.wait_for_completion(show_output=True) #show console output while run is in progress" ] @@ -1180,7 +1185,7 @@ "source": [ "**Cancel the Run**\n", "\n", - "Interupting/Restarting the jupyter kernel will not properly cancel the run, which can lead to wasting compute resources. To avoid this, we recommend explicitly canceling a run with the following code:\n", + "Interrupting/Restarting the jupyter kernel will not properly cancel the run, which can lead to wasting compute resources. To avoid this, we recommend explicitly canceling a run with the following code:\n", "\n", "`pipeline_run.cancel()`" ] @@ -1189,7 +1194,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Deployment" + "# 6. Deploy Sentence Similarity Model" ] }, { From 551fa93051a6feeedd07455811eac073a59f61bf Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Mon, 24 Jun 2019 00:25:34 -0400 Subject: [PATCH 081/108] edits to automl pipelines notebook --- .../sentence_similarity/automl_with_pipelines.ipynb | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 1298e4fb1..7e1fd612f 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -43,31 +43,22 @@ " * 1.1 [What are AzureML Pipelines?](#1.1-What-are-AzureML-Pipelines?) \n", " * 1.2 [What is Azure AutoML?](#1.2-What-is-Azure-AutoML?) \n", " * 1.3 [Modeling Problem](#1.3-Modeling-Problem) \n", - " \n", - " \n", "2. [Data Preparation](#2.-Data-Preparation) \n", - "\n", - "\n", "3. [AzureML Setup](#3.-AzureML-Setup) \n", " * 3.1 [Link to or create a `Workspace`](#3.1-Link-to-or-create-a-Workspace) \n", " * 3.2 [Set up an `Experiment` and logging](#3.2-Set-up-an-Experiment-and-logging) \n", " * 3.3 [Link `AmlCompute` compute target](#3.3-Link-AmlCompute-compute-target) \n", " * 3.4 [Upload data to `Datastore`](#3.4-Upload-data-to-Datastore) \n", - " \n", - " \n", "4. [Create AzureML Pipeline](#4.-Create-AzureML-Pipeline) \n", " * 4.1 [Set up run configuration file](#4.1-Set-up-run-configuration-file) \n", " * 4.2 [PythonScriptStep](#4.2-PythonScriptStep) \n", " * 4.2.1 [Define python script to run](#4.2.1-Define-python-script-to-run)\n", " * 4.2.2 [Create PipelineData object](#4.2.2-Create-PipelineData-object)\n", " * 4.2.3 [Create PythonScriptStep](#4.2.3-Create-PythonScriptStep)\n", - " \n", " * 4.3 [AutoMLStep](#4.3-AutoMLStep)\n", " * 4.3.1 [Define get_data script to load data](#4.3.1-Define-get_data-script-to-load-data)\n", " * 4.3.2 [Create AutoMLConfig object](#4.3.2-Create-AutoMLConfig-object)\n", - " * 4.3.3 [Create AutoMLStep](#4.3.3-Create-AutoMLStep)\n", - " \n", - " \n", + " * 4.3.3 [Create AutoMLStep](#4.3.3-Create-AutoMLStep) \n", "5. [Run Pipeline](#5.-Run-Pipeline)\n", "6. 
[Deploy Sentence Similarity Model](#6.-Deploy-Sentence-Similarity-Model)" ] From cf1d13e5d0fdbdebece20fab1aa2cd82dbc3c4d1 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Mon, 24 Jun 2019 10:54:57 -0400 Subject: [PATCH 082/108] Changed notebook to use automl embeddings vs google USE --- .../automl_deployment_local.ipynb | 1085 +++++++++++++++++ 1 file changed, 1085 insertions(+) create mode 100644 scenarios/sentence_similarity/automl_deployment_local.ipynb diff --git a/scenarios/sentence_similarity/automl_deployment_local.ipynb b/scenarios/sentence_similarity/automl_deployment_local.ipynb new file mode 100644 index 000000000..9a61e40ef --- /dev/null +++ b/scenarios/sentence_similarity/automl_deployment_local.ipynb @@ -0,0 +1,1085 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Copyright (c) Microsoft Corporation. All rights reserved.\n", + "\n", + "Licensed under the MIT License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Using AutoML for Predicting Sentence Similarity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrates how to use Azure AutoML locally to automate machine learning model selection and tuning and how to use Azure Container Instances (ACI) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and utilize AutoML's text preprocessing features." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "1. [Introduction](#1.-Introduction) \n", + " * 1.1 [What is Azure AutoML?](#1.1-What-is-Azure-AutoML?) \n", + " * 1.2 [Modeling Problem](#1.2-Modeling-Problem) \n", + " \n", + " \n", + "2. [Data Preparation](#2.-Data-Preparation) \n", + "\n", + "\n", + "3. [Create AutoML Run](#3.-Create-AutoML-Run) \n", + " * 3.1 [Link to or create a Workspace](#3.1-Link-to-or-create-a-Workspace) \n", + " * 3.2 [Create AutoMLConfig object](#3.2-Create-AutoMLConfig-object)\n", + " * 3.3 [Run Experiment](#3.3-Run-Experiment)\n", + " \n", + " \n", + "4. [Deploy Sentence Similarity Model](#4.-Deploy-Sentence-Similarity-Model) \n", + " 4.1 [Retrieve the Best Model](#4.1-Retrieve-the-Best-Model) \n", + " 4.2 [Register the Fitted Model for Deployment](#4.2-Register-the-Fitted-Model-for-Deployment) \n", + " 4.3 [Create Scoring Script](#4.3-Create-Scoring-Script) \n", + " 4.4 [Create a YAML File for the Environment](#4.4-Create-a-YAML-File-for-the-Environment) \n", + " 4.5 [Create a Container Image](#4.5-Create-a-Container-Image) \n", + " 4.6 [Deploy the Image as a Web Service on Azure Container Instance](#4.6-Deploy-the-Image-as-a-Web-Service-on-Azure-Container-Instance) \n", + " 4.7 [Test Deployed Model](#4.7-Test-Deployed-Model) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.1 What is Azure AutoML?\n", + "\n", + "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. 
The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", + "\n", + "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](automl.png)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in python notebooks through the use of widgets. AutoML quickly selects a high quilty machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.2 Modeling Problem\n", + "\n", + "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence). The sentence pairs will be embedded using AutoML's built-in preprocessing, so we'll pass the sentences directly into the model." + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Turning diagnostics collection on. 
\n", + "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", + "Azure ML SDK Version: 1.0.41\n", + "Pandas version: 0.23.4\n", + "Tensorflow Version: 1.13.1\n" + ] + } + ], + "source": [ + "# Set the environment path to find NLP\n", + "import sys\n", + "sys.path.append(\"../../\")\n", + "import time\n", + "import os\n", + "import pandas as pd\n", + "import shutil\n", + "import numpy as np\n", + "import torch\n", + "import sys\n", + "from scipy.stats import pearsonr\n", + "from scipy.spatial import distance\n", + "from sklearn.externals import joblib\n", + "import json\n", + "\n", + "# Import utils\n", + "from utils_nlp.azureml import azureml_utils\n", + "from utils_nlp.dataset import stsbenchmark\n", + "from utils_nlp.dataset.preprocess import (\n", + " to_lowercase,\n", + " to_spacy_tokens,\n", + " rm_spacy_stopwords,\n", + ")\n", + "from utils_nlp.common.timer import Timer\n", + "\n", + "# Tensorflow dependencies for Google Universal Sentence Encoder\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "# AzureML packages\n", + "import azureml as aml\n", + "import logging\n", + "from azureml.telemetry import set_diagnostics_collection\n", + "set_diagnostics_collection(send_diagnostics=True)\n", + "from azureml.train.automl import AutoMLConfig\n", + "from azureml.core.experiment import Experiment\n", + "from azureml.widgets import RunDetails\n", + "from azureml.train.automl.run import AutoMLRun\n", + "from azureml.core.webservice import AciWebservice, Webservice\n", + "from azureml.core.image import ContainerImage\n", + "from azureml.core.conda_dependencies import CondaDependencies\n", + "\n", + "print(\"System version: {}\".format(sys.version))\n", + "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", + "print(\"Pandas version: {}\".format(pd.__version__))\n", + "print(\"Tensorflow Version:\", tf.VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "BASE_DATA_PATH = '../../data'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 2. Data Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## STS Benchmark Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 195KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 165KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 225KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + } + ], + "source": [ + "# Load in the raw datasets as pandas dataframes\n", + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", + "# and renaming the remaining columns\n", + "train_clean = stsbenchmark.clean_sts(train_raw)\n", + "dev_clean = stsbenchmark.clean_sts(dev_raw)\n", + "test_clean = stsbenchmark.clean_sts(test_raw)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Convert all text to lowercase\n", + "train = to_lowercase(train_clean)\n", + "dev = to_lowercase(dev_clean)\n", + "test = to_lowercase(test_clean)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training set has 5749 sentences\n", + "Development set has 1500 sentences\n", + "Testing set has 1379 sentences\n" + ] + } + ], + "source": [ + "print(\"Training set has {} sentences\".format(len(train)))\n", + "print(\"Development set has {} sentences\".format(len(dev)))\n", + "print(\"Testing set has {} sentences\".format(len(test)))" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
scoresentence1sentence2
05.00a plane is taking off.an air plane is taking off.
13.80a man is playing a large flute.a man is playing a flute.
23.80a man is spreading shreded cheese on a pizza.a man is spreading shredded cheese on an uncoo...
32.60three men are playing chess.two men are playing chess.
44.25a man is playing the cello.a man seated is playing the cello.
\n", + "
" + ], + "text/plain": [ + " score sentence1 \\\n", + "0 5.00 a plane is taking off. \n", + "1 3.80 a man is playing a large flute. \n", + "2 3.80 a man is spreading shreded cheese on a pizza. \n", + "3 2.60 three men are playing chess. \n", + "4 4.25 a man is playing the cello. \n", + "\n", + " sentence2 \n", + "0 an air plane is taking off. \n", + "1 a man is playing a flute. \n", + "2 a man is spreading shredded cheese on an uncoo... \n", + "3 two men are playing chess. \n", + "4 a man seated is playing the cello. " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train.head(5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3. Create AutoML Run" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "AutoML can be used for classification, regression or timeseries experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.1 Link to or create a Workspace" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0623 22:36:11.699752 28712 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", + "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", + "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "W0623 22:36:12.707345 26644 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "W0623 22:36:21.440700 28712 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n" + ] + } + ], + "source": [ + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.2 Create AutoMLConfig object\n", + "Next, we specify the parameters for the AutoMLConfig class. 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**task** \n", + "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**preprocess** \n", + "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**primary_metric** \n", + "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Constraints:** \n", + "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after achieving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "automl_settings = {\n", + " \"task\": 'regression', #type of task: classification, regression or forecasting\n", + " \"debug_log\": 'automated_ml_errors.log',\n", + " \"path\": './automated-ml-regression',\n", + " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", + " \"iterations\" : 50, #Number of algorithm options to try\n", + " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", + " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", + " \"verbosity\":logging.ERROR}" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "X_train = train.drop(\"score\", axis=1).values\n", + "y_train = train['score'].values.flatten()\n", + "X_validation = dev.drop(\"score\", axis=1).values\n", + "y_validation = dev['score'].values.flatten()\n", + "\n", + "# local compute\n", + "automated_ml_config = AutoMLConfig(\n", + " X = X_train,\n", + " y = y_train,\n", + " X_valid = X_validation,\n", + " y_valid = y_validation,\n", + " **automl_settings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.3 Run Experiment\n", + "\n", + "Run the experiment locally and inspect the results using a widget" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running on local machine\n", + "Parent Run ID: AutoML_5d78c9ca-8ef5-4de8-8185-ff8d9c215fd2\n", + "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", + "Current status: DatasetEvaluation. Gathering dataset statistics.\n", + "Current status: FeaturesGeneration. Generating features for the dataset.\n", + "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", + "Current status: ModelSelection. 
Beginning model selection.\n", + "\n", + "****************************************************************************************************\n", + "ITERATION: The iteration being evaluated.\n", + "PIPELINE: A summary description of the pipeline being evaluated.\n", + "DURATION: Time taken for the current iteration.\n", + "METRIC: The result of computing score on the fitted pipeline.\n", + "BEST: The best observed score thus far.\n", + "****************************************************************************************************\n", + "\n", + " ITERATION PIPELINE DURATION METRIC BEST\n", + " 0 StandardScalerWrapper RandomForest 0:00:55 0.0551 0.0551\n", + " 1 MaxAbsScaler RandomForest 0:02:03 0.2161 0.2161\n", + " 2 StandardScalerWrapper ExtremeRandomTrees 0:01:11 0.1536 0.2161\n", + " 3 StandardScalerWrapper LightGBM 0:01:18 0.2905 0.2905\n", + " 4 StandardScalerWrapper RandomForest 0:00:55 0.0993 0.2905\n", + " 5 MaxAbsScaler ExtremeRandomTrees 0:01:26 0.2212 0.2905\n", + " 6 StandardScalerWrapper ExtremeRandomTrees 0:01:16 0.2179 0.2905\n", + " 7 MaxAbsScaler DecisionTree 0:01:04 0.1751 0.2905\n", + " 8 MaxAbsScaler ExtremeRandomTrees 0:01:07 0.1676 0.2905\n", + " 9 MaxAbsScaler SGD 0:00:57 0.1439 0.2905\n", + " 10 StandardScalerWrapper RandomForest 0:00:56 0.0106 0.2905\n", + " 11 StandardScalerWrapper DecisionTree 0:00:57 0.1145 0.2905\n", + " 12 MaxAbsScaler SGD 0:01:06 0.1320 0.2905\n", + " 13 MaxAbsScaler DecisionTree 0:00:59 0.1490 0.2905\n", + " 14 StandardScalerWrapper RandomForest 0:01:08 0.1994 0.2905\n", + " 15 MaxAbsScaler RandomForest 0:00:59 0.0474 0.2905\n", + " 16 MaxAbsScaler ElasticNet 0:00:59 nan 0.2905\n", + " 17 MaxAbsScaler ExtremeRandomTrees 0:00:56 0.0972 0.2905\n", + " 18 MaxAbsScaler DecisionTree 0:00:57 0.1686 0.2905\n", + " 19 StandardScalerWrapper RandomForest 0:00:57 0.1139 0.2905\n", + " 20 StandardScalerWrapper LightGBM 0:01:41 0.6102 0.6102\n", + " 21 MaxAbsScaler DecisionTree 0:06:03 0.1807 0.6102\n", + " 22 StandardScalerWrapper XGBoostRegressor 0:02:01 0.2740 0.6102\n", + " 23 StandardScalerWrapper LightGBM 0:01:14 0.3608 0.6102\n", + " 24 MaxAbsScaler DecisionTree 0:05:05 0.1105 0.6102\n", + " 25 TruncatedSVDWrapper LightGBM 0:03:45 0.4076 0.6102\n", + " 26 MaxAbsScaler RandomForest 0:09:24 0.1617 0.6102\n", + " 27 StandardScalerWrapper RandomForest 0:04:49 0.1695 0.6102\n", + " 28 MaxAbsScaler DecisionTree 0:01:18 0.1237 0.6102\n", + " 29 StandardScalerWrapper XGBoostRegressor 0:14:31 0.3548 0.6102\n", + " 30 TruncatedSVDWrapper LightGBM 0:03:20 0.3898 0.6102\n", + " 31 StandardScalerWrapper XGBoostRegressor 0:05:11 0.3367 0.6102\n", + " 32 StandardScalerWrapper XGBoostRegressor 0:10:10 0.4319 0.6102\n", + " 33 StandardScalerWrapper XGBoostRegressor 0:04:53 0.3045 0.6102\n", + " 34 SparseNormalizer XGBoostRegressor 0:06:14 0.3080 0.6102\n", + " 35 MaxAbsScaler LightGBM 0:01:29 0.2507 0.6102\n", + " 36 StandardScalerWrapper LightGBM 0:01:16 0.3288 0.6102\n", + " 37 MaxAbsScaler LightGBM 0:01:22 0.4191 0.6102\n", + " 38 StandardScalerWrapper XGBoostRegressor 0:14:07 0.4284 0.6102\n", + " 39 StandardScalerWrapper XGBoostRegressor 0:01:30 0.2736 0.6102\n", + " 40 TruncatedSVDWrapper LightGBM 0:02:53 0.3996 0.6102\n", + " 41 StandardScalerWrapper XGBoostRegressor 0:04:11 0.3436 0.6102\n", + " 42 StandardScalerWrapper XGBoostRegressor 0:05:39 0.3030 0.6102\n", + " 43 MaxAbsScaler LightGBM 0:01:22 0.3371 0.6102\n", + " 44 MaxAbsScaler LightGBM 0:01:24 0.4611 0.6102\n", + " 45 TruncatedSVDWrapper LightGBM 0:02:57 0.2378 0.6102\n", + " 46 
MaxAbsScaler LightGBM 0:01:20 0.6012 0.6102\n", + " 47 SparseNormalizer XGBoostRegressor 0:01:18 0.3183 0.6102\n", + " 48 0:15:12 nan 0.6102\n", + "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. Please consider increasing the iteration_timeout_minutes parameter.\n", + " 49 StackEnsemble 0:07:40 0.6485 0.6485\n" + ] + } + ], + "source": [ + "experiment=Experiment(ws, 'automated-ml-regression')\n", + "local_run = experiment.submit(automated_ml_config, show_output=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, by accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Inspect the run details using the provided widget\n", + "RunDetails(local_run).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](autoMLwidget.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 4. Deploy Sentence Similarity Model\n", + "\n", + "## 4.1 Retrieve the Best Model\n", + "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the get_output method which returns the best run and fitted model across all iterations. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", + "\n", + "The different steps that make up the pipeline can be accessed through `fitted_model.named_steps` and information about data preprocessing is available through `fitted_model.named_steps['datatransformer'].get_featurization_summary()`" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "best_run, fitted_model = local_run.get_output()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.2 Register the Fitted Model for Deployment\n", + "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered." 
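The get_output and register_model overloads mentioned in sections 4.1 and 4.2 can be sketched as follows. This is illustrative only: the metric name and iteration number are example values, and local_run/fitted_model are the objects already defined above.

    # Illustrative: select a run by a specific logged metric or by iteration number.
    run_by_rmse, model_by_rmse = local_run.get_output(metric='normalized_root_mean_squared_error')
    run_iter3, model_iter3 = local_run.get_output(iteration=3)

    # Inspect how AutoML featurized the raw sentence columns.
    featurization_summary = fitted_model.named_steps['datatransformer'].get_featurization_summary()

    # register_model accepts the same selectors; with no arguments it registers the iteration
    # that is best on the primary metric (spearman_correlation in this notebook).
    # model = local_run.register_model(metric='normalized_root_mean_squared_error')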
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Registering model AutoML5d78c9ca8best\n", + "AutoML5d78c9ca8best\n" + ] + } + ], + "source": [ + "description = 'AutoML Model'\n", + "tags = {'area': \"nlp\", 'type': \"sentence similarity automl\"}\n", + "name = 'automl'\n", + "model = local_run.register_model(description = description, tags = tags)\n", + "\n", + "print(local_run.model_id) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.3 Create Scoring Script" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting score.py\n" + ] + } + ], + "source": [ + "%%writefile score.py\n", + "import pickle\n", + "import json\n", + "import numpy\n", + "import azureml.train.automl\n", + "from sklearn.externals import joblib\n", + "from azureml.core.model import Model\n", + "\n", + "\n", + "def init():\n", + " global model\n", + " model_path = Model.get_model_path(model_name = '<>') # this name is model.id of model that we want to deploy\n", + " # deserialize the model file back into a sklearn model\n", + " model = joblib.load(model_path)\n", + "\n", + "def run(rawdata):\n", + " try:\n", + " data = json.loads(rawdata)['data']\n", + " data = numpy.array(data)\n", + " result = model.predict(data)\n", + " except Exception as e:\n", + " result = str(e)\n", + " return json.dumps({\"error\": result})\n", + " return json.dumps({\"result\":result.tolist()})" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "# Substitute the actual model id in the script file.\n", + "script_file_name = 'score.py'\n", + "\n", + "with open(script_file_name, 'r') as cefr:\n", + " content = cefr.read()\n", + "\n", + "with open(script_file_name, 'w') as cefw:\n", + " cefw.write(content.replace('<>', local_run.model_id))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.4 Create a YAML File for the Environment\n", + "\n", + "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, autoenv.yml, which specifies the dependencies from the run." 
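One way to keep the scoring image aligned with the training run is to pin the pip packages to the versions retrieved below; a sketch, assuming `dependencies` is the dict returned by ml_run.get_run_sdk_dependencies() in the following cells:

    # Sketch: pin azureml-sdk to the exact version recorded for the training run.
    pinned_env = CondaDependencies.create(
        conda_packages=['numpy', 'scikit-learn', 'py-xgboost<=0.80'],
        pip_packages=['azureml-sdk[automl]=={}'.format(dependencies['azureml-sdk'])],
        python_version='3.6.8',
    )
    pinned_env.save_to_file('.', 'autoenv.yml')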
+ ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "experiment = Experiment(ws, 'automated-ml-regression')\n", + "ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No issues found in the SDK package versions.\n" + ] + } + ], + "source": [ + "best_iteration = int(best_run.id.split(\"_\")[-1]) #get the appended iteration number for the best model\n", + "dependencies = ml_run.get_run_sdk_dependencies(iteration = best_iteration)" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'azureml-widgets': '1.0.41',\n", + " 'azureml-train': '1.0.41',\n", + " 'azureml-train-restclients-hyperdrive': '1.0.41',\n", + " 'azureml-train-core': '1.0.41.1',\n", + " 'azureml-train-automl': '1.0.41',\n", + " 'azureml-telemetry': '1.0.41',\n", + " 'azureml-sdk': '1.0.41',\n", + " 'azureml-pipeline': '1.0.41',\n", + " 'azureml-pipeline-steps': '1.0.41',\n", + " 'azureml-pipeline-core': '1.0.41.1',\n", + " 'azureml-explain-model': '1.0.41',\n", + " 'azureml-dataprep': '1.1.4',\n", + " 'azureml-dataprep-native': '13.0.0',\n", + " 'azureml-core': '1.0.41.1',\n", + " 'azureml-automl-core': '1.0.41'}" + ] + }, + "execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'autoenv.yml'" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80'],\n", + " pip_packages=['azureml-sdk[automl]'], \n", + " python_version = '3.6.8')\n", + "\n", + "conda_env_file_name = 'autoenv.yml'\n", + "myenv.save_to_file('.', conda_env_file_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.5 Create a Container Image" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating image\n", + "Running.....................................................\n", + "Succeeded\n", + "Image creation operation finished for image automl-image:4, operation \"Succeeded\"\n" + ] + } + ], + "source": [ + "image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n", + " runtime = \"python\",\n", + " conda_file = \"autoenv.yml\",\n", + " description = \"Image with automl model\",\n", + " tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"})\n", + "\n", + "image = ContainerImage.create(name = \"automl-image\",\n", + " # this is the model object\n", + " models = [model],\n", + " image_config = image_config,\n", + " workspace = ws)\n", + "\n", + "image.wait_for_creation(show_output = True)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "https://maidaptest3334372853.blob.core.windows.net/azureml/ImageLogs/47396d4d-c4fb-4706-9f18-bb78e20eefbb/build.log?sv=2018-03-28&sr=b&sig=nnAVwkrhuOjZ%2FMO8%2BJtyov2qIYJMzofWqmcoSvPvKCg%3D&st=2019-06-24T12%3A50%3A55Z&se=2019-07-24T12%3A55%3A55Z&sp=rl\n" + ] + } + ], + "source": [ + 
"print(image.image_build_log_uri) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.6 Deploy the Image as a Web Service on Azure Container Instance" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [], + "source": [ + "#Set the web service configuration (using default here)\n", + "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", + " memory_gb = 1)" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating service\n", + "Running............................\n", + "SucceededACI service creation operation finished, operation \"Succeeded\"\n", + "Healthy\n" + ] + } + ], + "source": [ + "# deploy image as web service\n", + "aci_service_name ='aci-service-automl-local'\n", + "aci_service = Webservice.deploy_from_image(workspace = ws, \n", + " name = aci_service_name,\n", + " image = image,\n", + " deployment_config = aci_config)\n", + "\n", + "aci_service.wait_for_deployment(show_output = True)\n", + "print(aci_service.state)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.7 Test Deployed Model" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [], + "source": [ + "sentences = []\n", + "test_y = test['score'].values.flatten()\n", + "test_x = test.drop(\"score\", axis=1).values.tolist()\n", + "\n", + "data = {'data': test_x}\n", + "data = json.dumps(data)" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [], + "source": [ + "# Set up a Timer to see how long the model takes to train\n", + "t = Timer()" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time elapsed: 3.2256\n", + "Number of sentences encoded : 1379\n" + ] + } + ], + "source": [ + "t.start()\n", + "score = aci_service.run(input_data = data)\n", + "t.stop()\n", + "print(\"Time elapsed: {}\".format(t))\n", + "\n", + "result = json.loads(score)\n", + "try:\n", + " output = result[\"result\"]\n", + " print('Number of sentences encoded : {0}'.format(len(output)))\n", + "except:\n", + " print(result['error'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll calculate the Pearson Correlation on the test set.\n", + "\n", + "**What is Pearson Correlation?**\n", + "\n", + "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", + "\n", + "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", + "\n", + "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.6069626886131223\n" + ] + } + ], + "source": [ + "print(pearsonr(output, test_y)[0])" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 574c77c5914c7eeb9d27cd6642ef6695c9c59f8a Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 26 Jun 2019 12:02:02 -0400 Subject: [PATCH 083/108] Notebooks PR ready --- ...une 18 Following Regression Notebook.ipynb | 1316 ---- .../sentence_similarity/autoMLwidget.PNG | Bin 87866 -> 0 bytes ...nt_google_universal_sentence_encoder.ipynb | 1223 ---- .../automl_deployment_local.ipynb | 1085 --- ...ml_google_universal_sentence_encoder.ipynb | 6367 ----------------- .../automl_with_pipelines.ipynb | 435 +- .../sentence_similarity/pipelineWidget.PNG | Bin 109142 -> 0 bytes scenarios/sentence_similarity/pipelines.png | Bin 18681 -> 0 bytes 8 files changed, 306 insertions(+), 10120 deletions(-) delete mode 100644 scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb delete mode 100644 scenarios/sentence_similarity/autoMLwidget.PNG delete mode 100644 scenarios/sentence_similarity/automl_and_deployment_google_universal_sentence_encoder.ipynb delete mode 100644 scenarios/sentence_similarity/automl_deployment_local.ipynb delete mode 100644 scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb delete mode 100644 scenarios/sentence_similarity/pipelineWidget.PNG delete mode 100644 scenarios/sentence_similarity/pipelines.png diff --git a/scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb b/scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb deleted file mode 100644 index 9831b23d3..000000000 --- a/scenarios/sentence_similarity/June 18 Following Regression Notebook.ipynb +++ /dev/null @@ -1,1316 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: Logging before flag parsing goes to stderr.\n", - "W0618 16:45:51.878871 26704 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Turning diagnostics collection on. \n", - "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", - "Azure ML SDK Version: 1.0.41\n", - "Pandas version: 0.23.4\n", - "Tensorflow Version: 1.13.1\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0618 16:46:02.219784 26704 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", - "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", - "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. 
Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0618 16:46:02.576774 28512 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "W0618 16:46:09.812178 26704 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Interactive authentication successfully completed.\n", - "Workspace name: MAIDAPTest\n", - "Azure region: eastus2\n", - "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", - "Resource group: nlprg\n", - "Found existing compute target.\n", - "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-18T20:41:02.587000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 215KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 255KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|████████████████████████████████████████████████| 401/401 [00:01<00:00, 246KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - } - ], - "source": [ - "# Set the environment path to find NLP\n", - "import sys\n", - "sys.path.append(\"../../\")\n", - "import time\n", - "import logging\n", - "import csv\n", - "import os\n", - "import pandas as pd\n", - "import shutil\n", - "import numpy as np\n", - "import torch\n", - "import sys\n", - "from scipy.stats import pearsonr\n", - "from scipy.spatial import distance\n", - "from sklearn.externals import joblib\n", - "\n", - "# Import utils\n", - "from utils_nlp.azureml import azureml_utils\n", - "from utils_nlp.dataset import stsbenchmark\n", - "from utils_nlp.dataset.preprocess import (\n", - " to_lowercase,\n", - " to_spacy_tokens,\n", - " rm_spacy_stopwords,\n", - ")\n", - "\n", - "# Tensorflow dependencies for Google Universal Sentence Encoder\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", - "\n", - "# AzureML packages\n", - "import azureml as aml\n", - "import logging\n", - "from azureml.telemetry import set_diagnostics_collection\n", - "set_diagnostics_collection(send_diagnostics=True)\n", - "from azureml.train.automl import AutoMLConfig\n", - "from azureml.core import Datastore, Experiment\n", - "from azureml.widgets 
import RunDetails\n", - "from azureml.core.compute import ComputeTarget\n", - "from azureml.core.runconfig import RunConfiguration\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", - "from azureml.train.automl import AutoMLStep\n", - "from azureml.pipeline.core import Pipeline, PipelineData\n", - "from azureml.pipeline.steps import PythonScriptStep\n", - "\n", - "print(\"System version: {}\".format(sys.version))\n", - "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", - "print(\"Pandas version: {}\".format(pd.__version__))\n", - "print(\"Tensorflow Version:\", tf.VERSION)\n", - "\n", - "BASE_DATA_PATH = '../../data'\n", - "\n", - "ws = azureml_utils.get_or_create_workspace(\n", - " subscription_id=\"\",\n", - " resource_group=\"\",\n", - " workspace_name=\"\",\n", - " workspace_region=\"\"\n", - ")\n", - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep='\\n')\n", - "\n", - "experiment_name = 'automl-sentence-similarity'\n", - "project_folder = './automl-sentence-similarity'\n", - "\n", - "experiment = Experiment(ws, experiment_name)\n", - "experiment\n", - "\n", - "from azureml.core.compute import ComputeTarget, AmlCompute\n", - "# choose a name for your cluster\n", - "cluster_name = \"gpucluster\"\n", - "\n", - "try:\n", - " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", - " print('Found existing compute target.')\n", - "except ComputeTargetException:\n", - " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", - " max_nodes=4)\n", - "\n", - " # create the cluster\n", - " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", - "\n", - " compute_target.wait_for_completion(show_output=True)\n", - "\n", - "# use get_status() to get a detailed status for the current AmlCompute. 
\n", - "print(compute_target.get_status().serialize())\n", - "\n", - "# Load in the raw datasets as pandas dataframes\n", - "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", - "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", - "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")\n", - "\n", - "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", - "# and renaming the remaining columns\n", - "train = stsbenchmark.clean_sts(train_raw)\n", - "dev = stsbenchmark.clean_sts(dev_raw)\n", - "test = stsbenchmark.clean_sts(test_raw)\n", - "\n", - "if not os.path.isdir('data'):\n", - " os.mkdir('data')\n", - " \n", - "if not os.path.exists(project_folder):\n", - " os.makedirs(project_folder)\n", - "\n", - "train.to_csv(\"data/train.csv\", index=False)\n", - "# test.to_csv(\"data/test.csv\", index=False)\n", - "dev.to_csv(\"data/dev.csv\", index=False)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Uploading ./data\\dev.csv\n", - "Uploading ./data\\train.csv\n", - "Uploaded ./data\\dev.csv, 1 files out of an estimated total of 2\n", - "Uploaded ./data\\train.csv, 2 files out of an estimated total of 2\n" - ] - } - ], - "source": [ - "from azureml.data.data_reference import DataReference \n", - "\n", - "ds = ws.datastores['workspacefilestore']#.get_default_datastore()\n", - "ds.upload(src_dir='./data', target_path='stsbenchmark_data', overwrite=True, show_progress=True)\n", - "\n", - "input_data = DataReference(datastore=ds, \n", - " data_reference_name=\"stsbenchmark\",\n", - " path_on_datastore='stsbenchmark_data/',\n", - " overwrite=False)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "aml_run_config = RunConfiguration()\n", - "\n", - "# Use the aml_compute you created above. 
\n", - "aml_run_config.target = aml_compute\n", - "\n", - "# Enable Docker\n", - "aml_run_config.environment.docker.enabled = True\n", - "\n", - "# Set Docker base image to the default CPU-based image\n", - "aml_run_config.environment.docker.base_image = \"mcr.microsoft.com/azureml/base:0.2.1\"\n", - "\n", - "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n", - "aml_run_config.environment.python.user_managed_dependencies = False\n", - "\n", - "# Auto-prepare the Docker image when used for execution (if it is not already prepared)\n", - "aml_run_config.auto_prepare_environment = True\n", - "\n", - "# Specify CondaDependencies obj, add necessary packages\n", - "aml_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n", - " conda_packages=['pandas','scikit-learn'], \n", - " pip_packages=['azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33'], \n", - " pin_sdk_version=False)\n", - "\n", - "print (\"Run configuration created.\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "run config is ready\n" - ] - } - ], - "source": [ - "# create a new RunConfig object\n", - "conda_run_config = RunConfiguration(framework=\"python\")\n", - "\n", - "# Set compute target to AmlCompute\n", - "conda_run_config.target = compute_target\n", - "\n", - "conda_run_config.environment.docker.enabled = True\n", - "conda_run_config.environment.docker.base_image = aml.core.runconfig.DEFAULT_CPU_IMAGE\n", - "\n", - "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n", - "conda_run_config.environment.python.user_managed_dependencies = False\n", - "\n", - "conda_run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33'], \n", - " conda_packages=['numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], \n", - " pin_sdk_version=False)\n", - "\n", - "\n", - "print('run config is ready')" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "embedded_data = PipelineData(\"embedded_data\", datastore=ds)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Overwriting ./automl-sentence-similarity/embed.py\n" - ] - } - ], - "source": [ - "%%writefile $project_folder/embed.py\n", - "import argparse\n", - "import os\n", - "import azureml.core\n", - "import pandas as pd\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", - "\n", - "def google_encoder(dataset):\n", - " \"\"\" Function that embeds sentences using the Google Universal\n", - " Sentence Encoder pretrained model\n", - " \n", - " Parameters:\n", - " ----------\n", - " dataset: pandas dataframe with sentences and scores\n", - " \n", - " Returns:\n", - " -------\n", - " emb1: 512-dimensional representation of sentence1\n", - " emb2: 512-dimensional representation of sentence2\n", - " \"\"\"\n", - " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", - " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", - "\n", - " # Apply embedding model and normalize the input\n", - " sts_encode1 = 
tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", - " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", - " \n", - " with tf.Session() as session:\n", - " session.run(tf.global_variables_initializer())\n", - " session.run(tf.tables_initializer())\n", - " emb1, emb2 = session.run(\n", - " [sts_encode1, sts_encode2],\n", - " feed_dict={\n", - " sts_input1: dataset['sentence1'],\n", - " sts_input2: dataset['sentence2']\n", - " })\n", - " return emb1, emb2\n", - "\n", - "def feature_engineering(dataset):\n", - " \"\"\"Extracts embedding features from the dataset and returns\n", - " features and target in a dataframe\n", - " \n", - " Parameters:\n", - " ----------\n", - " dataset: pandas dataframe with sentences and scores\n", - " \n", - " Returns:\n", - " -------\n", - " df: pandas dataframe with embedding features\n", - " scores: list of target variables\n", - " \"\"\"\n", - " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", - " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", - " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", - " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", - " df = pd.DataFrame(df, columns=names)\n", - " return df, dataset['score']\n", - "\n", - "def write_output(df, path, name):\n", - " os.makedirs(path, exist_ok=True)\n", - " print(\"%s created\" % path)\n", - " df.to_csv(path + \"/\" + name, index=False)\n", - "\n", - "parser = argparse.ArgumentParser()\n", - "parser.add_argument(\"--sentence_data\", type=str)\n", - "parser.add_argument(\"--embedded_data\", type=str)\n", - "args = parser.parse_args()\n", - "\n", - "# Import the Universal Sentence Encoder's TF Hub module\n", - "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", - "embedding_model = hub.Module(module_url)\n", - "\n", - "train = pd.read_csv(args.sentence_data + \"/train.csv\")\n", - "dev = pd.read_csv(args.sentence_data + \"/dev.csv\")\n", - "\n", - "training_data, training_scores = feature_engineering(train)\n", - "validation_data, validation_scores = feature_engineering(dev)\n", - "\n", - "write_output(training_data, args.embedded_data, \"X_train.csv\")\n", - "write_output(pd.DataFrame(training_scores, columns=['score']), args.embedded_data, \"y_train.csv\")\n", - "\n", - "write_output(validation_data, args.embedded_data, \"X_dev.csv\")\n", - "write_output(pd.DataFrame(validation_scores, columns=['score']), args.embedded_data, \"y_dev.csv\")" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "embedStep = PythonScriptStep(\n", - " name=\"Embed\",\n", - " script_name=\"embed.py\", \n", - " arguments=[\"--embedded_data\", embedded_data,\n", - " \"--sentence_data\", input_data],\n", - " inputs=[input_data],\n", - " outputs=[embedded_data],\n", - " compute_target=compute_target,\n", - " runconfig = conda_run_config,\n", - " hash_paths=[\"embed.py\"],\n", - " source_directory=project_folder,\n", - " allow_reuse=True\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Overwriting ./automl-sentence-similarity/get_data.py\n" - ] - } - ], - "source": [ - "%%writefile $project_folder/get_data.py\n", - "\n", - "import os\n", - "import pandas as pd\n", - "\n", - "def get_data():\n", - " print(\"In get_data\")\n", - " 
print(os.environ['AZUREML_DATAREFERENCE_embedded_data'])\n", - " X_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_train.csv\")\n", - " y_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_train.csv\")\n", - " X_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_dev.csv\")\n", - " y_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_dev.csv\")\n", - " \n", - " return { \"X\" : X_train.values, \"y\" : y_train.values.flatten(), \"X_valid\": X_dev.values, \"y_valid\": y_dev.values.flatten()}" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "automl_settings = {\n", - " \"iteration_timeout_minutes\": 5,\n", - " \"iterations\": 5,\n", - " \"primary_metric\": 'spearman_correlation',\n", - " \"preprocess\": True,\n", - " \"verbosity\": logging.INFO,\n", - "}\n", - "automl_config = AutoMLConfig(task = 'regression',\n", - " debug_log = 'automl_errors.log',\n", - " path = project_folder,\n", - " compute_target=compute_target,\n", - " run_configuration=conda_run_config,\n", - " data_script = project_folder + \"/get_data.py\",\n", - " **automl_settings\n", - " )" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.pipeline.core import PipelineData, TrainingOutput\n", - "metrics_output_name = 'metrics_output'\n", - "best_model_output_name = 'best_model_output'\n", - "\n", - "metrics_data = PipelineData(name='metrics_data',\n", - " datastore=ds,\n", - " pipeline_output_name=metrics_output_name,\n", - " training_output=TrainingOutput(type='Metrics'))\n", - "model_data = PipelineData(name='model_data',\n", - " datastore=ds,\n", - " pipeline_output_name=best_model_output_name,\n", - " training_output=TrainingOutput(type='Model'))" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "from azureml.train.automl import AutoMLStep\n", - "\n", - "automl_step = AutoMLStep(\n", - " name='automl_step',\n", - " automl_config=automl_config,\n", - " inputs=[embedded_data],\n", - " outputs=[metrics_data, model_data],\n", - " hash_paths=[\"get_data.py\"],\n", - " allow_reuse=True)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "In get_data\n" - ] - } - ], - "source": [ - "from azureml.pipeline.core import Pipeline\n", - "\n", - "automl_step.run_after(embedStep)\n", - "pipeline = Pipeline(\n", - " description=\"pipeline_embed_automl\",\n", - " workspace=ws, \n", - " steps=[automl_step])" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Created step automl_step [db9288ed][21fb4afa-ca46-4929-b39e-49a272ff0cbd], (This step is eligible to reuse a previous run's output)\n", - "Created step Embed [70352e68][d271deed-bd3b-4e41-9814-29fc11e585b4], (This step is eligible to reuse a previous run's output)\n", - "Using data reference stsbenchmark for StepId [a1aa29ca][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", - "Submitted pipeline run: 50a80cb2-8adb-4cd5-a337-c493404b7549\n" - ] - } - ], - "source": [ - "pipeline_run = experiment.submit(pipeline)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - 
"application/vnd.jupyter.widget-view+json": { - "model_id": "3e86e9c25d3d4e509c9a2d0d2ccbe486", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_PipelineWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', '…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from azureml.widgets import RunDetails\n", - "RunDetails(pipeline_run).show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "PipelineRunId: 50a80cb2-8adb-4cd5-a337-c493404b7549\n", - "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/50a80cb2-8adb-4cd5-a337-c493404b7549\n", - "PipelineRun Status: Running\n", - "\n", - "\n", - "StepRunId: f78cb325-802a-4779-ada8-05db82c97835\n", - "Link to Portal: https://mlworkspace.azure.ai/portal/subscriptions/15ae9cb6-95c1-483d-a0e3-b1a1a3b06324/resourceGroups/nlprg/providers/Microsoft.MachineLearningServices/workspaces/MAIDAPTest/experiments/automl-sentence-similarity/runs/f78cb325-802a-4779-ada8-05db82c97835\n", - "StepRun( Embed ) Status: NotStarted\n", - "\n", - "Streaming azureml-logs/20_image_build_log.txt\n", - "=============================================\n", - "StepRun( Embed ) Status: Running\n", - "2019/06/18 20:46:34 Downloading source code...\n", - "2019/06/18 20:46:35 Finished downloading source code\n", - "2019/06/18 20:46:35 Using acb_vol_35e6dda2-50a4-4121-9aa7-690a7b1c1e12 as the home volume\n", - "2019/06/18 20:46:35 Creating Docker network: acb_default_network, driver: 'bridge'\n", - "2019/06/18 20:46:36 Successfully set up Docker network: acb_default_network\n", - "2019/06/18 20:46:36 Setting up Docker configuration...\n", - "2019/06/18 20:46:37 Successfully set up Docker configuration\n", - "2019/06/18 20:46:37 Logging in to registry: maidaptestc9922809.azurecr.io\n", - "2019/06/18 20:46:38 Successfully logged into maidaptestc9922809.azurecr.io\n", - "2019/06/18 20:46:38 Executing step ID: acb_step_0. 
Timeout(sec): 1800, Working directory: '', Network: 'acb_default_network'\n", - "2019/06/18 20:46:38 Scanning for dependencies...\n", - "2019/06/18 20:46:38 Successfully scanned dependencies\n", - "2019/06/18 20:46:38 Launching container with name: acb_step_0\n", - "Sending build context to Docker daemon 46.59kB\n", - "\n", - "Step 1/15 : FROM mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04@sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b\n", - "sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b: Pulling from azureml/base\n", - "Digest: sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b\n", - "Status: Downloaded newer image for mcr.microsoft.com/azureml/base:intelmpi2018.3-ubuntu16.04@sha256:2b9f1a6f5cde97d4f400724908a4068eb67fd1da7ca44893c5559fc24592ce1b\n", - " ---> a62e2769d877\n", - "Step 2/15 : USER root\n", - " ---> Running in e13363f06f21\n", - "Removing intermediate container e13363f06f21\n", - " ---> 0c6b3062030f\n", - "Step 3/15 : RUN mkdir -p $HOME/.cache\n", - " ---> Running in 3b1110f2ad4a\n", - "Removing intermediate container 3b1110f2ad4a\n", - " ---> 371d5ca51f9f\n", - "Step 4/15 : WORKDIR /\n", - " ---> Running in 4ab46653a217\n", - "Removing intermediate container 4ab46653a217\n", - " ---> ca6f18a2a1af\n", - "Step 5/15 : COPY azureml-setup/99brokenproxy /etc/apt/apt.conf.d/\n", - " ---> 532467333a34\n", - "Step 6/15 : RUN if dpkg --compare-versions `conda --version | grep -oE '[^ ]+$'` lt 4.4.11; then conda install conda==4.4.11; fi\n", - " ---> Running in 1aa523971ab3\n", - "Removing intermediate container 1aa523971ab3\n", - " ---> 20d95b09e00e\n", - "Step 7/15 : COPY azureml-setup/mutated_conda_dependencies.yml azureml-setup/mutated_conda_dependencies.yml\n", - " ---> 4e4acdcc0a95\n", - "Step 8/15 : RUN ldconfig /usr/local/cuda/lib64/stubs && conda env create -p /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b -f azureml-setup/mutated_conda_dependencies.yml && rm -rf \"$HOME/.cache/pip\" && conda clean -aqy && CONDA_ROOT_DIR=$(conda info --root) && rm -rf \"$CONDA_ROOT_DIR/pkgs\" && find \"$CONDA_ROOT_DIR\" -type d -name __pycache__ -exec rm -rf {} + && ldconfig\n", - " ---> Running in ace0ce861df6\n", - "Solving environment: ...working... done\n", - "\n", - "\n", - "==> WARNING: A newer version of conda exists. 
<==\n", - " current version: 4.5.11\n", - " latest version: 4.6.14\n", - "\n", - "Please update conda by running\n", - "\n", - " $ conda update -n base -c defaults conda\n", - "\n", - "\n", - "\n", - "zlib-1.2.11 | 101 KB | | 0% \n", - "zlib-1.2.11 | 101 KB | ########## | 100% \n", - "\n", - "libprotobuf-3.8.0 | 4.7 MB | | 0% \n", - "libprotobuf-3.8.0 | 4.7 MB | #######4 | 74% \n", - "libprotobuf-3.8.0 | 4.7 MB | #########6 | 97% \n", - "libprotobuf-3.8.0 | 4.7 MB | ########## | 100% \n", - "\n", - "h5py-2.9.0 | 1.2 MB | | 0% \n", - "h5py-2.9.0 | 1.2 MB | ######## | 81% \n", - "h5py-2.9.0 | 1.2 MB | ########## | 100% \n", - "\n", - "sqlite-3.13.0 | 4.9 MB | | 0% \n", - "sqlite-3.13.0 | 4.9 MB | #######5 | 76% \n", - "sqlite-3.13.0 | 4.9 MB | #########8 | 98% \n", - "sqlite-3.13.0 | 4.9 MB | ########## | 100% \n", - "\n", - "c-ares-1.15.0 | 98 KB | | 0% \n", - "c-ares-1.15.0 | 98 KB | ########## | 100% \n", - "\n", - "certifi-2019.3.9 | 149 KB | | 0% \n", - "certifi-2019.3.9 | 149 KB | ########## | 100% \n", - "\n", - "absl-py-0.7.1 | 154 KB | | 0% \n", - "absl-py-0.7.1 | 154 KB | ########## | 100% \n", - "\n", - "keras-preprocessing- | 33 KB | | 0% \n", - "keras-preprocessing- | 33 KB | ########## | 100% \n", - "\n", - "py-xgboost-0.82 | 70 KB | | 0% \n", - "py-xgboost-0.82 | 70 KB | ########## | 100% \n", - "\n", - "_py-xgboost-mutex-2. | 8 KB | | 0% \n", - "_py-xgboost-mutex-2. | 8 KB | ########## | 100% \n", - "\n", - "xz-5.2.4 | 366 KB | | 0% \n", - "xz-5.2.4 | 366 KB | #########4 | 95% \n", - "xz-5.2.4 | 366 KB | ########## | 100% \n", - "\n", - "libgfortran-ng-7.3.0 | 1.3 MB | | 0% \n", - "libgfortran-ng-7.3.0 | 1.3 MB | #######9 | 80% \n", - "libgfortran-ng-7.3.0 | 1.3 MB | ########## | 100% \n", - "\n", - "mock-3.0.5 | 44 KB | | 0% \n", - "mock-3.0.5 | 44 KB | ########## | 100% \n", - "\n", - "openblas-0.3.5 | 15.8 MB | | 0% \n", - "openblas-0.3.5 | 15.8 MB | ###1 | 32% \n", - "openblas-0.3.5 | 15.8 MB | #######5 | 75% \n", - "openblas-0.3.5 | 15.8 MB | #########4 | 95% \n", - "openblas-0.3.5 | 15.8 MB | ########## | 100% \n", - "\n", - "libcblas-3.8.0 | 6 KB | | 0% \n", - "libcblas-3.8.0 | 6 KB | ########## | 100% \n", - "\n", - "ncurses-5.9 | 1.1 MB | | 0% \n", - "ncurses-5.9 | 1.1 MB | #######8 | 79% \n", - "ncurses-5.9 | 1.1 MB | ########7 | 88% \n", - "ncurses-5.9 | 1.1 MB | #########7 | 97% \n", - "ncurses-5.9 | 1.1 MB | ########## | 100% \n", - "\n", - "protobuf-3.8.0 | 683 KB | | 0% \n", - "protobuf-3.8.0 | 683 KB | ########3 | 83% \n", - "protobuf-3.8.0 | 683 KB | ########## | 100% \n", - "\n", - "libgcc-ng-9.1.0 | 8.1 MB | | 0% \n", - "libgcc-ng-9.1.0 | 8.1 MB | #######5 | 75% \n", - "libgcc-ng-9.1.0 | 8.1 MB | #########7 | 98% \n", - "libgcc-ng-9.1.0 | 8.1 MB | ########## | 100% \n", - "\n", - "setuptools-41.0.1 | 612 KB | | 0% \n", - "setuptools-41.0.1 | 612 KB | ########5 | 85% \n", - "setuptools-41.0.1 | 612 KB | ########## | 100% \n", - "\n", - "astor-0.7.1 | 22 KB | | 0% \n", - "astor-0.7.1 | 22 KB | ########## | 100% \n", - "\n", - "wheel-0.33.4 | 34 KB | | 0% \n", - "wheel-0.33.4 | 34 KB | ########## | 100% \n", - "\n", - "scikit-learn-0.21.2 | 6.7 MB | | 0% \n", - "scikit-learn-0.21.2 | 6.7 MB | #######5 | 75% \n", - "scikit-learn-0.21.2 | 6.7 MB | #########7 | 98% \n", - "scikit-learn-0.21.2 | 6.7 MB | ########## | 100% \n", - "\n", - "hdf5-1.10.4 | 5.3 MB | | 0% \n", - "hdf5-1.10.4 | 5.3 MB | #######6 | 76% \n", - "hdf5-1.10.4 | 5.3 MB | #########9 | 99% \n", - "hdf5-1.10.4 | 5.3 MB | ########## | 100% \n", - "\n", - "pandas-0.24.2 | 11.1 MB | | 0% \n", - 
"pandas-0.24.2 | 11.1 MB | ##3 | 23% \n", - "pandas-0.24.2 | 11.1 MB | #######5 | 75% \n", - "pandas-0.24.2 | 11.1 MB | ########9 | 90% \n", - "pandas-0.24.2 | 11.1 MB | #########9 | 100% \n", - "pandas-0.24.2 | 11.1 MB | ########## | 100% \n", - "\n", - "markdown-2.6.11 | 56 KB | | 0% \n", - "markdown-2.6.11 | 56 KB | ########## | 100% \n", - "\n", - "termcolor-1.1.0 | 6 KB | | 0% \n", - "termcolor-1.1.0 | 6 KB | ########## | 100% \n", - "\n", - "ca-certificates-2019 | 145 KB | | 0% \n", - "ca-certificates-2019 | 145 KB | ########## | 100% \n", - "\n", - "pytz-2019.1 | 227 KB | | 0% \n", - "pytz-2019.1 | 227 KB | #########4 | 94% \n", - "pytz-2019.1 | 227 KB | ########## | 100% \n", - "\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "pip-19.1.1 | 1.8 MB | | 0% \n", - "pip-19.1.1 | 1.8 MB | #######8 | 79% \n", - "pip-19.1.1 | 1.8 MB | #########4 | 94% \n", - "pip-19.1.1 | 1.8 MB | ########## | 100% \n", - "\n", - "tensorboard-1.13.1 | 3.3 MB | | 0% \n", - "tensorboard-1.13.1 | 3.3 MB | #######6 | 76% \n", - "tensorboard-1.13.1 | 3.3 MB | #########9 | 99% \n", - "tensorboard-1.13.1 | 3.3 MB | ########## | 100% \n", - "\n", - "werkzeug-0.15.4 | 254 KB | | 0% \n", - "werkzeug-0.15.4 | 254 KB | #########7 | 97% \n", - "werkzeug-0.15.4 | 254 KB | ########## | 100% \n", - "\n", - "python-3.6.2 | 19.0 MB | | 0% \n", - "python-3.6.2 | 19.0 MB | ##7 | 27% \n", - "python-3.6.2 | 19.0 MB | ######3 | 64% \n", - "python-3.6.2 | 19.0 MB | ######## | 80% \n", - "python-3.6.2 | 19.0 MB | #########2 | 92% \n", - "python-3.6.2 | 19.0 MB | ########## | 100% \n", - "\n", - "gast-0.2.2 | 10 KB | | 0% \n", - "gast-0.2.2 | 10 KB | ########## | 100% \n", - "\n", - "libstdcxx-ng-9.1.0 | 4.0 MB | | 0% \n", - "libstdcxx-ng-9.1.0 | 4.0 MB | #######6 | 77% \n", - "libstdcxx-ng-9.1.0 | 4.0 MB | #########8 | 98% \n", - "libstdcxx-ng-9.1.0 | 4.0 MB | ########## | 100% \n", - "\n", - "openssl-1.0.2r | 3.1 MB | | 0% \n", - "openssl-1.0.2r | 3.1 MB | #######7 | 78% \n", - "openssl-1.0.2r | 3.1 MB | #########7 | 98% \n", - "openssl-1.0.2r | 3.1 MB | ########## | 100% \n", - "\n", - "tensorflow-estimator | 205 KB | | 0% \n", - "tensorflow-estimator | 205 KB | ########## | 100% \n", - "\n", - "liblapack-3.8.0 | 6 KB | | 0% \n", - "liblapack-3.8.0 | 6 KB | ########## | 100% \n", - "\n", - "readline-6.2 | 713 KB | | 0% \n", - "readline-6.2 | 713 KB | ########7 | 87% \n", - "readline-6.2 | 713 KB | ########## | 100% \n", - "\n", - "six-1.12.0 | 22 KB | | 0% \n", - "six-1.12.0 | 22 KB | ########## | 100% \n", - "\n", - "tensorflow-hub-0.4.0 | 52 KB | | 0% \n", - "tensorflow-hub-0.4.0 | 52 KB | ########## | 100% \n", - "\n", - "scipy-1.3.0 | 18.8 MB | | 0% \n", - "scipy-1.3.0 | 18.8 MB | ##8 | 29% \n", - "scipy-1.3.0 | 18.8 MB | #######3 | 74% \n", - "scipy-1.3.0 | 18.8 MB | #########3 | 93% \n", - "scipy-1.3.0 | 18.8 MB | ########## | 100% \n", - "\n", - "tensorflow-1.13.1 | 77.2 MB | | 0% \n", - "tensorflow-1.13.1 | 77.2 MB | 5 | 6% \n", - "tensorflow-1.13.1 | 77.2 MB | #3 | 13% \n", - "tensorflow-1.13.1 | 77.2 MB | ##2 | 23% \n", - "tensorflow-1.13.1 | 77.2 MB | ###2 | 32% \n", - "tensorflow-1.13.1 | 77.2 MB | ####2 | 43% \n", - "tensorflow-1.13.1 | 77.2 MB | #####1 | 52% \n", - "tensorflow-1.13.1 | 77.2 MB | ######1 | 61% \n", - "tensorflow-1.13.1 | 77.2 MB | #######1 | 71% \n", - "tensorflow-1.13.1 | 77.2 MB | ######## | 80% \n", - "tensorflow-1.13.1 | 77.2 MB | ########6 | 87% \n", - "tensorflow-1.13.1 | 77.2 MB | #########1 | 91% \n", - "tensorflow-1.13.1 | 77.2 MB | #########4 | 94% \n", - 
"tensorflow-1.13.1 | 77.2 MB | #########6 | 97% \n", - "tensorflow-1.13.1 | 77.2 MB | #########8 | 98% \n", - "tensorflow-1.13.1 | 77.2 MB | #########9 | 100% \n", - "tensorflow-1.13.1 | 77.2 MB | ########## | 100% \n", - "\n", - "libblas-3.8.0 | 6 KB | | 0% \n", - "libblas-3.8.0 | 6 KB | ########## | 100% \n", - "\n", - "joblib-0.13.2 | 180 KB | | 0% \n", - "joblib-0.13.2 | 180 KB | 6 | 7% \n", - "joblib-0.13.2 | 180 KB | ########## | 100% \n", - "\n", - "keras-applications-1 | 31 KB | | 0% \n", - "keras-applications-1 | 31 KB | ########## | 100% \n", - "\n", - "grpcio-1.16.0 | 1.0 MB | | 0% \n", - "grpcio-1.16.0 | 1.0 MB | ########3 | 83% \n", - "grpcio-1.16.0 | 1.0 MB | ########## | 100% \n", - "\n", - "libxgboost-0.82 | 3.9 MB | | 0% \n", - "libxgboost-0.82 | 3.9 MB | #######6 | 76% \n", - "libxgboost-0.82 | 3.9 MB | #########5 | 96% \n", - "libxgboost-0.82 | 3.9 MB | ########## | 100% \n", - "\n", - "python-dateutil-2.8. | 219 KB | | 0% \n", - "python-dateutil-2.8. | 219 KB | ########## | 100% \n", - "\n", - "numpy-1.16.4 | 4.3 MB | | 0% \n", - "numpy-1.16.4 | 4.3 MB | #######6 | 76% \n", - "numpy-1.16.4 | 4.3 MB | ########9 | 89% \n", - "numpy-1.16.4 | 4.3 MB | #########9 | 99% \n", - "numpy-1.16.4 | 4.3 MB | ########## | 100% \n", - "\n", - "tk-8.5.19 | 1.9 MB | | 0% \n", - "tk-8.5.19 | 1.9 MB | #######7 | 78% \n", - "tk-8.5.19 | 1.9 MB | ######### | 91% \n", - "tk-8.5.19 | 1.9 MB | ########## | 100% \n", - "Downloading and Extracting Packages\n", - "Preparing transaction: ...working... done\n", - "Verifying transaction: ...working... done\n", - "Executing transaction: ...working... done\n", - "Collecting azureml-sdk (from -r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/75/5d/b9a03efc12c2d18bac509cc8754c3015ee70a50749a63f3b1ba0070c01de/azureml_sdk-1.0.43-py3-none-any.whl\n", - "Collecting azureml-dataprep (from -r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", - " Downloading https://files.pythonhosted.org/packages/bd/ec/dd8521421adaf64264aa26ab31a8be4ffd01c29d0600497eed7b955868ac/azureml_dataprep-1.1.5-py3-none-any.whl (23.9MB)\n", - "Collecting azureml-train-automl==1.0.33 (from -r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/63/30/9dbf0166f81f2a0b0bb751f74a5a9fe5e491703a1858af85b7b127917320/azureml_train_automl-1.0.33-py3-none-any.whl (3.9MB)\n", - "Collecting azureml-core==1.0.43.* (from azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/f6/b2/ba8fde6c28251cec7fee4f6040ba13476a42ecbc138785bf958a5f500704/azureml_core-1.0.43.1-py2.py3-none-any.whl (937kB)\n", - "Collecting azureml-pipeline==1.0.43.* (from azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/67/31/9266e565b2965616ed694aabb70f035f01627f8e7cfeff48553c3631f0d7/azureml_pipeline-1.0.43-py3-none-any.whl\n", - "Collecting azureml-train==1.0.43.* (from azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/67/e4/b5a3d84ac40ceaf4203ca0ef0629e8de9c27edefd9ba0e7c32f5630f1930/azureml_train-1.0.43-py3-none-any.whl\n", - "Collecting azureml-dataprep-native<14.0.0,>=13.0.0 (from azureml-dataprep->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", - " Downloading 
https://files.pythonhosted.org/packages/b8/b1/3b38a679a77eabad4e62157ad7f5e783003c0b2cdbef4fcc20b0ebafab98/azureml_dataprep_native-13.0.0-cp36-cp36m-manylinux1_x86_64.whl (1.3MB)\n", - "Collecting dotnetcore2==2.1.8 (from azureml-dataprep->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", - " Downloading https://files.pythonhosted.org/packages/4f/a1/3224449b9fd886f2be43173d47e8c9d42e2291bd4e6f0a9706ca6d0a4807/dotnetcore2-2.1.8-py3-none-manylinux1_x86_64.whl (29.3MB)\n", - "Collecting dill>=0.2.8 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/fe/42/bfe2e0857bc284cbe6a011d93f2a9ad58a22cb894461b199ae72cfef0f29/dill-0.2.9.tar.gz (150kB)\n", - "Collecting resource>=0.1.8 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/34/ad/9cd037c01c075f9a273c23557f8e71195d773d59d3881bbb26011d396c8b/Resource-0.2.1-py2.py3-none-any.whl\n", - "Collecting azureml-telemetry==1.0.33.* (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/cd/12/eb308994938105cb80357c57201a48a16388531b9961cffb543073ee80d0/azureml_telemetry-1.0.33-py3-none-any.whl\n", - "Collecting skl2onnx==1.4.5; python_version < \"3.7\" (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Downloading https://files.pythonhosted.org/packages/a2/17/99dcc4c5881fbbf73151c2e66cfe2b3ad0664e114f0f06d987c2c21afb86/skl2onnx-1.4.5-py2.py3-none-any.whl (171kB)\n", - "Requirement already satisfied: pytz in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (2019.1)\n", - "Collecting onnxmltools==1.4.0; python_version < \"3.7\" (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/6f/0d/83201824e7693b63eac297e294cfa8af69f70b1b56492d40d580ba44bb6a/onnxmltools-1.4.0-py2.py3-none-any.whl (326kB)\n", - "Collecting azureml-pipeline-core==1.0.33.* (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/6a/9b/86ef6afa306d46f4f443dc9a9d0e09ef05abf8ccfdaaf19474a4fe0eaeb7/azureml_pipeline_core-1.0.33-py2.py3-none-any.whl (162kB)\n", - "Collecting numpy<=1.16.2,>=1.11.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/35/d5/4f8410ac303e690144f0a0603c4b8fd3b986feb2749c435f7cdbb288f17e/numpy-1.16.2-cp36-cp36m-manylinux1_x86_64.whl (17.3MB)\n", - "Collecting lightgbm<=2.2.1,>=2.0.11 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/78/7e/bc87e7951cfaa998cffaf39e6c721f5bd04efb2e139486206356edb289a5/lightgbm-2.2.1-py2.py3-none-manylinux1_x86_64.whl (1.1MB)\n", - "Collecting gensim (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading 
https://files.pythonhosted.org/packages/d3/4b/19eecdf07d614665fa889857dc56ac965631c7bd816c3476d2f0cac6ea3b/gensim-3.7.3-cp36-cp36m-manylinux1_x86_64.whl (24.2MB)\n", - "Collecting sklearn-pandas<=1.7.0,>=1.4.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/7e/9c/c94f46b40b86d2c77c46c4c1b858fc66c117b4390665eca28f2e0812db45/sklearn_pandas-1.7.0-py2.py3-none-any.whl\n", - "Collecting scikit-learn<=0.20.3,>=0.19.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/5e/82/c0de5839d613b82bddd088599ac0bbfbbbcbd8ca470680658352d2c435bd/scikit_learn-0.20.3-cp36-cp36m-manylinux1_x86_64.whl (5.4MB)\n", - "Collecting wheel==0.30.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/0c/80/16a85b47702a1f47a63c104c91abdd0a6704ee8ae3b4ce4afc49bc39f9d9/wheel-0.30.0-py2.py3-none-any.whl (49kB)\n", - "Collecting pandas<=0.23.4,>=0.21.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/e1/d8/feeb346d41f181e83fba45224ab14a8d8af019b48af742e047f3845d8cff/pandas-0.23.4-cp36-cp36m-manylinux1_x86_64.whl (8.9MB)\n", - "Collecting scipy<=1.1.0,>=1.0.0 (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/a8/0b/f163da98d3a01b3e0ef1cab8dd2123c34aee2bafbb1c5bffa354cc8a1730/scipy-1.1.0-cp36-cp36m-manylinux1_x86_64.whl (31.2MB)\n", - "Collecting nimbusml==0.6.5; python_version < \"3.7\" (from azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/e4/7b/d7d2ccfd778df731ae661caa15dc58ef4ef0bffa236176296bd7b9620c8d/nimbusml-0.6.5-cp36-none-manylinux1_x86_64.whl (60.1MB)\n", - "Collecting PyJWT (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/87/8b/6a9f14b5f781697e51259d81657e6048fd31a113229cf346880bb7545565/PyJWT-1.7.1-py2.py3-none-any.whl\n", - "Collecting requests>=2.19.1 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/51/bd/23c926cd341ea6b7dd0b2a00aba99ae0f828be89d72b2190f27c11d4b7fb/requests-2.22.0-py2.py3-none-any.whl (57kB)\n", - "Collecting SecretStorage (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/82/59/cb226752e20d83598d7fdcabd7819570b0329a61db07cfbdd21b2ef546e3/SecretStorage-3.1.1-py3-none-any.whl\n", - "Collecting azure-graphrbac>=0.40.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/3e/93/02056aca45162f9fc275d1eaad12a2a07ef92375afb48eabddc4134b8315/azure_graphrbac-0.61.1-py2.py3-none-any.whl (141kB)\n", - "Collecting msrest>=0.5.1 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading 
https://files.pythonhosted.org/packages/9a/23/eea6c1fce5b24366b48f270c23f043f976eb0d4248eb3cb7e62b0f602bcd/msrest-0.6.7-py2.py3-none-any.whl (81kB)\n", - "Collecting pathspec (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/84/2a/bfee636b1e2f7d6e30dd74f49201ccfa5c3cf322d44929ecc6c137c486c5/pathspec-0.5.9.tar.gz\n", - "Collecting urllib3>=1.23 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/e6/60/247f23a7121ae632d62811ba7f273d0e58972d75e58a94d329d51550a47d/urllib3-1.25.3-py2.py3-none-any.whl (150kB)\n", - "Requirement already satisfied: python-dateutil>=2.7.3 in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1)) (2.8.0)\n", - "Collecting jsonpickle (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/07/07/c157520a3ebd166c8c24c6ae0ecae7c3968eb4653ff0e5af369bb82f004d/jsonpickle-1.2-py2.py3-none-any.whl\n", - "Collecting ndg-httpsclient (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/fb/67/c2f508c00ed2a6911541494504b7cac16fe0b0473912568df65fd1801132/ndg_httpsclient-0.5.1-py3-none-any.whl\n", - "Collecting docker (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/91/93/310fe092039f6b0759a1f8524e9e2c56f8012804fa2a8da4e4289bb74d7c/docker-4.0.1-py2.py3-none-any.whl (138kB)\n", - "Collecting azure-mgmt-resource>=1.2.1 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/97/1e/03813b5705b46d86d8d6d594930b78f14b13d901b5ca089152e06e67b680/azure_mgmt_resource-3.0.0-py2.py3-none-any.whl (468kB)\n", - "Collecting azure-mgmt-authorization>=0.40.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/6b/b2/c0d62a3a91c13641e09af294c13fe16929f88dc5902718388cd9b292217f/azure_mgmt_authorization-0.52.0-py2.py3-none-any.whl (112kB)\n", - "Collecting jmespath (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/83/94/7179c3832a6d45b266ddb2aac329e101367fbdb11f425f13771d27f225bb/jmespath-0.9.4-py2.py3-none-any.whl\n", - "Collecting ruamel.yaml<=0.15.89,>=0.15.35 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Downloading https://files.pythonhosted.org/packages/36/e1/cc2fa400fa5ffde3efa834ceb15c464075586de05ca3c553753dcd6f1d3b/ruamel.yaml-0.15.89-cp36-cp36m-manylinux1_x86_64.whl (651kB)\n", - "Collecting msrestazure>=0.4.33 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading 
https://files.pythonhosted.org/packages/0a/aa/b17a4f702ecd6d9e989ae34109aa384c988aed0de37215c651165ed45238/msrestazure-0.6.1-py2.py3-none-any.whl (40kB)\n", - "Requirement already satisfied: six>=1.11.0 in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1)) (1.12.0)\n", - "Collecting azure-mgmt-keyvault>=0.40.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/49/de/0d69aedae7c5f6428314640b65947203ab80409c12b5d4e66fb5b7a4182e/azure_mgmt_keyvault-1.1.0-py2.py3-none-any.whl (111kB)\n", - "Collecting adal>=1.2.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/00/72/53dce9e4f5d6c1aa57b8d408cb34dff1969ecbf10ab7e678f32c5e0e2397/adal-1.2.1-py2.py3-none-any.whl (52kB)\n", - "Collecting pyopenssl (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/01/c8/ceb170d81bd3941cbeb9940fc6cc2ef2ca4288d0ca8929ea4db5905d904d/pyOpenSSL-19.0.0-py2.py3-none-any.whl (53kB)\n", - "Collecting azure-mgmt-containerregistry>=2.0.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/97/70/8c2d0509db466678eba16fa2b0a539499f3b351b1f2993126ad843d5be13/azure_mgmt_containerregistry-2.8.0-py2.py3-none-any.whl (718kB)\n", - "Collecting cryptography!=1.9,!=2.0.*,!=2.1.*,!=2.2.* (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/97/18/c6557f63a6abde34707196fb2cad1c6dc0dbff25a200d5044922496668a4/cryptography-2.7-cp34-abi3-manylinux1_x86_64.whl (2.3MB)\n", - "Collecting azure-common>=1.1.12 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/e3/36/9946fa617f458f11766884c76c622810f4c111ee16c08eb8315e88330d66/azure_common-1.1.22-py2.py3-none-any.whl\n", - "Collecting contextlib2 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/a2/71/8273a7eeed0aff6a854237ab5453bc9aa67deb49df4832801c21f0ff3782/contextlib2-0.5.5-py2.py3-none-any.whl\n", - "Collecting azure-mgmt-storage>=1.5.0 (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/8c/03/62c3ed229b9b83fbf4dcd56ae27d5d835f3bd921004c09a478729c221fff/azure_mgmt_storage-4.0.0-py2.py3-none-any.whl (426kB)\n", - "Collecting backports.tempfile (from azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/b4/5c/077f910632476281428fe254807952eb47ca78e720d059a46178c541e669/backports.tempfile-1.0-py2.py3-none-any.whl\n", - "Collecting azureml-pipeline-steps==1.0.43.* (from azureml-pipeline==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading 
https://files.pythonhosted.org/packages/b9/24/917523741c7d18f4f7447b1e3e8bd00ef97a26f1991426e90ec64d90834a/azureml_pipeline_steps-1.0.43-py3-none-any.whl\n", - "Collecting azureml-train-core==1.0.43.* (from azureml-train==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading https://files.pythonhosted.org/packages/2c/af/e6f15c939c7275fe2e630fc9c502f4c0879acb28bd930fcac6083d597766/azureml_train_core-1.0.43-py3-none-any.whl (61kB)\n", - "Collecting distro>=1.2.0 (from dotnetcore2==2.1.8->azureml-dataprep->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 2))\n", - " Downloading https://files.pythonhosted.org/packages/ea/35/82f79b92fa4d937146c660a6482cee4f3dfa1f97ff3d2a6f3ecba33e712e/distro-1.4.0-py2.py3-none-any.whl\n", - "Collecting JsonSir>=0.0.2 (from resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/aa/bf/5c00c1dafaa3ca2c32e7641d9c2c6f9d6d76e127bde00eb600333a60c5bc/JsonSir-0.0.2.tar.gz\n", - "Collecting python-easyconfig>=0.1.0 (from resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/b1/86/1138081cca360a02066eedaf301d0f358c35e0e0d67572acf9d6354edca9/Python_EasyConfig-0.1.7-py2.py3-none-any.whl\n", - "Collecting JsonForm>=0.0.2 (from resource>=0.1.8->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/4f/b7/b9491ba4b709d0616fab15a89f8efe4d3a7924652e1fdd4f15303e9ecdf0/JsonForm-0.0.2.tar.gz\n", - "Collecting applicationinsights (from azureml-telemetry==1.0.33.*->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/a1/53/234c53004f71f0717d8acd37876e0b65c121181167057b9ce1b1795f96a0/applicationinsights-0.11.9-py2.py3-none-any.whl (58kB)\n", - "Requirement already satisfied: protobuf in /azureml-envs/azureml_361db214fdeff5eb353cb3d3027c9e1b/lib/python3.6/site-packages (from skl2onnx==1.4.5; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3)) (3.8.0)\n", - "Collecting onnx (from skl2onnx==1.4.5; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/88/50/e4a5a869093f35884d1fd95b46b24705ab27adb7e562a2a307523c043be3/onnx-1.5.0-cp36-cp36m-manylinux1_x86_64.whl (7.0MB)\n", - "Collecting keras2onnx (from onnxmltools==1.4.0; python_version < \"3.7\"->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/1a/b0/6f3012cb0c959203dd3ce05e0fc61c9112f0d4043fdf917cf665d8c53254/keras2onnx-1.5.0-py3-none-any.whl (186kB)\n", - "Collecting smart-open>=1.7.0 (from gensim->azureml-train-automl==1.0.33->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 3))\n", - " Downloading https://files.pythonhosted.org/packages/37/c0/25d19badc495428dec6a4bf7782de617ee0246a9211af75b302a2681dea7/smart_open-1.8.4.tar.gz (63kB)\n", - "Collecting idna<2.9,>=2.5 (from requests>=2.19.1->azureml-core==1.0.43.*->azureml-sdk->-r /azureml-setup/condaenv.5tx11lmh.requirements.txt (line 1))\n", - " Downloading 
https://files.pythonhosted.org/packages/14/2c/cd551d81dbe15200be1cf41cd03869a46fe7226e7450af7a6545bfc474c9/idna-2.8-py2.py3-none-any.whl (58kB)\n",
-      "[... pip dependency resolution, wheel download, and wheel build log omitted ...]\n",
-      "ERROR: azureml-train-core 1.0.43 has requirement azureml-telemetry==1.0.43.*, but you'll have azureml-telemetry 1.0.33 which is incompatible.\n",
-      "ERROR: azureml-pipeline-core 1.0.33 has requirement azureml-core==1.0.33.*, but you'll have azureml-core 1.0.43.1 which is incompatible.\n",
-      "ERROR: azureml-pipeline-steps 1.0.43 has requirement azureml-pipeline-core==1.0.43.*, but you'll have azureml-pipeline-core 1.0.33 which is incompatible.\n",
-      "ERROR: azureml-pipeline 1.0.43 has requirement azureml-pipeline-core==1.0.43.*, but you'll have azureml-pipeline-core 1.0.33 which is incompatible.\n",
-      "ERROR: azureml-train-automl 1.0.33 has requirement azureml-core==1.0.33.*, but you'll have azureml-core 1.0.43.1 which is incompatible.\n",
-      "[... package installation, conda environment activation notes, and Docker image build log (Steps 9/15 through 15/15) omitted ...]\n",
-      "Successfully built a44c080ae64a\n",
-      "Successfully tagged maidaptestc9922809.azurecr.io/azureml/azureml_b2a8349416887710026a15e07f74a6a3:latest\n",
-      "[... Docker image push log omitted ...]\n",
-      "2019/06/18 20:53:02 Successfully pushed image: maidaptestc9922809.azurecr.io/azureml/azureml_b2a8349416887710026a15e07f74a6a3:latest\n",
-      "\n",
-      "Run ID: chq was successful after 6m31s\n"
-     ]
-    }
-   ],
-   "source": [
-    "pipeline_run.wait_for_completion(show_output=True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.6.7"
-  }
- },
"nbformat": 4, - "nbformat_minor": 2 -} diff --git a/scenarios/sentence_similarity/autoMLwidget.PNG b/scenarios/sentence_similarity/autoMLwidget.PNG deleted file mode 100644 index 363a599e89407cd6baf655aa3ee6bce07aabcc20..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 87866 zcmeFZ2Ut_v+AfN+1Pd5k7OF&W2?$DWQdI<0Kty^MB7)LHLSOH&Kf-jk#=0SBy%mF`s@jhj7iif8% zY1f9wHsJS;8)qGZd3b)kwRvnAy9y5F;o%mXJ9Ek=)NO&y8i3h5&Ir!9f52VbAd9eh zF>4;$3*$a1kuc)tn!uD_8<5qz?T*^zdL6ZU_Br)iSd@cPYLrL|Z=A^alP7*V{d<(u zxX|xH%2`{-vrZloIwbMyry(|54~kj{A7$nEqN@iY*Iuu=4R|N`E;P+NC!H8@33eq` zBPg~>2F%&ESI64_{<&al4`U4z-5gWB{{HDNY2!=(>33>KPXiguT?X6#+$uGs^wpND zfA5v*m0jf*_498>*>WTY&s_&A{B*}-?K-xu)vB!`kOYwK3?wqBQE1XRudHeeKma+Ece7i@e9&iiqPlH@!j?l&SuGmVD zL!PitW!SsJXpWAoEAFsuFLObjIOW?3;||GKGAmvYa%>nC>*C$|!~i#sYV3mey{p$# zoKm^v^5D)Ms+XKRjv(u~u#q{W1QRGx3z~4qb0ralX2B_+Ct}0{w4LELh<>Lt=@Wj| zL=BafhzSwf(jL3uyCF4V&Ml(t>8d}(K4C1QFVz~mQ*!58)00$2Fa4zIFZyTC4V(Wq zv}Y2}f1tXavc(7d?NHeY^kS~k>cws&H^M-W-h+J3yjkz*IZq;$2c`4v9EUMZm6j~@ z)Lm;aV8zu{f^XvdG6O{m;$iRys;$4Gk>s!kA^i~#XLigO%XF!9hPTL~r6zyvm3Tfl zgVg}U7&gUKiCiLy@LNUdN$PTIPC>6LdD&hbD~=%l%K9Z=_E%PT=Inf&6#JLbJ7y=2 z58UmVuFjd%tBKQ%sr33`&)_)ou+vv2JNTSH(gfbceXA$crlk(9zCZ~Yf>dDdW4G-C zJFFfe(0SBTe2%^6HOZLp3ujZ5tkB!njVD>Q!d0NP zQTyk6UnT|$w?fsY%8hc>D+6NXAOfo?D7Z~x3{%mt3DBtSrXiQ#pIf0YQJ_9axGrBBbOY9$< zsehp(;Fb2LWo}2F?&IAK+P(2h48^aI{t+vvywV$eh1mRD61#ds2z-0>&|nzP5WhU4 z_Kke0W-*Fjgcvfs8u1{hkFLjavm<&R=z?g9%Ie#|?Nvz|eVX5eX+txQrklS|q26CV z3j=M65@uvYYx7bwwJ`}Pw}l*1D^S&o5!ofI;a+pf`I?~@(xp!#Ioz66;5nc} z@#WC=P-G+Q7W+4l{U5NL(idap>k!ttknX@2)ernTef+Fm?wp7GB*z$yoNLO>E93t~ zvv0qi%MEPU*M1{}=J>L)Xg%#8j;o&1^?>Ao^YQkl`7Dwqu?? zqSnmP`{Y7w+-hf9E(~OwK4>p$Bqexlj$-#EL3DE9xe}q;IPzXK%A%meTtr08oq73@ z@|}sHcl7=&Tks)lE%TS#8Q%pq&ps7|?=qY~a(srXyP0MZOw(2ijo4DgVRb{~Ezv~& z=Q=ensO6Pya*;@syl$Q{GcJ4!yF~?NNAOKkjpB#|AN=8-YIkmjo$fQ*Demci@5{bm z7k0ef?A?N}s2<*3TlMvj0+L@(q)MI12(s=vp+9w){Od!|nJ{gc$opfGU#&wPf%b=` z7fY(IZDZsgD2*N}(IMRqG=jv;f@rw{nl_2{+aP)BdkBImSA$#_ed$+2qkenDbcEU@ z9EA*E$wt(B7&Vwp2w9Jc~Xn?D78toSIAJWy)w?g*JN1FO0|o_Wd? 
zlpvmrgmR@t_}YOO&3DYs3rwTy#inXK4Wg>#|ZKP`aa<{7*m(qc+b zWD+aebem4O-p^liI#BdS!O+3v#v&7yoBoSx1%SR#Rt>SRk27?bFr*~tW#Fo~s~+9Xw_KlL%LO%pd- zQ@eBa)K^@B%N%nswLtcHZP$63A%KlU0SFF+3(vwMJRT<6g>q-4I`)6fwCM`)>ng!8 zZ@_A5F|yydC`*7YjWuU8ZZQZvrcpLwW|~kOP&xI5V3{k&Pwk}JR2o9McL^^K|GnXP zg+kJH`1PVC;&OqM0p1f>-zs}A!_9uhyCc(*1wJr8!ni_ZtEoOKDYqtR$pQf&=9&Q& z?^wP~Y^*^jN4hdRin_k9wGTSz@Zk34PZmo!)?jsl%rgAC&RQ@v_Z>ou8KR2_|BT2p zN6rNoP7a(AK0!7rY>!%jB2r0#zxVDc1SoV1JdUG8zHEu0>-_kcdib>uy}tl@RO%+lmHT`>3XYN&$4kmn1c#> z+l!VxglGVi&@$kXj-*YBcn1ixeM3BpJA~_5>Zdwg-DJw@H($K%;1}PLxN>h~qaPZw zd9rNuexO7iC{(hSgIr`2l(i$$=Tu*q8!lu#M$?q1cUc7dh?H`k`6=ae3x3mho98j} z<4J{gm8M8@C@Ea-5ruvqTFCUpm8Nlr`Ki#U!-Bi*HP6GOmjMi{ZJx5r-3A(by9)v)m>#Ta;vMu`m#X{tmaI! zkeRsy4E!MGKMK3*J2zEVBBu0GVf{owGZn^X#!>voyKtZraJnpms{k0F{cO!ZOrAd1 zoqiQi*U*iE%H0Tz`IKz|3!ze-8nXzT6>UV(py13Xt52^y#L#5gRNW)AauwEPtf2V` zNZ(wx~%O#b*dAmWur6A=@27lBnsS6N67|z#6rinbCpBVEP1#!x&?>5I8s6 z+NgH_+O_9VLDsGSFBDQ$sY8t^3iAO%Ijvq%;hPX)fFXv4pj*x~YQ_3w7fvi2+l-uJ zf~aF;syR(=CAZFN``EI(wxNJ)*oHdl`S1jDO}QWzA6vGz%kgH=lG2@fFXCEip*-o` zhiO?*;sF-3BWi3WLZ)cw45Kz^<$_*;KLps%S;$1YR~(7GDahuYIP-Zan&ufnpX=KC=W>w* z)>~&x4sUq3g{frsPvEJ=%=%G*(UoeF{Qj+<-wRUD&QDJ_i>ql*3`;x1S8b{4NEDub zJJZHT?p#rI?W-XLZ_`Y~*nw$LPuBl&qAEyA3N2ODk%j8z|3=r=OMi`VKN7HSsoa2j zJ!y))KL?`!^d*G{QdJ80-BsjKM0QbO;%WIr2>#=eZ2 zX7{@y3KmZ0qToLp=>KFM4fh-BGt4+z5l^>^pvSaPBlyXHJv=eYXJiqurR?Z_Ehej5 zr7|RBwY}{3%Z)B{7h~ukkc^r0nezGT0`vJ#0wX+yD&CEtVrXKc|5@-LMjO4g?I=pO zgtzYer}jBrzzY)aZDo28z1L6d62Yfmv${heG>}npVs{*I_D%QKc}Wl$|LHby%e}~( z>jAC(n*?a9o)t~?t%$(xl>mksgPghvWYmKJGb6bz$cwAKUww-EwF}YbaV3D)(H?Qx z$8{9bPzq^pF}JtUUlqa(q8|EbNZ*P1D*#dEyD&s)-1{e8`eilK05|pOL6Az5@CPI1 zf1!T_Oi)jbrOPWzeEfCo?-c3E>Fa8I+!6Za0%(TCo0*#)5{6kcF); zjK)|ra=(o&vz;D?I>m76ZQ_#}L1T@nirC0^JMa^6hFATFu5zrBpH9r8-f;O&%GVu3 z_yhNpyN4#$C}oZM7-``}QN*H*^BW2{-o-#yp1;pNgXfI6@SRgtzF)jr?M(GfHiIQN zudnmm#}kl+PdHr|CYY5|4^V36fHo*hMIP2 zqtNJn+hX?Lq&)GU?a|~%Iupg@L0ZQd5uIsiry9tW&B+}X@GIe1d5hy+AExD`h5lv! zsEVvlhva29%y0PxHi2L5<$fOH3pPn}NE2)fQ0* zI$l)@!CyAY3rkglU-R&t9%_7&(a-Je*RyI0cDSHZ>2Js5BIyB;P^Kau8}cx`IZyx) zGMLiY&XwWhLB3m-s#6PyHt-;cmLb{@BEtR2H8CTC^nCD09LZ!{*r%~3@jK#-xck3& zGwwsq$?*b|WreD$?qsw?U;=(v0=oQH(d_l=saXX?WLu)ympv*mz z$fH|iQ&pW@(~&%8ZiVMvZFJq8F!SF<`TVB)68S=SU{KF`X+-llB^)pk>}5cc2MmPh z*--vXHmM14(J#|q7ZOKQYpF>Z>u-4o+CdUE86dIu2$PTkTIQcxu{_@&x!kvoSKj2? 
z=is*%G}CYnObZ&xLRhyF)B$DeyS-_}chs9-B=19`PQ+H5o2X0LYB$c6w+d=n1{ROa zl>>|$SHPGZ!zC91t}1fn^J!PWKkm2-nVa)Lf%d(7P-VjdSt>NUZ<*yKb7fzT?a-T- zMXA7&qk!vM)*)>B^7eQb3atnUS#@?no4y*Ec2MUtx4{)3aSmJiz!*=j9vGQc!A!76 z)U!hgoW?dx1aG}LpVqHe!D6Yaq{^)c|JG7~D%w~><>sQH29PC%h3r2Y*bbG{Lo$mPu zupgG7fa9PLeP6bl^y>KDYwp7lXkisO8dF(W%KPA!y(R3-_!TAQfH#_Oh6Q4}0Koh4 z`NnbMKw-~-@iKmEfS~T_eqs7`q2`ux#C~cW(@m$#^z8I~T&1$$0m`7qB1A->`YN4R z)ku#M_`&4qg8JVySTBHwh+^QV^_iz%+>90ljf*yRTuOCl!c575n5M4`q_fXDG~N9n zNbfW3#BFT8`66Qo7%Q&VF8-#}suZazN%{blG-52<`}o86-Wq4c+JH}us!XLsonV}> z2u*L@3tAZtU;kFxfL&yaAAD|bM;o*d!L4g#ZWQvolQzDjo$Kb@zxA~5C^s?Sb}#e9 z=HRuz&zRh8Ftp@ei>53XYZ2nmp6N(To6-#)e+~+QK(&rRVRRZ=2r)xS-V^vAd0b&6~rQC^g?N5#GMHshf zuIOp#q`>-hBvIYi)U}rc!Z%?oX4h16Y zgKgx3I9;Hqdiv+O*l{;7DGmSkMnadYHH7G(<(%FA+63zFh!AS|d`6M98@^`U?Cg=i z5MEHnPM&wi3wg`OvC4!YqkRp2T2s5>D=7Up?1|nbC)U_2BjTLKi36flfR%bAu$AGwFr8YE}kwoR7u@J&hxWQ^Yv?|3lj#z?E3x5|Rx zJK!%s*DySfJ@2J>x7Vff_e;7JClei51CVZCQ1q;WqTb375v3GBU;gSl0pR@vtwwf(j9Z#9F$>%yZ?0h)|oo4JNY4D1||hgJX51sa$@zP*_y{m&lC9b4 zz}rbY0Og*Abpe5VkjinBV_50G^ zW_(<|^mZ{Pj%p1U>l){7jQ$sYTCT$(!%!}G@Uk`mz%p%QIF*j+p9PU6H#;LY-Kp1x z)k89$WOo0?p@wJ`FDe*0l9BVe zzdX1wq-SRX7ZD!f-@FABB)QG70#@s!9)%jM{a2ma&{s-FUFZw!F0Z@EFd+xy`dWLO za&>zhWiTuSFGJjH3c89p@0+(&ez_S(q4)STUqE@KZQ=C_Xl*K-?*5!~rxs3s#Q4iL zR3Eb>W;%oOft#%Y<28;rZuRSngG%D68$E|G{Aps_i#}Nh0yiNB+c#3_I@BEO$f)jFWs}nyU;30)MRcI6W3JSqs(gJm+H`QaB0i&=hkg zj4d0xG|xd3CNZF895mkXJ=JC%P_+k-?ceEAtOf`d$#0NWKG9-pC6gg|gc1MB$z4a; z3k~hS#NwAR-D1d*3AwL8|MQK!i@zA@rbySv+T#D&j#&%ZFB%>zzXW50Z$R+KT^#e5>IxU}Bh89FAL7lH`LNvHq63DhaG0E#V3LS}QpYdUK_! zCL;L!0rq>>6hO9C{VvUVf75*(;Lr1F5OeC;3dkYTKd5jR8=&?PQ%D>biGFnNSKQ{S zam=6CeQ%T@CX5&D#18>Bu?S~h`1=gzS3AJ@pOK0Leb~UmE&Z-BiBkNg4j?nxQ_K7p zVhRw62*L&F&WCO?C7I%umpWTCbN;FNMeME}2>hk971Dz*(nI^X$y0A&VH1OVLg7=w zKl{JFSl~e|1U=n4c!_LaHkk4eFTp^AiaPWwLC>;hPOip{c`KN{D2t3YJ&?8`K z?({o^EkJDJz9du^N=X1l!TY$dem>ynWZ%L}UCwv_CTT4B030z<1p~&>2If{yf5^^? 
zg7~eppSkSGIS`k7J8=Ps6UILBptWNtW&V;+KqWwLcWIsB@OPJq0Ke$=TtfZe`s^1= zbsaDj1U;nhuLJHgX!=eW+PGdnSU6#0$%;gcyq}Npo;xUeHYU4kw~MGwi=nqbKI;Lc zTJUEMML;q6CX^GjErocaAqCFk;dER$)MJ=UqYX z1@$sc;t9aTeDXSXwwHeDNSGtyVy6qw>YGyoF3!J9p$Vjf6@R#?2}@&woCp6|CC?`U zd?RBfCeid#2k;QA%g~F?>kv1;ivuMM&DHwZQ&If|ZEu34YJ86}76R0-t{U)+*50iY{`g-U2n*6&2=(&0MEwBQ`))0O725`CXd<7v zSn3gwVz@-f3Abx*ErVfW3&HBwGyM?QC=n0NdF!H$`+EBJMvnJ?fS^I^>wm@!<#R&; z8v6%X&}`7L2=>ia{EmpZ^YsJx8wH^H*fRGYE?iKd6#QKPd4{YSyZ%*vi%SdGYR#7# z6ei;)41=%G;}Yq~_{}ayw+KcowD=W>sOEydiUinw9~8(Y_q>iT;WD~SQOT3w_wJ&b z0vjaqtFfL)`$^{KWuW!M&wz9E=kG+{eN8}L{Viz;57jN!%BZ?ibz_yJDPxtOA;?sJ z$Wm~GN#akPvs-!=XCj;X+s!d z?$s6(^t4~Io`qgtl5G=?T3Nw*;?|s7Ngo@4L8q%91r)0~h0sa`UfTnBZ7U4{L8RFr zyjRKRDrhrfjbwwTus$T3p#36tBXqz+K(iW?GxC92iD#Lrghz<*`{@k+$O(lgQd8mN z>BCK46gL6g#1EijZH+0K*+TA#vH}J{uoF=`4T(!BnjI@THqbVlVkjf0XnN3adpeom z%^xfWyf4WKjQvp2?s$Q1@paFi|I&gm;IQi9WOf|Y6Ju4ZOO}tpx}Fc zolQ!Oe$~fM3wH}02&lsPf-<}7M{CVa*Rf&gr~;bOSD&|d+zU)QAB8JlHEsmS@28nm z&^$VCN|s(Cgv5Xod{&b;C37AWr|v!l`Eh77x1r!$KuvsW)DEBQ%@u0Q4sr#`POsQ< z2j~=R?hhL)+8j2WXbb~oyR59Sl&^A_Pad`Qtg`LYj@Wxn>Dx8^Z8z}4_gw_DgQ5&ce3%}m1g=UnYt11Jcq*moXPB_j|Ir1UO)b_v z57lS&S*^oQXBG_Ee98?TRkIh6F>|*A)!1#yU}oBWHFZm`08+0ITp{W|s{GDZ2|tD` za;*N-LU3+^_oHie!>?7$lGj_Du<#HvXF#nIrWd$q;6-{Un1axd@OMJ~g*A57buQTc z{PZ74@5+7Um$dga)^qjcpjrX~tBT-|k;PN7C}zuD_eVj)a-P5?D6m(s!8wvMm^<*D z@(W%n*w=4#8c63qr{w=T7aoih`lwIyKI$UCy}<^|`>^>M$Ta~i@^sY_x`)OF_q{*7 zkQ}Syd0Q~k>$PgTsKa~ysEFLMY%!=bt<=!yJ8<{tkM1`rF9GAkhAsLv{k0|+Mi?!6 zR2^?XEEdou5Ui&Nz|ly~U?#^o(gS%-{m%r$wVKlc`$|}E5S%x{%1GGYZm1Ral?vy$ zXpGfE=9eg#&5zuGOz&oUaO7x{6v!%W2x=RGQU`VSy?0H9J07K>eto5; zFCP3+=s^B6sZ7AmSr$mCPk`eY!qUb`5!K}ztsE2I2Uv?62wl4LRB;YXH$o@eT-pM<{)opeI*^Bn(IXJP8735Y=>wv!>GZa*-OAVY51S_;kwa9-0Q z{1q|sbCu=hw=hu+0+aWl~p&oGPdohQDWaz$lIO>_IOrp1j5d!x+RC*DN(x@ zlU#n#73Nn1OL5J*=Aas;yw;gxmow`x1IU^2n$~Z?3<44*ZaaRb*TYP6wQ}@FHukt` zL~cCkt+(Toaub@@eT>1O*b8==(YC**Sj<;vOVhzx-nTXCv+CHp~l@#n|-G`r*jQXSpv> z$jy$+^)V?I&)jk_i->4_Wp3pHt<5V(Yb=L+z7SlI6W20R84Gx~x4XRhs_R#@Qc}W| zBZHM+>IUnr?Y|O3kW{373CCrqkaw6{-lrz5LnuwM34`Z0qSw%=sl`^x+S)^HDwP*-ckhb;I@g!v{t&?QZZ{y*eAZKk6@9%yUG}on#7Rc$8d&qIr`X~p` z85ykXGe$eQl@LHHeLE1Gw$@lyveuRJK6k1<0{s5bj8ZexXD+vb|1e}dIHoe$eetht!SfWSp@nw^T>3i$kR%KF-9K!kDC)dD35+LnFuT29vH2~;jo@-e|kJ$w^A3(Km9u8 z$LT4z&QH4sKJ(P*U#`c`hYxr~?quCHuLdRJPk-)j2?Cy~U*B!{yjd>wXw4tvWou6K z*H3%0;USk-Qjx#5a)H>>Qscf@U|7^^2H)G=>(72%;r#!H=O>h3;B{`)yrFL6)mQ)d85ECopyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using AutoML for Predicting Sentence Similarity" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This notebook demonstrates how to use Azure AutoML to automate machine learning model selection and tuning. It also demonstrates how to use a popular sentence embedding model from Google, Universal Sentence Encoder. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### What is Azure AutoML?\n", - "\n", - "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. 
All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", - "\n", - "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![](automl.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in Python notebooks through the use of widgets. AutoML quickly selects a high quality machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Modeling Problem\n", - "\n", - "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled score representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence).\n", - "\n", - "For each sentence in the sentence pair, we will use Google's pretrained Universal Sentence Encoder (details provided below) to generate a $512$-dimensional embedding. Both embeddings in the sentence pair will be concatenated and the resulting $1024$-dimensional vector will be used as features in our regression problem. Our target variable is the sentence similarity score." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: Logging before flag parsing goes to stderr.\n", - "W0618 08:17:03.913692 32960 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Turning diagnostics collection on. 
\n", - "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", - "Azure ML SDK Version: 1.0.41\n", - "Pandas version: 0.23.4\n", - "Tensorflow Version: 1.13.1\n" - ] - } - ], - "source": [ - "# Set the environment path to find NLP\n", - "import sys\n", - "sys.path.append(\"../../\")\n", - "import time\n", - "import os\n", - "import pandas as pd\n", - "import shutil\n", - "import numpy as np\n", - "import torch\n", - "import sys\n", - "from scipy.stats import pearsonr\n", - "from scipy.spatial import distance\n", - "from sklearn.externals import joblib\n", - "\n", - "# Import utils\n", - "from utils_nlp.azureml import azureml_utils\n", - "from utils_nlp.dataset import stsbenchmark\n", - "from utils_nlp.dataset.preprocess import (\n", - " to_lowercase,\n", - " to_spacy_tokens,\n", - " rm_spacy_stopwords,\n", - ")\n", - "\n", - "# Tensorflow dependencies for Google Universal Sentence Encoder\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", - "\n", - "# AzureML packages\n", - "import azureml as aml\n", - "import logging\n", - "from azureml.telemetry import set_diagnostics_collection\n", - "set_diagnostics_collection(send_diagnostics=True)\n", - "from azureml.train.automl import AutoMLConfig\n", - "from azureml.core.experiment import Experiment\n", - "from azureml.widgets import RunDetails\n", - "from azureml.train.automl.run import AutoMLRun\n", - "from azureml.core.webservice import AciWebservice, Webservice\n", - "\n", - "print(\"System version: {}\".format(sys.version))\n", - "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", - "print(\"Pandas version: {}\".format(pd.__version__))\n", - "print(\"Tensorflow Version:\", tf.VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "BASE_DATA_PATH = '../../data'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Data Preparation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## STS Benchmark Dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." 
- ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 232KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 227KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 185KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - } - ], - "source": [ - "# Load in the raw datasets as pandas dataframes\n", - "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", - "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", - "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", - "# and renaming the remaining columns\n", - "train_clean = stsbenchmark.clean_sts(train_raw)\n", - "dev_clean = stsbenchmark.clean_sts(dev_raw)\n", - "test_clean = stsbenchmark.clean_sts(test_raw)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "# Convert all text to lowercase\n", - "train = to_lowercase(train_clean)\n", - "dev = to_lowercase(dev_clean)\n", - "test = to_lowercase(test_clean)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Training set has 5749 sentences\n", - "Development set has 1500 sentences\n", - "Testing set has 1379 sentences\n" - ] - } - ], - "source": [ - "print(\"Training set has {} sentences\".format(len(train)))\n", - "print(\"Development set has {} sentences\".format(len(dev)))\n", - "print(\"Testing set has {} sentences\".format(len(test)))" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
scoresentence1sentence2
05.00a plane is taking off.an air plane is taking off.
13.80a man is playing a large flute.a man is playing a flute.
23.80a man is spreading shreded cheese on a pizza.a man is spreading shredded cheese on an uncoo...
32.60three men are playing chess.two men are playing chess.
44.25a man is playing the cello.a man seated is playing the cello.
\n", - "
" - ], - "text/plain": [ - " score sentence1 \\\n", - "0 5.00 a plane is taking off. \n", - "1 3.80 a man is playing a large flute. \n", - "2 3.80 a man is spreading shreded cheese on a pizza. \n", - "3 2.60 three men are playing chess. \n", - "4 4.25 a man is playing the cello. \n", - "\n", - " sentence2 \n", - "0 an air plane is taking off. \n", - "1 a man is playing a flute. \n", - "2 a man is spreading shredded cheese on an uncoo... \n", - "3 two men are playing chess. \n", - "4 a man seated is playing the cello. " - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "train.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Feature Engineering: Universal Sentence Encoder" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have our sentence pairs loaded, we will convert these sentences into a numerical representation in order to use them in our machine learning model. To do this, we'll use a popular sentence encoder called Google Universal Sentence Encoder (see [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). Google provides two pretrained models based on different design goals: a Transformer model (targets high accuracy even if this reduces model complexity) and a Deep Averaging Network model (DAN; targets efficient inference). Both models are trained on a variety of web sources (Wikipedia, news, question-answers pages, and discussion forums) and produced 512-dimensional embeddings. This notebook utilizes the Transformer-based encoding model which can be downloaded [here](https://tfhub.dev/google/universal-sentence-encoder-large/3) because of its better performance relative to the DAN model on the STS Benchmark dataset (see Table 2 in Google Research's [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Google Universal Sentence Encoder: Transformer Model** The Transformer model produces sentence embeddings using the \"encoding sub-graph of the transformer architecture\" (original architecture introduced [here](https://arxiv.org/abs/1706.03762)). \"This sub-graph uses attention to compute context aware representations of words in a sentence that take into account both the ordering and identity of all the other workds. The context aware word representations are converted to a fixed length sentence encoding vector by computing the element-wise sum of the representations at each word position.\" The input to the model is lowercase PTB-tokenized strings and the model is designed to be useful for multiple different tasks by using multi-task learning. More details about the model can be found in the [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf) by Google Research." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Using the Pretrained Model**\n", - "\n", - "Tensorflow-hub provides the pretrained model for use by the public. We import the model from its url and then feed the model our sentences for it to encode." 
- ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", - "\n", - "# Import the Universal Sentence Encoder's TF Hub module\n", - "embedding_model = hub.Module(module_url)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "def google_encoder(dataset):\n", - " \"\"\" Function that embeds sentences using the Google Universal\n", - " Sentence Encoder pretrained model\n", - " \n", - " Parameters:\n", - " ----------\n", - " dataset: pandas dataframe with sentences and scores\n", - " \n", - " Returns:\n", - " -------\n", - " emb1: 512-dimensional representation of sentence1\n", - " emb2: 512-dimensional representation of sentence2\n", - " \"\"\"\n", - " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", - " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", - "\n", - " # Apply embedding model and normalize the input\n", - " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", - " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", - " \n", - " with tf.Session() as session:\n", - " session.run(tf.global_variables_initializer())\n", - " session.run(tf.tables_initializer())\n", - " emb1, emb2 = session.run(\n", - " [sts_encode1, sts_encode2],\n", - " feed_dict={\n", - " sts_input1: dataset['sentence1'],\n", - " sts_input2: dataset['sentence2']\n", - " })\n", - " return emb1, emb2" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As features, we will embed both sentences using the Google Universal Sentence Encoder and concatenate their representations into a $1024$-dimensional vector. The resulting data will be saved in a dataframe for consumption by our AutoML model." - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "def feature_engineering(dataset):\n", - " \"\"\"Extracts embedding features from the dataset and returns\n", - " features and target in a dataframe\n", - " \n", - " Parameters:\n", - " ----------\n", - " dataset: pandas dataframe with sentences and scores\n", - " \n", - " Returns:\n", - " -------\n", - " df: pandas dataframe with embedding features and target variable\n", - " \"\"\"\n", - " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", - " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", - " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", - " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", - " df = pd.DataFrame(df, columns=names)\n", - " df['score'] = dataset['score'].tolist()\n", - " return df" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "training_data = feature_engineering(train)\n", - "validation_data = feature_engineering(dev)\n", - "testing_data = feature_engineering(test)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Calculate Baseline Performance" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Before using AutoML we will calculate a baseline to compare the AutoML results to. 
For the baseline we will take the Google Universal Sentence Encoder embeddings of each sentence, calculate the cosine similarity between the two sentence embeddings, then compare the predicted values with the true scores using Pearson correlation. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### What is Pearson Correlation?\n", - "\n", - "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", - "\n", - "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", - "\n", - "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluating sentence representations, uses for the STS Benchmark dataset." - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "def get_baseline_performance(data):\n", - "    \"\"\" Get baseline performance by calculating the cosine similarity between\n", - "    the embeddings in the sentence pair and then evaluating the pearson \n", - "    correlation between the predicted and true similarity scores\n", - "    \n", - "    Parameters:\n", - "    ----------\n", - "    data: dataframe containing embeddings and similarity scores\n", - "    \"\"\"\n", - "    emb1 = data[[i for i in data.columns if 'USEEmb1' in i]].values.tolist()\n", - "    emb2 = data[[i for i in data.columns if 'USEEmb2' in i]].values.tolist()\n", - "    scores = data['score'].values.tolist()\n", - "    \n", - "    predictions = [1-distance.cosine(emb1[i], emb2[i]) for i in range(len(emb1))]\n", - "    print(\"Google Universal Sentence Encoder Pearson Correlation:\", round(pearsonr(predictions, scores)[0],3))" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Google Universal Sentence Encoder Pearson Correlation: 0.764\n" - ] - } - ], - "source": [ - "get_baseline_performance(testing_data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# AutoML" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "AutoML can be used for classification, regression, or time-series forecasting experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist."
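If the workspace already exists and its `config.json` has been downloaded from the Azure portal (or written once with `ws.write_config()`), it can also be loaded directly instead of passing the subscription details explicitly; a minimal sketch, assuming `config.json` sits in the working directory or one of its parents:

```python
from azureml.core import Workspace

# Reads subscription_id, resource_group, and workspace_name from config.json.
ws = Workspace.from_config()
print("Workspace name: " + ws.name,
      "Azure region: " + ws.location,
      "Resource group: " + ws.resource_group, sep="\n")
```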
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ws = azureml_utils.get_or_create_workspace(\n", - "    subscription_id=\"\",\n", - "    resource_group=\"\",\n", - "    workspace_name=\"\",\n", - "    workspace_region=\"\"\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('Workspace name: ' + ws.name, \n", - "      'Azure region: ' + ws.location, \n", - "      'Subscription id: ' + ws.subscription_id, \n", - "      'Resource group: ' + ws.resource_group, sep='\\n')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## AutoMLConfig Parameters\n", - "Next, we specify the parameters for the AutoMLConfig class. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**task** \n", - "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**preprocess** \n", - "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**primary_metric** \n", - "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Constraints:** \n", - "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after achieving it), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)."
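The constraint and ensemble options described above are all passed as keyword arguments to AutoMLConfig. A hedged sketch of how they could be combined (illustrative values only; this is not the configuration used below, and exact parameter availability depends on the azureml-train-automl version installed):

```python
from azureml.train.automl import AutoMLConfig

# Illustrative only: restrict the search to two base learners, cap the time
# spent, and stop early once a target Spearman correlation is reached.
constrained_config = AutoMLConfig(
    task="regression",
    primary_metric="spearman_correlation",
    X=X_train,                       # feature matrix, defined two cells below
    y=y_train,                       # target vector, defined two cells below
    iterations=25,                   # try at most 25 pipelines
    iteration_timeout_minutes=10,    # per-iteration time cap
    experiment_timeout_minutes=120,  # overall time cap
    experiment_exit_score=0.80,      # stop early once this score is achieved
    whitelist_models=["LightGBM", "XGBoostRegressor"],
    enable_voting_ensemble=True,
    enable_stack_ensemble=False,
)
```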
- ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "automl_settings = {\n", - " \"task\": 'regression', #type of task: classification, regression or forecasting\n", - " \"debug_log\": 'automated_ml_errors.log',\n", - " \"path\": './automated-ml-regression',\n", - " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", - " \"iterations\" : 50, #Number of algorithm options to try\n", - " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", - " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", - " \"verbosity\":logging.ERROR}" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "X_train = training_data.drop(\"score\", axis=1).values\n", - "y_train = training_data['score'].values.flatten()\n", - "X_validation = validation_data.drop(\"score\", axis=1).values\n", - "y_validation = validation_data['score'].values.flatten()\n", - "\n", - "# local compute\n", - "automated_ml_config = AutoMLConfig(\n", - " X = X_train,\n", - " y = y_train,\n", - " X_valid = X_validation,\n", - " y_valid = y_validation,\n", - " **automl_settings)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Run the Experiment\n", - "\n", - "Run the experiment locally and inspect the results using a widget" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running on local machine\n", - "Parent Run ID: AutoML_96d2e4e6-8d8d-4304-a160-18c487158b8a\n", - "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", - "Current status: DatasetEvaluation. Gathering dataset statistics.\n", - "Current status: FeaturesGeneration. Generating features for the dataset.\n", - "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", - "Current status: ModelSelection. 
Beginning model selection.\n", - "\n", - "****************************************************************************************************\n", - "ITERATION: The iteration being evaluated.\n", - "PIPELINE: A summary description of the pipeline being evaluated.\n", - "DURATION: Time taken for the current iteration.\n", - "METRIC: The result of computing score on the fitted pipeline.\n", - "BEST: The best observed score thus far.\n", - "****************************************************************************************************\n", - "\n", - " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:00:13 0.1822 0.1822\n", - " 1 MinMaxScaler RandomForest 0:01:05 0.4164 0.4164\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:12 0.2106 0.4164\n", - " 3 StandardScalerWrapper LightGBM 0:00:10 0.2845 0.4164\n", - " 4 RobustScaler DecisionTree 0:00:13 0.2544 0.4164\n", - " 5 StandardScalerWrapper LassoLars 0:00:07 0.1246 0.4164\n", - " 6 StandardScalerWrapper LightGBM 0:00:10 0.6568 0.6568\n", - " 7 StandardScalerWrapper RandomForest 0:00:10 0.2216 0.6568\n", - " 8 StandardScalerWrapper LassoLars 0:00:09 0.0838 0.6568\n", - " 9 MinMaxScaler ExtremeRandomTrees 0:00:12 0.3674 0.6568\n", - " 10 RobustScaler ExtremeRandomTrees 0:00:35 0.3522 0.6568\n", - " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:11 0.2703 0.6568\n", - " 12 MinMaxScaler ExtremeRandomTrees 0:00:13 0.2410 0.6568\n", - " 13 RobustScaler RandomForest 0:00:14 0.3422 0.6568\n", - " 14 StandardScalerWrapper LassoLars 0:00:07 nan 0.6568\n", - " 15 StandardScalerWrapper ExtremeRandomTrees 0:00:08 0.1996 0.6568\n", - " 16 StandardScalerWrapper RandomForest 0:00:09 0.2429 0.6568\n", - " 17 MinMaxScaler SGD 0:00:08 0.0559 0.6568\n", - " 18 StandardScalerWrapper RandomForest 0:00:21 0.3900 0.6568\n", - " 19 MinMaxScaler RandomForest 0:00:09 0.1557 0.6568\n", - " 20 StandardScalerWrapper LightGBM 0:00:20 0.7423 0.7423\n", - " 21 StandardScalerWrapper XGBoostRegressor 0:02:19 0.6688 0.7423\n", - " 22 StandardScalerWrapper DecisionTree 0:03:03 0.2179 0.7423\n", - " 23 StandardScalerWrapper LightGBM 0:00:43 0.6779 0.7423\n", - " 24 StandardScalerWrapper XGBoostRegressor 0:03:05 0.7638 0.7638\n", - " 25 TruncatedSVDWrapper XGBoostRegressor 0:00:33 0.7415 0.7638\n", - " 26 StandardScalerWrapper RandomForest 0:01:53 0.4292 0.7638\n", - " 27 StandardScalerWrapper XGBoostRegressor 0:05:27 0.6608 0.7638\n", - " 28 MaxAbsScaler LightGBM 0:00:18 0.6966 0.7638\n", - " 29 StandardScalerWrapper XGBoostRegressor 0:10:35 0.5947 0.7638\n", - " 30 TruncatedSVDWrapper XGBoostRegressor 0:00:42 0.5582 0.7638\n", - " 31 0:15:17 nan 0.7638\n", - "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. Please consider increasing the iteration_timeout_minutes parameter.\n", - " 32 StandardScalerWrapper XGBoostRegressor 0:03:26 0.5855 0.7638\n", - " 33 StandardScalerWrapper XGBoostRegressor 0:01:54 0.6289 0.7638\n", - " 34 MaxAbsScaler LightGBM 0:01:22 0.7226 0.7638\n", - " 35 TruncatedSVDWrapper XGBoostRegressor 0:01:14 0.7168 0.7638\n", - " 36 SparseNormalizer XGBoostRegressor 0:01:51 0.7436 0.7638\n", - " 37 MaxAbsScaler LightGBM 0:00:44 0.7087 0.7638\n", - " 38 0:15:13 nan 0.7638\n", - "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. 
Please consider increasing the iteration_timeout_minutes parameter.\n", - " 39 MaxAbsScaler LightGBM 0:01:15 0.7516 0.7638\n", - " 40 TruncatedSVDWrapper XGBoostRegressor 0:00:49 0.7186 0.7638\n", - " 41 StandardScalerWrapper XGBoostRegressor 0:01:08 0.6529 0.7638\n", - " 42 MaxAbsScaler LightGBM 0:02:37 0.7303 0.7638\n", - " 43 StandardScalerWrapper XGBoostRegressor 0:01:57 0.6202 0.7638\n", - " 44 StandardScalerWrapper XGBoostRegressor 0:01:48 0.6566 0.7638\n", - " 45 TruncatedSVDWrapper XGBoostRegressor 0:01:31 0.7186 0.7638\n", - " 46 MaxAbsScaler LightGBM 0:00:27 0.7438 0.7638\n", - " 47 MaxAbsScaler LightGBM 0:00:22 0.6211 0.7638\n", - " 48 VotingEnsemble 0:01:07 0.8160 0.8160\n", - " 49 StackEnsemble 0:09:44 0.8161 0.8161\n" - ] - } - ], - "source": [ - "experiment=Experiment(ws, 'automated-ml-regression')\n", - "local_run = experiment.submit(automated_ml_config, show_output=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, by accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Inspect the run details using the provided widget\n", - "RunDetails(local_run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![](autoMLwidget.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Deploy\n", - "\n", - "### Retrieve the Best Model\n", - "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the get_output method which returns the best run and fitted model across all iterations. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", - "\n", - "Our best model is a Pipeline with two steps: a DataTransformer step and a StackEnsembleRegressor step. We demonstrate how to extract additional information about what data transformations were used and which models make up the ensemble." 
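The next cell retrieves the overall best run and fitted model. As noted above, `get_output` can also be called with a specific logged metric or a specific iteration; a short sketch of those overloads (iteration 24 is just an example taken from the run log above):

```python
# Best pipeline according to a particular logged metric
best_run_by_metric, model_by_metric = local_run.get_output(metric="spearman_correlation")

# Pipeline fitted in a particular iteration of the search
run_24, model_24 = local_run.get_output(iteration=24)
```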
- ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [], - "source": [ - "best_run, fitted_model = local_run.get_output()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can look at the different models that are used to produce the stack ensemble model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fitted_model.named_steps['stackensembleregressor'].get_params()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also look at how each column in our dataset was featurized by AutoML" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fitted_model.named_steps['datatransformer'].get_featurization_summary()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Register the Fitted Model for Deployment\n", - "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered." - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Registering model AutoML96d2e4e68best\n", - "AutoML96d2e4e68best\n" - ] - } - ], - "source": [ - "description = 'AutoML Model'\n", - "tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"}\n", - "name = 'automl'\n", - "model = local_run.register_model(description = description, tags = tags)\n", - "\n", - "print(local_run.model_id) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create Scoring Script" - ] - }, - { - "cell_type": "code", - "execution_count": 26, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Writing score.py\n" - ] - } - ], - "source": [ - "%%writefile score.py\n", - "import pickle\n", - "import json\n", - "import numpy\n", - "import azureml.train.automl\n", - "from sklearn.externals import joblib\n", - "from azureml.core.model import Model\n", - "\n", - "\n", - "def init():\n", - " global model\n", - " model_path = Model.get_model_path(model_name = '<>') # this name is model.id of model that we want to deploy\n", - " # deserialize the model file back into a sklearn model\n", - " model = joblib.load(model_path)\n", - "\n", - "def run(rawdata):\n", - " try:\n", - " data = json.loads(rawdata)['data']\n", - " data = numpy.array(data)\n", - " result = model.predict(data)\n", - " except Exception as e:\n", - " result = str(e)\n", - " return json.dumps({\"error\": result})\n", - " return json.dumps({\"result\":result.tolist()})" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create a YAML File for the Environment\n", - "\n", - "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, myenv.yml, which specifies the dependencies from the run." 
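Because the scoring image should use the same SDK version that trained the model, the version returned by `get_run_sdk_dependencies` in the cells below can be pinned explicitly when the conda specification is built; a minimal sketch, assuming the `dependencies` dictionary created further down:

```python
from azureml.core.conda_dependencies import CondaDependencies

# 'dependencies' is the dict returned by ml_run.get_run_sdk_dependencies() below;
# pinning the exact version keeps the scoring image in sync with training.
pinned_env = CondaDependencies.create(
    conda_packages=["numpy", "scikit-learn", "py-xgboost<=0.80"],
    pip_packages=["azureml-sdk[automl]=={}".format(dependencies["azureml-sdk"])],
    python_version="3.6.8",
)
pinned_env.save_to_file(".", "autoenv.yml")
```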
- ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [], - "source": [ - "experiment=Experiment(ws, 'automated-ml-regression')\n", - "ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)" - ] - }, - { - "cell_type": "code", - "execution_count": 28, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No issues found in the SDK package versions.\n" - ] - } - ], - "source": [ - "dependencies = ml_run.get_run_sdk_dependencies(iteration = 7)" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "azureml-train-automl\t1.0.41\n", - "azureml-sdk\t1.0.41\n", - "azureml-core\t1.0.41.1\n" - ] - } - ], - "source": [ - "for p in ['azureml-train-automl', 'azureml-sdk', 'azureml-core']:\n", - " print('{}\\t{}'.format(p, dependencies[p]))" - ] - }, - { - "cell_type": "code", - "execution_count": 30, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'autoenv.yml'" - ] - }, - "execution_count": 30, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "from azureml.core.conda_dependencies import CondaDependencies\n", - "\n", - "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80'],\n", - " pip_packages=['azureml-sdk[automl]'], python_version = '3.6.8')\n", - "\n", - "conda_env_file_name = 'autoenv.yml'\n", - "myenv.save_to_file('.', conda_env_file_name)" - ] - }, - { - "cell_type": "code", - "execution_count": 31, - "metadata": {}, - "outputs": [], - "source": [ - "# Substitute the actual model id in the script file.\n", - "\n", - "script_file_name = 'score.py'\n", - "\n", - "with open(script_file_name, 'r') as cefr:\n", - " content = cefr.read()\n", - "\n", - "with open(script_file_name, 'w') as cefw:\n", - " cefw.write(content.replace('<>', local_run.model_id))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Create a Container Image" - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating image\n", - "Running.\n", - "NotStarted................................................\n", - "Succeeded\n", - "Image creation operation finished for image automl-image:1, operation \"Succeeded\"\n" - ] - } - ], - "source": [ - "from azureml.core.image import ContainerImage\n", - "\n", - "image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n", - " runtime = \"python\",\n", - " conda_file = \"autoenv.yml\",\n", - " description = \"Image with automl model\",\n", - " tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"})\n", - "\n", - "image = ContainerImage.create(name = \"automl-image\",\n", - " # this is the model object\n", - " models = [model],\n", - " image_config = image_config,\n", - " workspace = ws)\n", - "\n", - "image.wait_for_creation(show_output = True)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "https://maidaptest3334372853.blob.core.windows.net/azureml/ImageLogs/08bb1d92-0082-4b14-899b-3b829cf785be/build.log?sv=2018-03-28&sr=b&sig=LZLnU6O2ZjSPlgRbrN2V9iI%2FthozymlHLQOJzYIzWJY%3D&st=2019-06-18T14%3A52%3A34Z&se=2019-07-18T14%3A57%3A34Z&sp=rl\n" - ] - } - ], - "source": [ - "print(image.image_build_log_uri) " - ] - }, - { - 
"cell_type": "markdown", - "metadata": {}, - "source": [ - "### Deploy the Image as a Web Service on Azure Container Instance" - ] - }, - { - "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [], - "source": [ - "#Set the web service configuration (using default here)\n", - "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", - " memory_gb = 1)" - ] - }, - { - "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating service\n", - "Running...................\n", - "SucceededACI service creation operation finished, operation \"Succeeded\"\n", - "Healthy\n" - ] - } - ], - "source": [ - "# deploy image as web service\n", - "aci_service_name ='aci-service-automl1'\n", - "aci_service = Webservice.deploy_from_image(workspace = ws, \n", - " name = aci_service_name,\n", - " image = image,\n", - " deployment_config = aci_config)\n", - "\n", - "aci_service.wait_for_deployment(show_output = True)\n", - "print(aci_service.state)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Test" - ] - }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "21256649\n" - ] - } - ], - "source": [ - "# load multiple sentences\n", - "import pandas as pd\n", - "import json \n", - "\n", - "sentences = []\n", - "data = pd.read_csv(\"testing_set.csv\")\n", - "train_y = data['score'].values.flatten()\n", - "train_x = data.drop(\"score\", axis=1).values\n", - "\n", - "print(type(train_x))\n", - "\n", - "train_x = train_x.tolist()\n", - "data = {'data': train_x}\n", - "data = json.dumps(data)\n", - "print(len(data))\n", - "\n", - "#print(data)" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "nb sentences encoded : 27101\n" - ] - } - ], - "source": [ - "score = aci_service.run(input_data = data)\n", - "\n", - "# embeddings will print the error message incase error occurs.\n", - "print('nb sentences encoded : {0}'.format(len(score)))" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.7787081114999308\n" - ] - } - ], - "source": [ - "from scipy.stats import pearsonr\n", - "result = json.loads(score)\n", - "output = result[\"result\"]\n", - "print(pearsonr(output, train_y)[0])" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/scenarios/sentence_similarity/automl_deployment_local.ipynb b/scenarios/sentence_similarity/automl_deployment_local.ipynb deleted file mode 100644 index 9a61e40ef..000000000 --- a/scenarios/sentence_similarity/automl_deployment_local.ipynb +++ /dev/null @@ -1,1085 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Copyright (c) Microsoft Corporation. All rights reserved.\n", - "\n", - "Licensed under the MIT License." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Using AutoML for Predicting Sentence Similarity" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This notebook demonstrates how to use Azure AutoML locally to automate machine learning model selection and tuning and how to use Azure Container Instances (ACI) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and make use of AutoML's text preprocessing features." - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Table of Contents\n", - "1. [Introduction](#1.-Introduction) \n", - "    * 1.1 [What is Azure AutoML?](#1.1-What-is-Azure-AutoML?) \n", - "    * 1.2 [Modeling Problem](#1.2-Modeling-Problem) \n", - "    \n", - "    \n", - "2. [Data Preparation](#2.-Data-Preparation) \n", - "\n", - "\n", - "3. [Create AutoML Run](#3.-Create-AutoML-Run) \n", - "    * 3.1 [Link to or create a Workspace](#3.1-Link-to-or-create-a-Workspace) \n", - "    * 3.2 [Create AutoMLConfig object](#3.2-Create-AutoMLConfig-object)\n", - "    * 3.3 [Run Experiment](#3.3-Run-Experiment)\n", - "    \n", - "    \n", - "4. [Deploy Sentence Similarity Model](#4.-Deploy-Sentence-Similarity-Model) \n", - "    4.1 [Retrieve the Best Model](#4.1-Retrieve-the-Best-Model) \n", - "    4.2 [Register the Fitted Model for Deployment](#4.2-Register-the-Fitted-Model-for-Deployment) \n", - "    4.3 [Create Scoring Script](#4.3-Create-Scoring-Script) \n", - "    4.4 [Create a YAML File for the Environment](#4.4-Create-a-YAML-File-for-the-Environment) \n", - "    4.5 [Create a Container Image](#4.5-Create-a-Container-Image) \n", - "    4.6 [Deploy the Image as a Web Service on Azure Container Instance](#4.6-Deploy-the-Image-as-a-Web-Service-on-Azure-Container-Instance) \n", - "    4.7 [Test Deployed Model](#4.7-Test-Deployed-Model) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1.1 What is Azure AutoML?\n", - "\n", - "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To achieve this goal, AutoML automates the process of selecting an ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", - "\n", - "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![](automl.png)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in Python notebooks through the use of widgets. AutoML quickly selects a high quality machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). 
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1.2 Modeling Problem\n", - "\n", - "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence). The sentence pairs will be embedded using AutoML's built-in preprocessing, so we'll pass the sentences directly into the model." - ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Turning diagnostics collection on. \n", - "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", - "Azure ML SDK Version: 1.0.41\n", - "Pandas version: 0.23.4\n", - "Tensorflow Version: 1.13.1\n" - ] - } - ], - "source": [ - "# Set the environment path to find NLP\n", - "import sys\n", - "sys.path.append(\"../../\")\n", - "import time\n", - "import os\n", - "import pandas as pd\n", - "import shutil\n", - "import numpy as np\n", - "import torch\n", - "import sys\n", - "from scipy.stats import pearsonr\n", - "from scipy.spatial import distance\n", - "from sklearn.externals import joblib\n", - "import json\n", - "\n", - "# Import utils\n", - "from utils_nlp.azureml import azureml_utils\n", - "from utils_nlp.dataset import stsbenchmark\n", - "from utils_nlp.dataset.preprocess import (\n", - " to_lowercase,\n", - " to_spacy_tokens,\n", - " rm_spacy_stopwords,\n", - ")\n", - "from utils_nlp.common.timer import Timer\n", - "\n", - "# Tensorflow dependencies for Google Universal Sentence Encoder\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", - "\n", - "# AzureML packages\n", - "import azureml as aml\n", - "import logging\n", - "from azureml.telemetry import set_diagnostics_collection\n", - "set_diagnostics_collection(send_diagnostics=True)\n", - "from azureml.train.automl import AutoMLConfig\n", - "from azureml.core.experiment import Experiment\n", - "from azureml.widgets import RunDetails\n", - "from azureml.train.automl.run import AutoMLRun\n", - "from azureml.core.webservice import AciWebservice, Webservice\n", - "from azureml.core.image import ContainerImage\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", - "\n", - "print(\"System version: {}\".format(sys.version))\n", - "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", - "print(\"Pandas version: {}\".format(pd.__version__))\n", - "print(\"Tensorflow Version:\", tf.VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "BASE_DATA_PATH = '../../data'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 2. 
Data Preparation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## STS Benchmark Dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 195KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 165KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 225KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - } - ], - "source": [ - "# Load in the raw datasets as pandas dataframes\n", - "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", - "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", - "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", - "# and renaming the remaining columns\n", - "train_clean = stsbenchmark.clean_sts(train_raw)\n", - "dev_clean = stsbenchmark.clean_sts(dev_raw)\n", - "test_clean = stsbenchmark.clean_sts(test_raw)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "# Convert all text to lowercase\n", - "train = to_lowercase(train_clean)\n", - "dev = to_lowercase(dev_clean)\n", - "test = to_lowercase(test_clean)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Training set has 5749 sentences\n", - "Development set has 1500 sentences\n", - "Testing set has 1379 sentences\n" - ] - } - ], - "source": [ - "print(\"Training set has {} sentences\".format(len(train)))\n", - "print(\"Development set has {} sentences\".format(len(dev)))\n", - "print(\"Testing set has {} sentences\".format(len(test)))" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
scoresentence1sentence2
05.00a plane is taking off.an air plane is taking off.
13.80a man is playing a large flute.a man is playing a flute.
23.80a man is spreading shreded cheese on a pizza.a man is spreading shredded cheese on an uncoo...
32.60three men are playing chess.two men are playing chess.
44.25a man is playing the cello.a man seated is playing the cello.
\n", - "
" - ], - "text/plain": [ - " score sentence1 \\\n", - "0 5.00 a plane is taking off. \n", - "1 3.80 a man is playing a large flute. \n", - "2 3.80 a man is spreading shreded cheese on a pizza. \n", - "3 2.60 three men are playing chess. \n", - "4 4.25 a man is playing the cello. \n", - "\n", - " sentence2 \n", - "0 an air plane is taking off. \n", - "1 a man is playing a flute. \n", - "2 a man is spreading shredded cheese on an uncoo... \n", - "3 two men are playing chess. \n", - "4 a man seated is playing the cello. " - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "train.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 3. Create AutoML Run" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "AutoML can be used for classification, regression or timeseries experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3.1 Link to or create a Workspace" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0623 22:36:11.699752 28712 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", - "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", - "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0623 22:36:12.707345 26644 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "W0623 22:36:21.440700 28712 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Interactive authentication successfully completed.\n" - ] - } - ], - "source": [ - "ws = azureml_utils.get_or_create_workspace(\n", - " subscription_id=\"\",\n", - " resource_group=\"\",\n", - " workspace_name=\"\",\n", - " workspace_region=\"\"\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep='\\n')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3.2 Create AutoMLConfig object\n", - "Next, we specify the parameters for the AutoMLConfig class. 
" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**task** \n", - "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**preprocess** \n", - "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**primary_metric** \n", - "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Constraints:** \n", - "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after achieving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "automl_settings = {\n", - " \"task\": 'regression', #type of task: classification, regression or forecasting\n", - " \"debug_log\": 'automated_ml_errors.log',\n", - " \"path\": './automated-ml-regression',\n", - " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", - " \"iterations\" : 50, #Number of algorithm options to try\n", - " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", - " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", - " \"verbosity\":logging.ERROR}" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "X_train = train.drop(\"score\", axis=1).values\n", - "y_train = train['score'].values.flatten()\n", - "X_validation = dev.drop(\"score\", axis=1).values\n", - "y_validation = dev['score'].values.flatten()\n", - "\n", - "# local compute\n", - "automated_ml_config = AutoMLConfig(\n", - " X = X_train,\n", - " y = y_train,\n", - " X_valid = X_validation,\n", - " y_valid = y_validation,\n", - " **automl_settings)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 3.3 Run Experiment\n", - "\n", - "Run the experiment locally and inspect the results using a widget" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running on local machine\n", - "Parent Run ID: AutoML_5d78c9ca-8ef5-4de8-8185-ff8d9c215fd2\n", - "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", - "Current status: DatasetEvaluation. Gathering dataset statistics.\n", - "Current status: FeaturesGeneration. Generating features for the dataset.\n", - "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", - "Current status: ModelSelection. 
Beginning model selection.\n", - "\n", - "****************************************************************************************************\n", - "ITERATION: The iteration being evaluated.\n", - "PIPELINE: A summary description of the pipeline being evaluated.\n", - "DURATION: Time taken for the current iteration.\n", - "METRIC: The result of computing score on the fitted pipeline.\n", - "BEST: The best observed score thus far.\n", - "****************************************************************************************************\n", - "\n", - " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:00:55 0.0551 0.0551\n", - " 1 MaxAbsScaler RandomForest 0:02:03 0.2161 0.2161\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:01:11 0.1536 0.2161\n", - " 3 StandardScalerWrapper LightGBM 0:01:18 0.2905 0.2905\n", - " 4 StandardScalerWrapper RandomForest 0:00:55 0.0993 0.2905\n", - " 5 MaxAbsScaler ExtremeRandomTrees 0:01:26 0.2212 0.2905\n", - " 6 StandardScalerWrapper ExtremeRandomTrees 0:01:16 0.2179 0.2905\n", - " 7 MaxAbsScaler DecisionTree 0:01:04 0.1751 0.2905\n", - " 8 MaxAbsScaler ExtremeRandomTrees 0:01:07 0.1676 0.2905\n", - " 9 MaxAbsScaler SGD 0:00:57 0.1439 0.2905\n", - " 10 StandardScalerWrapper RandomForest 0:00:56 0.0106 0.2905\n", - " 11 StandardScalerWrapper DecisionTree 0:00:57 0.1145 0.2905\n", - " 12 MaxAbsScaler SGD 0:01:06 0.1320 0.2905\n", - " 13 MaxAbsScaler DecisionTree 0:00:59 0.1490 0.2905\n", - " 14 StandardScalerWrapper RandomForest 0:01:08 0.1994 0.2905\n", - " 15 MaxAbsScaler RandomForest 0:00:59 0.0474 0.2905\n", - " 16 MaxAbsScaler ElasticNet 0:00:59 nan 0.2905\n", - " 17 MaxAbsScaler ExtremeRandomTrees 0:00:56 0.0972 0.2905\n", - " 18 MaxAbsScaler DecisionTree 0:00:57 0.1686 0.2905\n", - " 19 StandardScalerWrapper RandomForest 0:00:57 0.1139 0.2905\n", - " 20 StandardScalerWrapper LightGBM 0:01:41 0.6102 0.6102\n", - " 21 MaxAbsScaler DecisionTree 0:06:03 0.1807 0.6102\n", - " 22 StandardScalerWrapper XGBoostRegressor 0:02:01 0.2740 0.6102\n", - " 23 StandardScalerWrapper LightGBM 0:01:14 0.3608 0.6102\n", - " 24 MaxAbsScaler DecisionTree 0:05:05 0.1105 0.6102\n", - " 25 TruncatedSVDWrapper LightGBM 0:03:45 0.4076 0.6102\n", - " 26 MaxAbsScaler RandomForest 0:09:24 0.1617 0.6102\n", - " 27 StandardScalerWrapper RandomForest 0:04:49 0.1695 0.6102\n", - " 28 MaxAbsScaler DecisionTree 0:01:18 0.1237 0.6102\n", - " 29 StandardScalerWrapper XGBoostRegressor 0:14:31 0.3548 0.6102\n", - " 30 TruncatedSVDWrapper LightGBM 0:03:20 0.3898 0.6102\n", - " 31 StandardScalerWrapper XGBoostRegressor 0:05:11 0.3367 0.6102\n", - " 32 StandardScalerWrapper XGBoostRegressor 0:10:10 0.4319 0.6102\n", - " 33 StandardScalerWrapper XGBoostRegressor 0:04:53 0.3045 0.6102\n", - " 34 SparseNormalizer XGBoostRegressor 0:06:14 0.3080 0.6102\n", - " 35 MaxAbsScaler LightGBM 0:01:29 0.2507 0.6102\n", - " 36 StandardScalerWrapper LightGBM 0:01:16 0.3288 0.6102\n", - " 37 MaxAbsScaler LightGBM 0:01:22 0.4191 0.6102\n", - " 38 StandardScalerWrapper XGBoostRegressor 0:14:07 0.4284 0.6102\n", - " 39 StandardScalerWrapper XGBoostRegressor 0:01:30 0.2736 0.6102\n", - " 40 TruncatedSVDWrapper LightGBM 0:02:53 0.3996 0.6102\n", - " 41 StandardScalerWrapper XGBoostRegressor 0:04:11 0.3436 0.6102\n", - " 42 StandardScalerWrapper XGBoostRegressor 0:05:39 0.3030 0.6102\n", - " 43 MaxAbsScaler LightGBM 0:01:22 0.3371 0.6102\n", - " 44 MaxAbsScaler LightGBM 0:01:24 0.4611 0.6102\n", - " 45 TruncatedSVDWrapper LightGBM 0:02:57 0.2378 0.6102\n", - " 46 
MaxAbsScaler LightGBM 0:01:20 0.6012 0.6102\n", - " 47 SparseNormalizer XGBoostRegressor 0:01:18 0.3183 0.6102\n", - " 48 0:15:12 nan 0.6102\n", - "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. Please consider increasing the iteration_timeout_minutes parameter.\n", - " 49 StackEnsemble 0:07:40 0.6485 0.6485\n" - ] - } - ], - "source": [ - "experiment=Experiment(ws, 'automated-ml-regression')\n", - "local_run = experiment.submit(automated_ml_config, show_output=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, by accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Inspect the run details using the provided widget\n", - "RunDetails(local_run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "![](autoMLwidget.PNG)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# 4. Deploy Sentence Similarity Model\n", - "\n", - "## 4.1 Retrieve the Best Model\n", - "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the get_output method which returns the best run and fitted model across all iterations. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", - "\n", - "The different steps that make up the pipeline can be accessed through `fitted_model.named_steps` and information about data preprocessing is available through `fitted_model.named_steps['datatransformer'].get_featurization_summary()`" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "best_run, fitted_model = local_run.get_output()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4.2 Register the Fitted Model for Deployment\n", - "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered." 
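The cell below uses the default call; as a minimal sketch of the `get_output` overloads mentioned above (the `metric` and `iteration` argument names are taken from the SDK documentation for this release and should be treated as an assumption):

```python
# Default: best run and fitted model by the primary metric (same call as the cell below)
best_run, fitted_model = local_run.get_output()

# Sketch of the overloads described above (argument names assumed):
# best model according to a different logged metric
best_r2_run, best_r2_model = local_run.get_output(metric="r2_score")
# fitted model from one particular iteration of the experiment
run_20, model_20 = local_run.get_output(iteration=20)
```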
- ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Registering model AutoML5d78c9ca8best\n", - "AutoML5d78c9ca8best\n" - ] - } - ], - "source": [ - "description = 'AutoML Model'\n", - "tags = {'area': \"nlp\", 'type': \"sentence similarity automl\"}\n", - "name = 'automl'\n", - "model = local_run.register_model(description = description, tags = tags)\n", - "\n", - "print(local_run.model_id) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4.3 Create Scoring Script" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Overwriting score.py\n" - ] - } - ], - "source": [ - "%%writefile score.py\n", - "import pickle\n", - "import json\n", - "import numpy\n", - "import azureml.train.automl\n", - "from sklearn.externals import joblib\n", - "from azureml.core.model import Model\n", - "\n", - "\n", - "def init():\n", - " global model\n", - " model_path = Model.get_model_path(model_name = '<>') # this name is model.id of model that we want to deploy\n", - " # deserialize the model file back into a sklearn model\n", - " model = joblib.load(model_path)\n", - "\n", - "def run(rawdata):\n", - " try:\n", - " data = json.loads(rawdata)['data']\n", - " data = numpy.array(data)\n", - " result = model.predict(data)\n", - " except Exception as e:\n", - " result = str(e)\n", - " return json.dumps({\"error\": result})\n", - " return json.dumps({\"result\":result.tolist()})" - ] - }, - { - "cell_type": "code", - "execution_count": 41, - "metadata": {}, - "outputs": [], - "source": [ - "# Substitute the actual model id in the script file.\n", - "script_file_name = 'score.py'\n", - "\n", - "with open(script_file_name, 'r') as cefr:\n", - " content = cefr.read()\n", - "\n", - "with open(script_file_name, 'w') as cefw:\n", - " cefw.write(content.replace('<>', local_run.model_id))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4.4 Create a YAML File for the Environment\n", - "\n", - "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, autoenv.yml, which specifies the dependencies from the run." 
- ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "experiment = Experiment(ws, 'automated-ml-regression')\n", - "ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)" - ] - }, - { - "cell_type": "code", - "execution_count": 25, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No issues found in the SDK package versions.\n" - ] - } - ], - "source": [ - "best_iteration = int(best_run.id.split(\"_\")[-1]) #get the appended iteration number for the best model\n", - "dependencies = ml_run.get_run_sdk_dependencies(iteration = best_iteration)" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'azureml-widgets': '1.0.41',\n", - " 'azureml-train': '1.0.41',\n", - " 'azureml-train-restclients-hyperdrive': '1.0.41',\n", - " 'azureml-train-core': '1.0.41.1',\n", - " 'azureml-train-automl': '1.0.41',\n", - " 'azureml-telemetry': '1.0.41',\n", - " 'azureml-sdk': '1.0.41',\n", - " 'azureml-pipeline': '1.0.41',\n", - " 'azureml-pipeline-steps': '1.0.41',\n", - " 'azureml-pipeline-core': '1.0.41.1',\n", - " 'azureml-explain-model': '1.0.41',\n", - " 'azureml-dataprep': '1.1.4',\n", - " 'azureml-dataprep-native': '13.0.0',\n", - " 'azureml-core': '1.0.41.1',\n", - " 'azureml-automl-core': '1.0.41'}" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "dependencies" - ] - }, - { - "cell_type": "code", - "execution_count": 42, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'autoenv.yml'" - ] - }, - "execution_count": 42, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80'],\n", - " pip_packages=['azureml-sdk[automl]'], \n", - " python_version = '3.6.8')\n", - "\n", - "conda_env_file_name = 'autoenv.yml'\n", - "myenv.save_to_file('.', conda_env_file_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4.5 Create a Container Image" - ] - }, - { - "cell_type": "code", - "execution_count": 43, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating image\n", - "Running.....................................................\n", - "Succeeded\n", - "Image creation operation finished for image automl-image:4, operation \"Succeeded\"\n" - ] - } - ], - "source": [ - "image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n", - " runtime = \"python\",\n", - " conda_file = \"autoenv.yml\",\n", - " description = \"Image with automl model\",\n", - " tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"})\n", - "\n", - "image = ContainerImage.create(name = \"automl-image\",\n", - " # this is the model object\n", - " models = [model],\n", - " image_config = image_config,\n", - " workspace = ws)\n", - "\n", - "image.wait_for_creation(show_output = True)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 35, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "https://maidaptest3334372853.blob.core.windows.net/azureml/ImageLogs/47396d4d-c4fb-4706-9f18-bb78e20eefbb/build.log?sv=2018-03-28&sr=b&sig=nnAVwkrhuOjZ%2FMO8%2BJtyov2qIYJMzofWqmcoSvPvKCg%3D&st=2019-06-24T12%3A50%3A55Z&se=2019-07-24T12%3A55%3A55Z&sp=rl\n" - ] - } - ], - "source": [ - 
"print(image.image_build_log_uri) " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4.6 Deploy the Image as a Web Service on Azure Container Instance" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [], - "source": [ - "#Set the web service configuration (using default here)\n", - "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", - " memory_gb = 1)" - ] - }, - { - "cell_type": "code", - "execution_count": 48, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating service\n", - "Running............................\n", - "SucceededACI service creation operation finished, operation \"Succeeded\"\n", - "Healthy\n" - ] - } - ], - "source": [ - "# deploy image as web service\n", - "aci_service_name ='aci-service-automl-local'\n", - "aci_service = Webservice.deploy_from_image(workspace = ws, \n", - " name = aci_service_name,\n", - " image = image,\n", - " deployment_config = aci_config)\n", - "\n", - "aci_service.wait_for_deployment(show_output = True)\n", - "print(aci_service.state)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 4.7 Test Deployed Model" - ] - }, - { - "cell_type": "code", - "execution_count": 49, - "metadata": {}, - "outputs": [], - "source": [ - "sentences = []\n", - "test_y = test['score'].values.flatten()\n", - "test_x = test.drop(\"score\", axis=1).values.tolist()\n", - "\n", - "data = {'data': test_x}\n", - "data = json.dumps(data)" - ] - }, - { - "cell_type": "code", - "execution_count": 50, - "metadata": {}, - "outputs": [], - "source": [ - "# Set up a Timer to see how long the model takes to train\n", - "t = Timer()" - ] - }, - { - "cell_type": "code", - "execution_count": 51, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Time elapsed: 3.2256\n", - "Number of sentences encoded : 1379\n" - ] - } - ], - "source": [ - "t.start()\n", - "score = aci_service.run(input_data = data)\n", - "t.stop()\n", - "print(\"Time elapsed: {}\".format(t))\n", - "\n", - "result = json.loads(score)\n", - "try:\n", - " output = result[\"result\"]\n", - " print('Number of sentences encoded : {0}'.format(len(output)))\n", - "except:\n", - " print(result['error'])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, we'll calculate the Pearson Correlation on the test set.\n", - "\n", - "**What is Pearson Correlation?**\n", - "\n", - "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", - "\n", - "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", - "\n", - "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." 
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 53,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "0.6069626886131223\n"
- ]
- }
- ],
- "source": [
- "print(pearsonr(output, test_y)[0])"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.7"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
diff --git a/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb b/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb
deleted file mode 100644
index 53150b96e..000000000
--- a/scenarios/sentence_similarity/automl_google_universal_sentence_encoder.ipynb
+++ /dev/null
@@ -1,6367 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Copyright (c) Microsoft Corporation. All rights reserved.\n",
- "\n",
- "Licensed under the MIT License."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Using AutoML for Predicting Sentence Similarity"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "This notebook demonstrates how to use Azure AutoML to automate machine learning model selection and tuning. It also demonstrates how to use a popular sentence embedding model from Google, the Universal Sentence Encoder. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### What is Azure AutoML?\n",
- "\n",
- "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To achieve this goal, AutoML automates the process of selecting an ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n",
- "\n",
- "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "![](automl.png)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in Python notebooks through the use of widgets. AutoML quickly selects a high quality machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### Modeling Problem\n",
- "\n",
- "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. 
The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence).\n", - "\n", - "For each sentence in the sentence pair, we will use Google's pretrained Universal Sentence Encoder (details provided below) to generate a $512$-dimensional embedding. Both embeddings in the sentence pair will be concatenated and the resulting $1024$-dimensional vector will be used as features in our regression problem. Our target variable is the sentence similarity score." - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING: Logging before flag parsing goes to stderr.\n", - "W0614 12:29:04.807609 38572 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Turning diagnostics collection on. \n", - "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", - "Azure ML SDK Version: 1.0.41\n", - "Pandas version: 0.23.4\n", - "Tensorflow Version: 1.13.1\n" - ] - } - ], - "source": [ - "# Set the environment path to find NLP\n", - "import sys\n", - "sys.path.append(\"../../\")\n", - "import time\n", - "import os\n", - "import pandas as pd\n", - "import shutil\n", - "import numpy as np\n", - "import torch\n", - "import sys\n", - "from scipy.stats import pearsonr\n", - "from scipy.spatial import distance\n", - "from sklearn.externals import joblib\n", - "\n", - "# Import utils\n", - "from utils_nlp.azureml import azureml_utils\n", - "from utils_nlp.dataset import stsbenchmark\n", - "from utils_nlp.dataset.preprocess import (\n", - " to_lowercase,\n", - " to_spacy_tokens,\n", - " rm_spacy_stopwords,\n", - ")\n", - "\n", - "# Tensorflow dependencies for Google Universal Sentence Encoder\n", - "import tensorflow as tf\n", - "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", - "\n", - "# AzureML packages\n", - "import azureml as aml\n", - "import logging\n", - "from azureml.telemetry import set_diagnostics_collection\n", - "set_diagnostics_collection(send_diagnostics=True)\n", - "from azureml.train.automl import AutoMLConfig\n", - "from azureml.core.experiment import Experiment\n", - "from azureml.widgets import RunDetails\n", - "\n", - "print(\"System version: {}\".format(sys.version))\n", - "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", - "print(\"Pandas version: {}\".format(pd.__version__))\n", - "print(\"Tensorflow Version:\", tf.VERSION)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "BASE_DATA_PATH = '../../data'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Data Preparation" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## STS Benchmark Dataset" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. 
We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|███████████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 271KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|███████████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 273KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|███████████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 288KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - } - ], - "source": [ - "# Load in the raw datasets as pandas dataframes\n", - "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", - "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", - "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", - "# and renaming the remaining columns\n", - "train = stsbenchmark.clean_sts(train_raw)\n", - "dev = stsbenchmark.clean_sts(dev_raw)\n", - "test = stsbenchmark.clean_sts(test_raw)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Training set has 5749 sentences\n", - "Development set has 1500 sentences\n", - "Testing set has 1379 sentences\n" - ] - } - ], - "source": [ - "print(\"Training set has {} sentences\".format(len(train)))\n", - "print(\"Development set has {} sentences\".format(len(dev)))\n", - "print(\"Testing set has {} sentences\".format(len(test)))" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
scoresentence1sentence2
05.00A plane is taking off.An air plane is taking off.
13.80A man is playing a large flute.A man is playing a flute.
23.80A man is spreading shreded cheese on a pizza.A man is spreading shredded cheese on an uncoo...
32.60Three men are playing chess.Two men are playing chess.
44.25A man is playing the cello.A man seated is playing the cello.
\n", - "
" - ], - "text/plain": [ - " score sentence1 \\\n", - "0 5.00 A plane is taking off. \n", - "1 3.80 A man is playing a large flute. \n", - "2 3.80 A man is spreading shreded cheese on a pizza. \n", - "3 2.60 Three men are playing chess. \n", - "4 4.25 A man is playing the cello. \n", - "\n", - " sentence2 \n", - "0 An air plane is taking off. \n", - "1 A man is playing a flute. \n", - "2 A man is spreading shredded cheese on an uncoo... \n", - "3 Two men are playing chess. \n", - "4 A man seated is playing the cello. " - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "train.head(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Feature Engineering: Universal Sentence Encoder" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have our sentence pairs loaded, we will convert these sentences into a numerical representation in order to use them in our machine learning model. To do this, we'll use a popular sentence encoder called Google Universal Sentence Encoder (see [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). Google provides two pretrained models based on different design goals: a Transformer model (targets high accuracy even if this reduces model complexity) and a Deep Averaging Network model (DAN; targets efficient inference). Both models are trained on a variety of web sources (Wikipedia, news, question-answers pages, and discussion forums) and produced 512-dimensional embeddings. This notebook utilizes the Transformer-based encoding model which can be downloaded [here](https://tfhub.dev/google/universal-sentence-encoder-large/3) because of its better performance relative to the DAN model on the STS Benchmark dataset (see Table 2 in Google Research's [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf)). " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Google Universal Sentence Encoder: Transformer Model** The Transformer model produces sentence embeddings using the \"encoding sub-graph of the transformer architecture\" (original architecture introduced [here](https://arxiv.org/abs/1706.03762)). \"This sub-graph uses attention to compute context aware representations of words in a sentence that take into account both the ordering and identity of all the other workds. The context aware word representations are converted to a fixed length sentence encoding vector by computing the element-wise sum of the representations at each word position.\" The input to the model is lowercase PTB-tokenized strings and the model is designed to be useful for multiple different tasks by using multi-task learning. More details about the model can be found in the [paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46808.pdf) by Google Research." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Using the Pretrained Model**\n", - "\n", - "Tensorflow-hub provides the pretrained model for use by the public. We import the model from its url and then feed the model our sentences for it to encode." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", - "\n", - "# Import the Universal Sentence Encoder's TF Hub module\n", - "embedding_model = hub.Module(module_url)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "def google_encoder(dataset):\n", - " \"\"\" Function that embeds sentences using the Google Universal\n", - " Sentence Encoder pretrained model\n", - " \n", - " Parameters:\n", - " ----------\n", - " dataset: pandas dataframe with sentences and scores\n", - " \n", - " Returns:\n", - " -------\n", - " emb1: 512-dimensional representation of sentence1\n", - " emb2: 512-dimensional representation of sentence2\n", - " \"\"\"\n", - " sts_input1 = tf.placeholder(tf.string, shape=(None))\n", - " sts_input2 = tf.placeholder(tf.string, shape=(None))\n", - "\n", - " # Apply embedding model and normalize the input\n", - " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", - " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", - " \n", - " with tf.Session() as session:\n", - " session.run(tf.global_variables_initializer())\n", - " session.run(tf.tables_initializer())\n", - " emb1, emb2 = session.run(\n", - " [sts_encode1, sts_encode2],\n", - " feed_dict={\n", - " sts_input1: dataset['sentence1'],\n", - " sts_input2: dataset['sentence2']\n", - " })\n", - " return emb1, emb2" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As features, we will embed both sentences using the Google Universal Sentence Encoder and concatenate their representations into a $1024$-dimensional vector. The resulting data will be saved in a dataframe for consumption by our AutoML model." 
- ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "def feature_engineering(dataset):\n", - " \"\"\"Extracts embedding features from the dataset and returns\n", - " features and target in a dataframe\n", - " \n", - " Parameters:\n", - " ----------\n", - " dataset: pandas dataframe with sentences and scores\n", - " \n", - " Returns:\n", - " -------\n", - " df: pandas dataframe with embedding features and target variable\n", - " \"\"\"\n", - " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", - " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", - " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", - " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", - " df = pd.DataFrame(df, columns=names)\n", - " df['score'] = dataset['score'].tolist()\n", - " return df" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "training_data = feature_engineering(train)\n", - "validation_data = feature_engineering(dev)\n", - "testing_data = feature_engineering(test)" - ] - }, - { - "cell_type": "code", - "execution_count": 60, - "metadata": {}, - "outputs": [], - "source": [ - "#Take this out later\n", - "training_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_train.csv\"), index=None)\n", - "testing_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_test.csv\"), index=None)\n", - "validation_data.to_csv(os.path.join(featurized_data_location,\"googleUSE_features_dev.csv\"), index=None)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Calculate Baseline Performance" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Before using AutoML we will calculate a baseline to compare the AutoML results to. For the baseline we will take the Google Universal Sentence Encoder embeddings of each sentence, calculate the cosine similarity between the two sentence embeddings, then compare the predicted values with the true scores using pearson correlation. " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### What is Pearson Correlation?\n", - "\n", - "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", - "\n", - "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", - "\n", - "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." 
- ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "def get_baseline_performance(data):\n", - " \"\"\" Get baseline performance by calculating the cosine similarity between\n", - " the embeddings in the sentence pair and then evaluating the pearson \n", - " correlation between the predicted and true similarity scores\n", - " \n", - " Parameters:\n", - " ----------\n", - " data: dataframe containing embeddings and similarity scores\n", - " \"\"\"\n", - " emb1 = data[[i for i in data.columns if 'USEEmb1' in i]].values.tolist()\n", - " emb2 = data[[i for i in data.columns if 'USEEmb2' in i]].values.tolist()\n", - " scores = data['score'].values.tolist()\n", - " \n", - " predictions = [1-distance.cosine(emb1[i], emb2[i]) for i in range(len(emb1))]\n", - " print(\"Google Universal Sentence Encoder Pearson Correlation:\", round(pearsonr(predictions, scores)[0],3))" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Google Universal Sentence Encoder Pearson Correlation: 0.764\n" - ] - } - ], - "source": [ - "get_baseline_performance(testing_data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# AutoML" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "AutoML can be used for classification, regression or timeseries experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist." - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0614 12:37:29.502033 38572 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", - "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", - "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0614 12:37:29.827009 11124 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "W0614 12:37:37.015321 38572 _profile.py:774] You have logged in. 
Now let us find all the subscriptions to which you have access...\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Interactive authentication successfully completed.\n",
- "Workspace name: MAIDAPTest\n",
- "Azure region: eastus2\n",
- "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n",
- "Resource group: nlprg\n"
- ]
- }
- ],
- "source": [
- "ws = azureml_utils.get_or_create_workspace(\n",
- " subscription_id=\"\",\n",
- " resource_group=\"\",\n",
- " workspace_name=\"\",\n",
- " workspace_region=\"\"\n",
- ")\n",
- "print('Workspace name: ' + ws.name, \n",
- " 'Azure region: ' + ws.location, \n",
- " 'Subscription id: ' + ws.subscription_id, \n",
- " 'Resource group: ' + ws.resource_group, sep='\\n')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## AutoMLConfig Parameters\n",
- "Next, we specify the parameters for the AutoMLConfig class. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**task** \n",
- "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**preprocess** \n",
- "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**primary_metric** \n",
- "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) "
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "**Constraints:** \n",
- "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after achieving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "automl_settings = {\n", - " \"task\": 'regression', #type of task: classification, regression or forecasting\n", - " \"debug_log\": 'automated_ml_errors.log',\n", - " \"path\": './automated-ml-regression',\n", - " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", - " \"iterations\" : 50, #Number of algorithm options to try\n", - " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", - " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", - " \"verbosity\":logging.ERROR}" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "X_train = training_data.drop(\"score\", axis=1).values\n", - "y_train = training_data['score'].values.flatten()\n", - "X_validation = validation_data.drop(\"score\", axis=1).values\n", - "y_validation = validation_data['score'].values.flatten()\n", - "\n", - "# local compute\n", - "automated_ml_config = AutoMLConfig(\n", - " X = X_train,\n", - " y = y_train,\n", - " X_valid = X_validation,\n", - " y_valid = y_validation,\n", - " **automl_settings)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Run the Experiment\n", - "\n", - "Run the experiment locally and inspect the results using a widget" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running on local machine\n", - "Parent Run ID: AutoML_91012708-fd8f-405f-afed-4c42f8fe3fc6\n", - "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", - "Current status: DatasetEvaluation. Gathering dataset statistics.\n", - "Current status: FeaturesGeneration. Generating features for the dataset.\n", - "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", - "Current status: ModelSelection. 
Beginning model selection.\n", - "\n", - "****************************************************************************************************\n", - "ITERATION: The iteration being evaluated.\n", - "PIPELINE: A summary description of the pipeline being evaluated.\n", - "DURATION: Time taken for the current iteration.\n", - "METRIC: The result of computing score on the fitted pipeline.\n", - "BEST: The best observed score thus far.\n", - "****************************************************************************************************\n", - "\n", - " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:00:35 0.1791 0.1791\n", - " 1 MinMaxScaler RandomForest 0:01:04 0.4340 0.4340\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:13 0.2467 0.4340\n", - " 3 StandardScalerWrapper LightGBM 0:00:10 0.2708 0.4340\n", - " 4 RobustScaler DecisionTree 0:00:14 0.2435 0.4340\n", - " 5 StandardScalerWrapper LassoLars 0:00:08 0.1246 0.4340\n", - " 6 StandardScalerWrapper LightGBM 0:00:11 0.6567 0.6567\n", - " 7 StandardScalerWrapper RandomForest 0:00:11 0.2160 0.6567\n", - " 8 StandardScalerWrapper LassoLars 0:00:10 0.0836 0.6567\n", - " 9 MinMaxScaler ExtremeRandomTrees 0:00:12 0.3599 0.6567\n", - " 10 RobustScaler ExtremeRandomTrees 0:00:35 0.3491 0.6567\n", - " 11 StandardScalerWrapper ExtremeRandomTrees 0:00:11 0.3217 0.6567\n", - " 12 MinMaxScaler ExtremeRandomTrees 0:00:13 0.2308 0.6567\n", - " 13 RobustScaler RandomForest 0:00:15 0.3675 0.6567\n", - " 14 StandardScalerWrapper LassoLars 0:00:08 nan 0.6567\n", - " 15 StandardScalerWrapper ExtremeRandomTrees 0:00:08 0.1977 0.6567\n", - " 16 StandardScalerWrapper RandomForest 0:00:09 0.2468 0.6567\n", - " 17 MinMaxScaler SGD 0:00:08 0.0797 0.6567\n", - " 18 StandardScalerWrapper RandomForest 0:00:22 0.3277 0.6567\n", - " 19 MinMaxScaler RandomForest 0:00:09 0.1681 0.6567\n", - " 20 StandardScalerWrapper LightGBM 0:00:47 0.7412 0.7412\n", - " 21 StandardScalerWrapper XGBoostRegressor 0:02:18 0.6772 0.7412\n", - " 22 StandardScalerWrapper LightGBM 0:00:16 0.6983 0.7412\n", - " 23 StandardScalerWrapper LightGBM 0:00:17 0.6864 0.7412\n", - " 24 StandardScalerWrapper DecisionTree 0:02:32 0.2330 0.7412\n", - " 25 MaxAbsScaler LightGBM 0:00:13 0.3161 0.7412\n", - " 26 StandardScalerWrapper LightGBM 0:00:39 0.5771 0.7412\n", - " 27 StandardScalerWrapper XGBoostRegressor 0:01:03 0.6196 0.7412\n", - " 28 StandardScalerWrapper XGBoostRegressor 0:03:07 0.7688 0.7688\n", - " 29 StandardScalerWrapper XGBoostRegressor 0:04:49 0.7275 0.7688\n", - " 30 TruncatedSVDWrapper XGBoostRegressor 0:00:59 0.7438 0.7688\n", - " 31 StandardScalerWrapper XGBoostRegressor 0:00:33 0.6567 0.7688\n", - " 32 StandardScalerWrapper RandomForest 0:01:50 0.4262 0.7688\n", - " 33 StandardScalerWrapper XGBoostRegressor 0:00:54 0.6762 0.7688\n", - " 34 StandardScalerWrapper LightGBM 0:00:20 0.4265 0.7688\n", - " 35 TruncatedSVDWrapper XGBoostRegressor 0:01:14 0.7097 0.7688\n", - " 36 0:15:13 nan 0.7688\n", - "ERROR: Fit operation exceeded provided timeout, terminating and moving onto the next iteration. 
Please consider increasing the iteration_timeout_minutes parameter.\n", - "         37   MinMaxScaler DecisionTree                      0:00:42          0.1453    0.7688\n", - "         38   StandardScalerWrapper XGBoostRegressor         0:00:57          0.6063    0.7688\n", - "         39   StandardScalerWrapper XGBoostRegressor         0:05:10          0.6577    0.7688\n", - "         40   TruncatedSVDWrapper XGBoostRegressor           0:00:40          0.7293    0.7688\n", - "         41   MaxAbsScaler LightGBM                          0:00:29          0.6281    0.7688\n", - "         42   StandardScalerWrapper XGBoostRegressor         0:11:03          0.5731    0.7688\n", - "         43   StandardScalerWrapper XGBoostRegressor         0:01:22          0.6788    0.7688\n", - "         44   SparseNormalizer XGBoostRegressor              0:00:53          0.7598    0.7688\n", - "         45   TruncatedSVDWrapper XGBoostRegressor           0:00:25          0.7171    0.7688\n", - "         46   SparseNormalizer XGBoostRegressor              0:03:20          0.6843    0.7688\n", - "         47   SparseNormalizer LightGBM                      0:11:34          0.6405    0.7688\n", - "         48   VotingEnsemble                                 0:01:04          0.8130    0.8130\n", - "         49   StackEnsemble                                  0:08:57          0.8130    0.8130\n" - ] - } - ], - "source": [ - "experiment=Experiment(ws, 'automated-ml-regression')\n", - "local_run = experiment.submit(automated_ml_config, show_output=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, by accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_, and then selecting the name and run number of the experiment you want to inspect. Both of these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "fb411f9a99e946958f9dd8717a6cada1", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Inspect the run details using the provided widget\n", - "RunDetails(local_run).show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Inspect the Best Model\n", - "\n", - "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case). The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). Our best model is a Pipeline with two steps: a DataTransformer step and a StackEnsembleRegressor step. We demonstrate how to extract additional information about what data transformations were used and which models make up the ensemble."
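Besides the RunDetails widget and the Azure portal, the same information can be pulled programmatically through the azureml-core Run API. A minimal sketch follows; it assumes the AutoML child runs log the primary metric under the key 'spearman_correlation', which may differ by SDK version.

# Programmatic alternative to the widget/portal: list each child iteration
# with the primary-metric value it logged, then the parent run's metrics.
for child_run in local_run.get_children():
    child_metrics = child_run.get_metrics()
    print(child_run.id, child_metrics.get("spearman_correlation"))

print(local_run.get_metrics())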
- ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "RegressionPipeline(pipeline=Pipeline(memory=None,\n", - " steps=[('datatransformer', DataTransformer(enable_feature_sweeping=None, feature_sweeping_timeout=None,\n", - " is_onnx_compatible=None, logger=None, observer=None, task=None)), ('stackensembleregressor', StackEnsembleRegressor(base_learners=[('28', Pipeline(memory=None,\n", - " steps=[('standardscaler... random_state=None, selection='cyclic', tol=0.0001, verbose=0),\n", - " training_cv_folds=5))]),\n", - " stddev=None)\n" - ] - } - ], - "source": [ - "lookup_metric = \"spearman_correlation\"\n", - "best_run, fitted_model = local_run.get_output(metric = lookup_metric)\n", - "print(fitted_model)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can look at the different models that are used to produce the stack ensemble model" - ] - }, - { - "cell_type": "code", - "execution_count": 29, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'base_learners': None,\n", - " 'meta_learner': None,\n", - " 'training_cv_folds': None,\n", - " '28': Pipeline(memory=None,\n", - " steps=[('standardscalerwrapper', ), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " ma...ale_pos_weight=1, seed=None,\n", - " silent=True, subsample=0.7, tree_method='auto', verbose=-10))]),\n", - " '44': Pipeline(memory=None,\n", - " steps=[('sparsenormalizer', ), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.1, gamma=0, grow_policy='lossguide',\n", - " learnin...scale_pos_weight=1, seed=None, silent=True, subsample=1,\n", - " tree_method='hist', verbose=-10))]),\n", - " '30': Pipeline(memory=None,\n", - " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " max_delta_step=0, max_dept...scale_pos_weight=1, seed=None,\n", - " silent=True, subsample=1, tree_method='auto', verbose=-10))]),\n", - " '20': Pipeline(memory=None,\n", - " steps=[('standardscalerwrapper', ), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", - " importance_type='split', learning_rate=0.2, max_bin=63,\n", - " ...425, silent=True, subsample=0.85,\n", - " subsample_for_bin=200000, subsample_freq=2, verbose=-1))]),\n", - " '40': Pipeline(memory=None,\n", - " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.40578947368421053, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.9, eta=0.3, gamma=0.01, learning_rate=0.1,\n", - " max_delta_step=0, max...ale_pos_weight=1, seed=None, silent=True, subsample=0.6,\n", - " tree_method='auto', verbose=-10))]),\n", - " '45': Pipeline(memory=None,\n", - " steps=[('truncatedsvdwrapper', TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None)), ('xgboostregressor', XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.5, eta=0.2, gamma=0, grow_policy='lossguide',\n", - " learning_rate=0.1, ...eight=1,\n", - " seed=None, silent=True, 
subsample=0.9, tree_method='hist',\n", - " verbose=-10))]),\n", - " '22': Pipeline(memory=None,\n", - " steps=[('standardscalerwrapper', ), ('lightgbmregressor', LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", - " colsample_bytree=0.9, importance_type='split', learning_rate=0.2,\n", - " max_bi... silent=True, subsample=0.6, subsample_for_bin=200000,\n", - " subsample_freq=2, verbose=-1))]),\n", - " '28__memory': None,\n", - " '28__steps': [('standardscalerwrapper',\n", - " ),\n", - " ('xgboostregressor',\n", - " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " max_delta_step=0, max_depth=4, max_leaves=0, min_child_weight=1,\n", - " missing=nan, n_estimators=600, n_jobs=1, nthread=None,\n", - " objective='reg:linear', random_state=0, reg_alpha=0,\n", - " reg_lambda=1.1458333333333335, scale_pos_weight=1, seed=None,\n", - " silent=True, subsample=0.7, tree_method='auto', verbose=-10))],\n", - " '28__standardscalerwrapper': ,\n", - " '28__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " max_delta_step=0, max_depth=4, max_leaves=0, min_child_weight=1,\n", - " missing=nan, n_estimators=600, n_jobs=1, nthread=None,\n", - " objective='reg:linear', random_state=0, reg_alpha=0,\n", - " reg_lambda=1.1458333333333335, scale_pos_weight=1, seed=None,\n", - " silent=True, subsample=0.7, tree_method='auto', verbose=-10),\n", - " '28__standardscalerwrapper__module_name': 'sklearn.preprocessing.data',\n", - " '28__standardscalerwrapper__class_name': 'StandardScaler',\n", - " '28__standardscalerwrapper__copy': True,\n", - " '28__standardscalerwrapper__with_mean': False,\n", - " '28__standardscalerwrapper__with_std': False,\n", - " '28__xgboostregressor__base_score': 0.5,\n", - " '28__xgboostregressor__booster': 'gbtree',\n", - " '28__xgboostregressor__colsample_bylevel': 1,\n", - " '28__xgboostregressor__colsample_bytree': 1,\n", - " '28__xgboostregressor__gamma': 0,\n", - " '28__xgboostregressor__learning_rate': 0.1,\n", - " '28__xgboostregressor__max_delta_step': 0,\n", - " '28__xgboostregressor__max_depth': 4,\n", - " '28__xgboostregressor__min_child_weight': 1,\n", - " '28__xgboostregressor__missing': nan,\n", - " '28__xgboostregressor__n_estimators': 600,\n", - " '28__xgboostregressor__n_jobs': 1,\n", - " '28__xgboostregressor__nthread': None,\n", - " '28__xgboostregressor__objective': 'reg:linear',\n", - " '28__xgboostregressor__random_state': 0,\n", - " '28__xgboostregressor__reg_alpha': 0,\n", - " '28__xgboostregressor__reg_lambda': 1.1458333333333335,\n", - " '28__xgboostregressor__scale_pos_weight': 1,\n", - " '28__xgboostregressor__seed': None,\n", - " '28__xgboostregressor__silent': True,\n", - " '28__xgboostregressor__subsample': 0.7,\n", - " '28__xgboostregressor__eta': 0.01,\n", - " '28__xgboostregressor__max_leaves': 0,\n", - " '28__xgboostregressor__tree_method': 'auto',\n", - " '28__xgboostregressor__verbose': -10,\n", - " '44__memory': None,\n", - " '44__steps': [('sparsenormalizer',\n", - " ),\n", - " ('xgboostregressor',\n", - " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.1, gamma=0, grow_policy='lossguide',\n", - " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=6,\n", - " max_leaves=15, min_child_weight=1, missing=nan, n_estimators=200,\n", - " n_jobs=1, nthread=None, 
objective='reg:linear', random_state=0,\n", - " reg_alpha=0.3125, reg_lambda=0.20833333333333334,\n", - " scale_pos_weight=1, seed=None, silent=True, subsample=1,\n", - " tree_method='hist', verbose=-10))],\n", - " '44__sparsenormalizer': ,\n", - " '44__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.1, gamma=0, grow_policy='lossguide',\n", - " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=6,\n", - " max_leaves=15, min_child_weight=1, missing=nan, n_estimators=200,\n", - " n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n", - " reg_alpha=0.3125, reg_lambda=0.20833333333333334,\n", - " scale_pos_weight=1, seed=None, silent=True, subsample=1,\n", - " tree_method='hist', verbose=-10),\n", - " '44__sparsenormalizer__norm': 'l1',\n", - " '44__sparsenormalizer__copy': True,\n", - " '44__xgboostregressor__base_score': 0.5,\n", - " '44__xgboostregressor__booster': 'gbtree',\n", - " '44__xgboostregressor__colsample_bylevel': 1,\n", - " '44__xgboostregressor__colsample_bytree': 1,\n", - " '44__xgboostregressor__gamma': 0,\n", - " '44__xgboostregressor__learning_rate': 0.1,\n", - " '44__xgboostregressor__max_delta_step': 0,\n", - " '44__xgboostregressor__max_depth': 6,\n", - " '44__xgboostregressor__min_child_weight': 1,\n", - " '44__xgboostregressor__missing': nan,\n", - " '44__xgboostregressor__n_estimators': 200,\n", - " '44__xgboostregressor__n_jobs': 1,\n", - " '44__xgboostregressor__nthread': None,\n", - " '44__xgboostregressor__objective': 'reg:linear',\n", - " '44__xgboostregressor__random_state': 0,\n", - " '44__xgboostregressor__reg_alpha': 0.3125,\n", - " '44__xgboostregressor__reg_lambda': 0.20833333333333334,\n", - " '44__xgboostregressor__scale_pos_weight': 1,\n", - " '44__xgboostregressor__seed': None,\n", - " '44__xgboostregressor__silent': True,\n", - " '44__xgboostregressor__subsample': 1,\n", - " '44__xgboostregressor__eta': 0.1,\n", - " '44__xgboostregressor__grow_policy': 'lossguide',\n", - " '44__xgboostregressor__max_bin': 255,\n", - " '44__xgboostregressor__max_leaves': 15,\n", - " '44__xgboostregressor__tree_method': 'hist',\n", - " '44__xgboostregressor__verbose': -10,\n", - " '30__memory': None,\n", - " '30__steps': [('truncatedsvdwrapper',\n", - " TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None)),\n", - " ('xgboostregressor',\n", - " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1,\n", - " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", - " objective='reg:linear', random_state=0, reg_alpha=2.1875,\n", - " reg_lambda=0.5208333333333334, scale_pos_weight=1, seed=None,\n", - " silent=True, subsample=1, tree_method='auto', verbose=-10))],\n", - " '30__truncatedsvdwrapper': TruncatedSVDWrapper(n_components=0.2573684210526316, random_state=None),\n", - " '30__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=1, eta=0.01, gamma=0, learning_rate=0.1,\n", - " max_delta_step=0, max_depth=6, max_leaves=0, min_child_weight=1,\n", - " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", - " objective='reg:linear', random_state=0, reg_alpha=2.1875,\n", - " reg_lambda=0.5208333333333334, scale_pos_weight=1, seed=None,\n", - " silent=True, subsample=1, tree_method='auto', verbose=-10),\n", - " 
'30__truncatedsvdwrapper__n_components': 0.2573684210526316,\n", - " '30__truncatedsvdwrapper__random_state': None,\n", - " '30__xgboostregressor__base_score': 0.5,\n", - " '30__xgboostregressor__booster': 'gbtree',\n", - " '30__xgboostregressor__colsample_bylevel': 1,\n", - " '30__xgboostregressor__colsample_bytree': 1,\n", - " '30__xgboostregressor__gamma': 0,\n", - " '30__xgboostregressor__learning_rate': 0.1,\n", - " '30__xgboostregressor__max_delta_step': 0,\n", - " '30__xgboostregressor__max_depth': 6,\n", - " '30__xgboostregressor__min_child_weight': 1,\n", - " '30__xgboostregressor__missing': nan,\n", - " '30__xgboostregressor__n_estimators': 200,\n", - " '30__xgboostregressor__n_jobs': 1,\n", - " '30__xgboostregressor__nthread': None,\n", - " '30__xgboostregressor__objective': 'reg:linear',\n", - " '30__xgboostregressor__random_state': 0,\n", - " '30__xgboostregressor__reg_alpha': 2.1875,\n", - " '30__xgboostregressor__reg_lambda': 0.5208333333333334,\n", - " '30__xgboostregressor__scale_pos_weight': 1,\n", - " '30__xgboostregressor__seed': None,\n", - " '30__xgboostregressor__silent': True,\n", - " '30__xgboostregressor__subsample': 1,\n", - " '30__xgboostregressor__eta': 0.01,\n", - " '30__xgboostregressor__max_leaves': 0,\n", - " '30__xgboostregressor__tree_method': 'auto',\n", - " '30__xgboostregressor__verbose': -10,\n", - " '20__memory': None,\n", - " '20__steps': [('standardscalerwrapper',\n", - " ),\n", - " ('lightgbmregressor',\n", - " LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", - " importance_type='split', learning_rate=0.2, max_bin=63,\n", - " max_depth=7, min_child_samples=164, min_child_weight=0.001,\n", - " min_split_gain=0.42105263157894735, n_estimators=600, n_jobs=1,\n", - " num_leaves=127, objective=None, random_state=None,\n", - " reg_alpha=0.975, reg_lambda=1.425, silent=True, subsample=0.85,\n", - " subsample_for_bin=200000, subsample_freq=2, verbose=-1))],\n", - " '20__standardscalerwrapper': ,\n", - " '20__lightgbmregressor': LightGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1,\n", - " importance_type='split', learning_rate=0.2, max_bin=63,\n", - " max_depth=7, min_child_samples=164, min_child_weight=0.001,\n", - " min_split_gain=0.42105263157894735, n_estimators=600, n_jobs=1,\n", - " num_leaves=127, objective=None, random_state=None,\n", - " reg_alpha=0.975, reg_lambda=1.425, silent=True, subsample=0.85,\n", - " subsample_for_bin=200000, subsample_freq=2, verbose=-1),\n", - " '20__standardscalerwrapper__module_name': 'sklearn.preprocessing.data',\n", - " '20__standardscalerwrapper__class_name': 'StandardScaler',\n", - " '20__standardscalerwrapper__copy': True,\n", - " '20__standardscalerwrapper__with_mean': False,\n", - " '20__standardscalerwrapper__with_std': True,\n", - " '20__lightgbmregressor__random_state': None,\n", - " '20__lightgbmregressor__n_jobs': 1,\n", - " '20__lightgbmregressor__boosting_type': 'gbdt',\n", - " '20__lightgbmregressor__class_weight': None,\n", - " '20__lightgbmregressor__colsample_bytree': 1,\n", - " '20__lightgbmregressor__importance_type': 'split',\n", - " '20__lightgbmregressor__learning_rate': 0.2,\n", - " '20__lightgbmregressor__max_depth': 7,\n", - " '20__lightgbmregressor__min_child_samples': 164,\n", - " '20__lightgbmregressor__min_child_weight': 0.001,\n", - " '20__lightgbmregressor__min_split_gain': 0.42105263157894735,\n", - " '20__lightgbmregressor__n_estimators': 600,\n", - " '20__lightgbmregressor__num_leaves': 127,\n", - " 
'20__lightgbmregressor__objective': None,\n", - " '20__lightgbmregressor__reg_alpha': 0.975,\n", - " '20__lightgbmregressor__reg_lambda': 1.425,\n", - " '20__lightgbmregressor__silent': True,\n", - " '20__lightgbmregressor__subsample': 0.85,\n", - " '20__lightgbmregressor__subsample_for_bin': 200000,\n", - " '20__lightgbmregressor__subsample_freq': 2,\n", - " '20__lightgbmregressor__max_bin': 63,\n", - " '20__lightgbmregressor__verbose': -1,\n", - " '40__memory': None,\n", - " '40__steps': [('truncatedsvdwrapper',\n", - " TruncatedSVDWrapper(n_components=0.40578947368421053, random_state=None)),\n", - " ('xgboostregressor',\n", - " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.9, eta=0.3, gamma=0.01, learning_rate=0.1,\n", - " max_delta_step=0, max_depth=9, max_leaves=31, min_child_weight=1,\n", - " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", - " objective='reg:linear', random_state=0,\n", - " reg_alpha=0.8333333333333334, reg_lambda=1.875,\n", - " scale_pos_weight=1, seed=None, silent=True, subsample=0.6,\n", - " tree_method='auto', verbose=-10))],\n", - " '40__truncatedsvdwrapper': TruncatedSVDWrapper(n_components=0.40578947368421053, random_state=None),\n", - " '40__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.9, eta=0.3, gamma=0.01, learning_rate=0.1,\n", - " max_delta_step=0, max_depth=9, max_leaves=31, min_child_weight=1,\n", - " missing=nan, n_estimators=200, n_jobs=1, nthread=None,\n", - " objective='reg:linear', random_state=0,\n", - " reg_alpha=0.8333333333333334, reg_lambda=1.875,\n", - " scale_pos_weight=1, seed=None, silent=True, subsample=0.6,\n", - " tree_method='auto', verbose=-10),\n", - " '40__truncatedsvdwrapper__n_components': 0.40578947368421053,\n", - " '40__truncatedsvdwrapper__random_state': None,\n", - " '40__xgboostregressor__base_score': 0.5,\n", - " '40__xgboostregressor__booster': 'gbtree',\n", - " '40__xgboostregressor__colsample_bylevel': 1,\n", - " '40__xgboostregressor__colsample_bytree': 0.9,\n", - " '40__xgboostregressor__gamma': 0.01,\n", - " '40__xgboostregressor__learning_rate': 0.1,\n", - " '40__xgboostregressor__max_delta_step': 0,\n", - " '40__xgboostregressor__max_depth': 9,\n", - " '40__xgboostregressor__min_child_weight': 1,\n", - " '40__xgboostregressor__missing': nan,\n", - " '40__xgboostregressor__n_estimators': 200,\n", - " '40__xgboostregressor__n_jobs': 1,\n", - " '40__xgboostregressor__nthread': None,\n", - " '40__xgboostregressor__objective': 'reg:linear',\n", - " '40__xgboostregressor__random_state': 0,\n", - " '40__xgboostregressor__reg_alpha': 0.8333333333333334,\n", - " '40__xgboostregressor__reg_lambda': 1.875,\n", - " '40__xgboostregressor__scale_pos_weight': 1,\n", - " '40__xgboostregressor__seed': None,\n", - " '40__xgboostregressor__silent': True,\n", - " '40__xgboostregressor__subsample': 0.6,\n", - " '40__xgboostregressor__eta': 0.3,\n", - " '40__xgboostregressor__max_leaves': 31,\n", - " '40__xgboostregressor__tree_method': 'auto',\n", - " '40__xgboostregressor__verbose': -10,\n", - " '45__memory': None,\n", - " '45__steps': [('truncatedsvdwrapper',\n", - " TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None)),\n", - " ('xgboostregressor',\n", - " XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.5, eta=0.2, gamma=0, grow_policy='lossguide',\n", - " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=10,\n", - " 
max_leaves=255, min_child_weight=1, missing=nan, n_estimators=50,\n", - " n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n", - " reg_alpha=0.5208333333333334, reg_lambda=2.5, scale_pos_weight=1,\n", - " seed=None, silent=True, subsample=0.9, tree_method='hist',\n", - " verbose=-10))],\n", - " '45__truncatedsvdwrapper': TruncatedSVDWrapper(n_components=0.3563157894736842, random_state=None),\n", - " '45__xgboostregressor': XGBoostRegressor(base_score=0.5, booster='gbtree', colsample_bylevel=1,\n", - " colsample_bytree=0.5, eta=0.2, gamma=0, grow_policy='lossguide',\n", - " learning_rate=0.1, max_bin=255, max_delta_step=0, max_depth=10,\n", - " max_leaves=255, min_child_weight=1, missing=nan, n_estimators=50,\n", - " n_jobs=1, nthread=None, objective='reg:linear', random_state=0,\n", - " reg_alpha=0.5208333333333334, reg_lambda=2.5, scale_pos_weight=1,\n", - " seed=None, silent=True, subsample=0.9, tree_method='hist',\n", - " verbose=-10),\n", - " '45__truncatedsvdwrapper__n_components': 0.3563157894736842,\n", - " '45__truncatedsvdwrapper__random_state': None,\n", - " '45__xgboostregressor__base_score': 0.5,\n", - " '45__xgboostregressor__booster': 'gbtree',\n", - " '45__xgboostregressor__colsample_bylevel': 1,\n", - " '45__xgboostregressor__colsample_bytree': 0.5,\n", - " '45__xgboostregressor__gamma': 0,\n", - " '45__xgboostregressor__learning_rate': 0.1,\n", - " '45__xgboostregressor__max_delta_step': 0,\n", - " '45__xgboostregressor__max_depth': 10,\n", - " '45__xgboostregressor__min_child_weight': 1,\n", - " '45__xgboostregressor__missing': nan,\n", - " '45__xgboostregressor__n_estimators': 50,\n", - " '45__xgboostregressor__n_jobs': 1,\n", - " '45__xgboostregressor__nthread': None,\n", - " '45__xgboostregressor__objective': 'reg:linear',\n", - " '45__xgboostregressor__random_state': 0,\n", - " '45__xgboostregressor__reg_alpha': 0.5208333333333334,\n", - " '45__xgboostregressor__reg_lambda': 2.5,\n", - " '45__xgboostregressor__scale_pos_weight': 1,\n", - " '45__xgboostregressor__seed': None,\n", - " '45__xgboostregressor__silent': True,\n", - " '45__xgboostregressor__subsample': 0.9,\n", - " '45__xgboostregressor__eta': 0.2,\n", - " '45__xgboostregressor__grow_policy': 'lossguide',\n", - " '45__xgboostregressor__max_bin': 255,\n", - " '45__xgboostregressor__max_leaves': 255,\n", - " '45__xgboostregressor__tree_method': 'hist',\n", - " '45__xgboostregressor__verbose': -10,\n", - " '22__memory': None,\n", - " '22__steps': [('standardscalerwrapper',\n", - " ),\n", - " ('lightgbmregressor',\n", - " LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", - " colsample_bytree=0.9, importance_type='split', learning_rate=0.2,\n", - " max_bin=7, max_depth=5, min_child_samples=60,\n", - " min_child_weight=0.001, min_split_gain=0.3157894736842105,\n", - " n_estimators=600, n_jobs=1, num_leaves=31, objective=None,\n", - " random_state=None, reg_alpha=0.8999999999999999, reg_lambda=1.125,\n", - " silent=True, subsample=0.6, subsample_for_bin=200000,\n", - " subsample_freq=2, verbose=-1))],\n", - " '22__standardscalerwrapper': ,\n", - " '22__lightgbmregressor': LightGBMRegressor(boosting_type='gbdt', class_weight=None,\n", - " colsample_bytree=0.9, importance_type='split', learning_rate=0.2,\n", - " max_bin=7, max_depth=5, min_child_samples=60,\n", - " min_child_weight=0.001, min_split_gain=0.3157894736842105,\n", - " n_estimators=600, n_jobs=1, num_leaves=31, objective=None,\n", - " random_state=None, reg_alpha=0.8999999999999999, reg_lambda=1.125,\n", - " silent=True, 
subsample=0.6, subsample_for_bin=200000,\n", - " subsample_freq=2, verbose=-1),\n", - " '22__standardscalerwrapper__module_name': 'sklearn.preprocessing.data',\n", - " '22__standardscalerwrapper__class_name': 'StandardScaler',\n", - " '22__standardscalerwrapper__copy': True,\n", - " '22__standardscalerwrapper__with_mean': False,\n", - " '22__standardscalerwrapper__with_std': True,\n", - " '22__lightgbmregressor__random_state': None,\n", - " '22__lightgbmregressor__n_jobs': 1,\n", - " '22__lightgbmregressor__boosting_type': 'gbdt',\n", - " '22__lightgbmregressor__class_weight': None,\n", - " '22__lightgbmregressor__colsample_bytree': 0.9,\n", - " '22__lightgbmregressor__importance_type': 'split',\n", - " '22__lightgbmregressor__learning_rate': 0.2,\n", - " '22__lightgbmregressor__max_depth': 5,\n", - " '22__lightgbmregressor__min_child_samples': 60,\n", - " '22__lightgbmregressor__min_child_weight': 0.001,\n", - " '22__lightgbmregressor__min_split_gain': 0.3157894736842105,\n", - " '22__lightgbmregressor__n_estimators': 600,\n", - " '22__lightgbmregressor__num_leaves': 31,\n", - " '22__lightgbmregressor__objective': None,\n", - " '22__lightgbmregressor__reg_alpha': 0.8999999999999999,\n", - " '22__lightgbmregressor__reg_lambda': 1.125,\n", - " '22__lightgbmregressor__silent': True,\n", - " '22__lightgbmregressor__subsample': 0.6,\n", - " '22__lightgbmregressor__subsample_for_bin': 200000,\n", - " '22__lightgbmregressor__subsample_freq': 2,\n", - " '22__lightgbmregressor__max_bin': 7,\n", - " '22__lightgbmregressor__verbose': -1,\n", - " 'metalearner__alphas': None,\n", - " 'metalearner__copy_X': True,\n", - " 'metalearner__cv': 'warn',\n", - " 'metalearner__eps': 0.001,\n", - " 'metalearner__fit_intercept': True,\n", - " 'metalearner__l1_ratio': 0.5,\n", - " 'metalearner__max_iter': 1000,\n", - " 'metalearner__n_alphas': 100,\n", - " 'metalearner__n_jobs': None,\n", - " 'metalearner__normalize': False,\n", - " 'metalearner__positive': False,\n", - " 'metalearner__precompute': 'auto',\n", - " 'metalearner__random_state': None,\n", - " 'metalearner__selection': 'cyclic',\n", - " 'metalearner__tol': 0.0001,\n", - " 'metalearner__verbose': 0}" - ] - }, - "execution_count": 29, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "fitted_model.named_steps['stackensembleregressor'].get_params()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can also look at how each column in our dataset was featurized by AutoML" - ] - }, - { - "cell_type": "code", - "execution_count": 37, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[{'RawFeatureName': 'C1',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C2',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C3',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C4',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C5',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C6',\n", - " 
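Both the ensemble parameter dump above and the per-column featurization summary that follows are verbose, so a condensed view can be easier to read. A minimal sketch, assuming the datatransformer step exposes get_featurization_summary() as in the AutoML sample notebooks of this SDK version:

import pandas as pd

# Condensed view of the stack ensemble: print only the top-level base-learner
# pipelines returned by get_params(), not every nested hyperparameter.
ensemble = fitted_model.named_steps["stackensembleregressor"]
for name, value in ensemble.get_params().items():
    if "__" not in name and hasattr(value, "steps"):
        print(name, "->", [step_name for step_name, _ in value.steps])

# Condensed view of the featurization summary: count raw features by detected
# type and by whether they were dropped, instead of listing every column.
summary = fitted_model.named_steps["datatransformer"].get_featurization_summary()
summary_df = pd.DataFrame(summary)
print(summary_df.groupby(["TypeDetected", "Dropped"]).size())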
'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C7',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C8',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C9',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C10',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C11',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C12',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C13',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C14',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C15',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C16',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C17',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C18',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C19',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C20',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C21',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C22',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C23',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C24',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C25',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C26',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - 
" 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C27',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C28',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C29',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C30',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C31',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C32',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C33',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C34',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C35',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C36',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C37',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C38',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C39',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C40',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C41',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C42',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C43',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C44',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C45',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C46',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 
'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C47',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C48',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C49',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C50',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C51',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C52',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C53',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C54',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C55',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C56',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C57',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C58',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C59',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C60',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C61',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C62',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C63',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C64',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C65',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C66',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " 
{'RawFeatureName': 'C67',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C68',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C69',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C70',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C71',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C72',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C73',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C74',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C75',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C76',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C77',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C78',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C79',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C80',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C81',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C82',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C83',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C84',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C85',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C86',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C87',\n", - " 'TypeDetected': 
'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C88',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C89',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C90',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C91',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C92',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C93',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C94',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C95',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C96',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C97',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C98',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C99',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C100',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C101',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C102',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C103',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C104',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C105',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C106',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C107',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 
'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C108',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C109',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C110',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C111',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C112',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C113',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C114',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C115',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C116',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C117',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C118',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C119',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C120',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C121',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C122',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C123',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C124',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C125',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C126',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C127',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - 
[Removed notebook output truncated: the deleted cell output continues with identical featurization-summary entries for raw features C128 through C551 (and onward), each reported as TypeDetected 'Numeric', Dropped 'No', EngineeredFeatureCount 1, and Tranformations ['MeanImputer'].]
'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C552',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C553',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C554',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C555',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C556',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C557',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C558',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C559',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C560',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C561',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C562',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C563',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C564',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C565',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C566',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C567',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C568',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C569',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C570',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C571',\n", - " 'TypeDetected': 'Numeric',\n", - " 
'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C572',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C573',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C574',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C575',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C576',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C577',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C578',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C579',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C580',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C581',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C582',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C583',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C584',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C585',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C586',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C587',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C588',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C589',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C590',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C591',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 
'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C592',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C593',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C594',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C595',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C596',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C597',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C598',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C599',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C600',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C601',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C602',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C603',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C604',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C605',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C606',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C607',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C608',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C609',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C610',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C611',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - 
" 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C612',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C613',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C614',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C615',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C616',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C617',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C618',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C619',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C620',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C621',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C622',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C623',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C624',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C625',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C626',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C627',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C628',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C629',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C630',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C631',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': 
['MeanImputer']},\n", - " {'RawFeatureName': 'C632',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C633',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C634',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C635',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C636',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C637',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C638',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C639',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C640',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C641',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C642',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C643',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C644',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C645',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C646',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C647',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C648',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C649',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C650',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C651',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " 
{'RawFeatureName': 'C652',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C653',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C654',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C655',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C656',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C657',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C658',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C659',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C660',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C661',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C662',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C663',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C664',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C665',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C666',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C667',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C668',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C669',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C670',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C671',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C672',\n", - " 
'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C673',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C674',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C675',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C676',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C677',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C678',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C679',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C680',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C681',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C682',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C683',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C684',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C685',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C686',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C687',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C688',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C689',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C690',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C691',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C692',\n", - " 'TypeDetected': 'Numeric',\n", - " 
'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C693',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C694',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C695',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C696',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C697',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C698',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C699',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C700',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C701',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C702',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C703',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C704',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C705',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C706',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C707',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C708',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C709',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C710',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C711',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C712',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 
'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C713',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C714',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C715',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C716',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C717',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C718',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C719',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C720',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C721',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C722',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C723',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C724',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C725',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C726',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C727',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C728',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C729',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C730',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C731',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C732',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - 
" 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C733',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C734',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C735',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C736',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C737',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C738',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C739',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C740',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C741',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C742',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C743',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C744',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C745',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C746',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C747',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C748',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C749',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C750',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C751',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C752',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': 
['MeanImputer']},\n", - " {'RawFeatureName': 'C753',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C754',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C755',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C756',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C757',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C758',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C759',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C760',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C761',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C762',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C763',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C764',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C765',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C766',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C767',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C768',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C769',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C770',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C771',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C772',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " 
{'RawFeatureName': 'C773',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C774',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C775',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C776',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C777',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C778',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C779',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C780',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C781',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C782',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C783',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C784',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C785',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C786',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C787',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C788',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C789',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C790',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C791',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C792',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C793',\n", - " 
'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C794',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C795',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C796',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C797',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C798',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C799',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C800',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C801',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C802',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C803',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C804',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C805',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C806',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C807',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C808',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C809',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C810',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C811',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C812',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C813',\n", - " 'TypeDetected': 'Numeric',\n", - " 
'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C814',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C815',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C816',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C817',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C818',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C819',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C820',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C821',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C822',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C823',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C824',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C825',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C826',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C827',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C828',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C829',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C830',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C831',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C832',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C833',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 
'EngineeredFeatureCount': 1,\n", -    "   'Tranformations': ['MeanImputer']},\n", -    "  ... (featurization summary entries for C834 through C993 omitted for brevity; every raw feature in this range is detected as Numeric, not dropped, with EngineeredFeatureCount 1 and Tranformations ['MeanImputer']) ...\n", -    "  {'RawFeatureName': 'C994',\n", -    "   'TypeDetected': 'Numeric',\n", -    "   'Dropped': 'No',\n", -    "   'EngineeredFeatureCount': 1,\n", -    "   'Tranformations': 
['MeanImputer']},\n", - " {'RawFeatureName': 'C995',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C996',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C997',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C998',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C999',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " {'RawFeatureName': 'C1000',\n", - " 'TypeDetected': 'Numeric',\n", - " 'Dropped': 'No',\n", - " 'EngineeredFeatureCount': 1,\n", - " 'Tranformations': ['MeanImputer']},\n", - " ...]" - ] - }, - "execution_count": 37, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "fitted_model.named_steps['datatransformer'].get_featurization_summary()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Prediction\n", - "Finally, we can use the best model to make a prediction on our test set using pearson correlation as our metric" - ] - }, - { - "cell_type": "code", - "execution_count": 36, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.7722744639946534\n" - ] - } - ], - "source": [ - "X_test = testing_data.drop(\"score\", axis=1).values\n", - "y_test = testing_data['score'].values.flatten()\n", - "\n", - "y_pred = fitted_model.predict(X_test)\n", - "print(pearsonr(y_pred, y_test)[0])" - ] - }, - { - "cell_type": "code", - "execution_count": 44, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['sentence_similarity_regressor.pkl']" - ] - }, - "execution_count": 44, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "#Save the model as a pkl file\n", - "model_path = 'sentence_similarity_regressor.pkl'\n", - "joblib.dump(fitted_model, model_path)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.7" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 7e1fd612f..10aadb125 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Using AzureML Pipelines and AutoML for Predicting Sentence Similarity" + "#
AzureML Pipeline, AutoML and AKS Deployment for Sentence Similarity
" ] }, { @@ -27,7 +27,7 @@ "After creating the pipeline, the notebook demonstrates the deployment of our sentence similarity model using Azure Container Instances (ACI).\n", "\n", "This notebook showcases how to use the following AzureML features: \n", - "- AzureML Pipelines\n", + "- AzureML Pipelines (PythonScriptStep and AutoMLStep)\n", "- AutoML\n", "- AmlCompute\n", "- Datastore\n", @@ -59,8 +59,16 @@ " * 4.3.1 [Define get_data script to load data](#4.3.1-Define-get_data-script-to-load-data)\n", " * 4.3.2 [Create AutoMLConfig object](#4.3.2-Create-AutoMLConfig-object)\n", " * 4.3.3 [Create AutoMLStep](#4.3.3-Create-AutoMLStep) \n", - "5. [Run Pipeline](#5.-Run-Pipeline)\n", - "6. [Deploy Sentence Similarity Model](#6.-Deploy-Sentence-Similarity-Model)" + "5. [Run Pipeline](#5.-Run-Pipeline) \n", + "6. [Deploy Sentence Similarity Model](#6.-Deploy-Sentence-Similarity-Model)\n", + " * 6.1 [Register/Retrieve AutoML and Google Universal Sentence Encoder Models for Deployment](#6.1-Register/Retrieve-AutoML-and-Google-Universal-Sentence-Encoder-Models-for-Deployment) \n", + " * 6.2 [Create Scoring Script](#6.2-Create-Scoring-Script)\n", + " * 6.3 [Create a YAML File for the Environment](#6.3-Create-a-YAML-File-for-the-Environment) \n", + " * 6.4 [Image Creation](#6.4-Image-Creation) \n", + " * 6.5 [Provision the AKS Cluster](#6.5-Provision-the-AKS-Cluster) \n", + " * 6.6 [Deploy the image as a Web Service to Azure Kubernetes Service](#6.6-Deploy-the-image-as-a-Web-Service-to-Azure-Kubernetes-Service) \n", + " * 6.7 [Test Deployed Model](#6.7-Test-Deployed-Webservice)\n", + " \n" ] }, { @@ -83,7 +91,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![](pipelines.png)" + "![](https://nlpbp.blob.core.windows.net/images/pipelines.png)" ] }, { @@ -133,8 +141,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "WARNING: Logging before flag parsing goes to stderr.\n", - "W0623 20:14:33.703469 29968 __init__.py:56] Some hub symbols are not available because TensorFlow version is less than 1.14\n" + "WARNING - Some hub symbols are not available because TensorFlow version is less than 1.14\n" ] }, { @@ -142,8 +149,8 @@ "output_type": "stream", "text": [ "Turning diagnostics collection on. 
\n", - "System version: 3.6.7 |Anaconda, Inc.| (default, Dec 10 2018, 20:35:02) [MSC v.1915 64 bit (AMD64)]\n", - "Azure ML SDK Version: 1.0.41\n", + "System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]\n", + "Azure ML SDK Version: 1.0.43\n", "Pandas version: 0.23.4\n", "Tensorflow Version: 1.13.1\n" ] @@ -191,7 +198,8 @@ "from azureml.core.compute import ComputeTarget, AmlCompute\n", "from azureml.core.runconfig import RunConfiguration\n", "from azureml.core.conda_dependencies import CondaDependencies\n", - "from azureml.core.webservice import AciWebservice, Webservice\n", + "from azureml.core.webservice import AksWebservice, Webservice\n", + "from azureml.core.compute import AksCompute, ComputeTarget\n", "from azureml.core.image import ContainerImage\n", "from azureml.core.model import Model\n", "from azureml.train.automl import AutoMLStep, AutoMLStepRun, AutoMLConfig\n", @@ -240,7 +248,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 227KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 175KB/s]\n" ] }, { @@ -254,7 +262,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 246KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 186KB/s]\n" ] }, { @@ -268,7 +276,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 227KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 220KB/s]\n" ] }, { @@ -466,17 +474,10 @@ { "cell_type": "code", "execution_count": 9, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "W0623 20:14:53.197878 29968 authentication.py:494] Warning: Falling back to use azure cli login credentials.\n", - "If you run your code in unattended mode, i.e., where you can't give a user input, then we recommend to use ServicePrincipalAuthentication or MsiAuthentication.\n", - "Please refer to aka.ms/aml-notebook-auth for different authentication mechanisms in azureml-sdk.\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -488,8 +489,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "W0623 20:14:53.476237 28700 _profile.py:1082] Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "W0623 20:15:00.695510 29968 _profile.py:774] You have logged in. Now let us find all the subscriptions to which you have access...\n" + "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "WARNING - You have logged in. 
Now let us find all the subscriptions to which you have access...\n" ] }, { @@ -571,13 +572,14 @@ "output_type": "stream", "text": [ "Found existing compute target.\n", - "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-20T02:00:52.824000+00:00', 'errors': None, 'creationTime': '2019-05-20T22:09:40.142683+00:00', 'modifiedTime': '2019-05-20T22:10:11.888950+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" + "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-25T23:36:16.901000+00:00', 'errors': None, 'creationTime': '2019-06-20T23:37:54.322324+00:00', 'modifiedTime': '2019-06-20T23:38:26.084645+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" ] } ], "source": [ "# choose a name for your cluster\n", - "cluster_name = \"gpucluster\"\n", + "#cluster_name = \"<>\"\n", + "cluster_name = \"gpu-scoring-jm\"\n", "\n", "try:\n", " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", @@ -622,15 +624,15 @@ "Uploading ./data\\dev.csv\n", "Uploading ./data\\test.csv\n", "Uploading ./data\\train.csv\n", - "Uploaded ./data\\test.csv, 1 files out of an estimated total of 3\n", - "Uploaded ./data\\dev.csv, 2 files out of an estimated total of 3\n", + "Uploaded ./data\\dev.csv, 1 files out of an estimated total of 3\n", + "Uploaded ./data\\test.csv, 2 files out of an estimated total of 3\n", "Uploaded ./data\\train.csv, 3 files out of an estimated total of 3\n" ] }, { "data": { "text/plain": [ - "$AZUREML_DATAREFERENCE_5ae3e36f570d4293ad895beac8538e18" + "$AZUREML_DATAREFERENCE_fe1e1b2408a441cb9aba61c4686a85c2" ] }, "execution_count": 12, @@ -723,7 +725,7 @@ "conda_run_config.environment.python.user_managed_dependencies = False\n", "\n", "conda_run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33'], \n", - " conda_packages=['numpy', 'py-xgboost', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], \n", + " conda_packages=['numpy', 'py-xgboost<=0.80', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], \n", " pin_sdk_version=False)\n", "\n", "print('run config is ready')" @@ -764,7 +766,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Overwriting ./automl-sentence-similarity/embed.py\n" + "Writing ./automl-sentence-similarity/embed.py\n" ] } ], @@ -906,7 +908,15 @@ "cell_type": "code", "execution_count": 17, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Parameter 'hash_paths' will be deprecated. 
All files under source_directory will be hashed except files listed in .amlignore or .gitignore\n" + ] + } + ], "source": [ "embedStep = PythonScriptStep(\n", " name=\"Embed\",\n", @@ -961,7 +971,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Overwriting ./automl-sentence-similarity/get_data.py\n" + "Writing ./automl-sentence-similarity/get_data.py\n" ] } ], @@ -1028,7 +1038,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -1037,7 +1047,7 @@ " \"iterations\": 50, #Number of algorithm options to try\n", " \"primary_metric\": 'spearman_correlation', #Metric to optimize\n", " \"preprocess\": True, #Whether dataset preprocessing should be applied\n", - " \"verbosity\": logging.INFO,\n", + " \"verbosity\": logging.INFO\n", "}\n", "automl_config = AutoMLConfig(task = 'regression', #type of task: classification, regression or forecasting\n", " debug_log = 'automl_errors.log',\n", @@ -1064,7 +1074,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -1084,7 +1094,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -1113,7 +1123,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -1126,17 +1136,17 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 24, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Created step Embed [8da9e158][d4cc417b-9bee-4f25-9980-8d490c2716d5], (This step is eligible to reuse a previous run's output)\n", - "Created step AutoML [505d087e][eb49a173-bec7-4812-b873-7d5a0d2a051e], (This step is eligible to reuse a previous run's output)\n", - "Using data reference stsbenchmark for StepId [fe0b62e2][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", - "Submitted pipeline run: 0db34daa-1d3d-4531-bbf0-6b647864f4ca\n" + "Created step Embed [42ea8959][75c6d120-9cb7-418c-b54b-4eef214eae5d], (This step will run and generate new outputs)\n", + "Created step AutoML [0c896fa5][eb49a173-bec7-4812-b873-7d5a0d2a051e], (This step is eligible to reuse a previous run's output)\n", + "Using data reference stsbenchmark for StepId [9b655bdc][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", + "Submitted pipeline run: 318167e4-994b-43a0-96a1-54fc159043a8\n" ] } ], @@ -1146,9 +1156,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "861bee4292c5496f91d41f6e875ce9ec", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_PipelineWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', '…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Inspect the run details using the provided widget\n", "RunDetails(pipeline_run).show()" @@ -1158,7 +1183,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "![](pipelineWidget.PNG)" + "![](https://nlpbp.blob.core.windows.net/images/pipelineWidget.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Alternatively, block until the run has completed." 
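A minimal sketch of that blocking call, assuming the pipeline_run object submitted above (wait_for_completion is the standard PipelineRun method in the AzureML SDK):

# Block until the Embed and AutoML steps have both finished,
# streaming step logs to the cell output as they arrive.
pipeline_run.wait_for_completion(show_output=True)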
] }, { @@ -1192,51 +1224,69 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "## 6.1 Register/Retrieve AutoML and Google Universal Sentence Encoder Models for Deployment\n", + "\n", + "### Register a new automl model\n", "Register the best AutoML model based on the pipeline results or load the saved model" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 60, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Found model with name 4c242e457281488best\n" + "Registering model 76a6169d7f364bdbest\n", + "76a6169d7f364bdbest\n" ] } ], "source": [ - "automl_model_name = '4c242e457281488best'\n", - "\n", - "try:\n", - " model = Model(ws, name= automl_model_name)\n", - " print(\"Found model with name\", automl_model_name)\n", - "except:\n", - " automl_step_run = AutoMLStepRun(step_run=pipeline_run.find_step_run('AutoML')[0])\n", - " # to get the outputs\n", - " best_run, fitted_model = automl_step_run.get_output()\n", + "automl_step_run = AutoMLStepRun(step_run=pipeline_run.find_step_run('AutoML')[0])\n", + "# to get the outputs\n", + "best_run, fitted_model = automl_step_run.get_output()\n", "\n", - " # to register the fitted_mode\n", - " description = 'AutoML Model'\n", - " tags = None\n", - " model = automl_step_run.register_model(model_name= automl_model_name, description = description, tags = tags)\n", - " print(\"Registered model with name\", automl_model_name)\n", - " print(automl_step_run.model_id) # Use this id to deploy the model as a web service in Azure." + "# to register the fitted_mode\n", + "description = 'Pipeline Model'\n", + "tags = {'area': \"nlp\", 'type': \"sentencesimilarity pipelines\"}\n", + "model = automl_step_run.register_model(description = description, tags = tags)\n", + "automl_model_name = automl_step_run.model_id\n", + "print(automl_step_run.model_id) # Use this id to deploy the model as a web service in Azure." 
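As a small aside (not part of the original notebook): before choosing between the register and retrieve paths in this section, you can check what is already registered in the workspace. Model.list is the standard azureml-core query for this; the names below are the ones used in this notebook.

from azureml.core.model import Model

# List every registered version of the AutoML model and of the Google USE model;
# an empty list means that name has not been registered in this workspace yet.
for name in [automl_model_name, 'googleUSEmodel']:
    versions = Model.list(ws, name=name)
    print(name, '->', [m.version for m in versions])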
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Retrieve existing model from Azure\n", + "If you already have a best model then you can skip registering the model by just retrieving the latest version of model by providing it's name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "automl_model_name = 'f775e327caee4f7best' # best fit model registered in the workspace\n", + "model = Model(ws, name= automl_model_name)\n", + "print(\"Found model with name\", automl_model_name)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "### Register Google Universal Sentence Encoder Model\n", "Register the Google Universal Sentence Encoder model if not already registered in your workspace" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 54, "metadata": {}, "outputs": [ { @@ -1249,36 +1299,48 @@ } ], "source": [ - "try:\n", - " embedding_model = Model(ws, name= 'googleUSEmodel')\n", - " print(\"Found model with name googleUSEembeddings\")\n", - "\n", - "except:\n", - " #set location for where to download google tensorflow model\n", - " os.environ['TFHUB_CACHE_DIR'] = './googleUSE' \n", - " # download model\n", - " hub.Module(\"https://tfhub.dev/google/universal-sentence-encoder-large/3\")\n", - " # register model\n", - " embedding_model = Model.register(\n", - " model_path = \"googleUSE\",\n", - " model_name = \"googleUSEmodel\",\n", - " tags = {\"Model\": \"GoogleUSE\"},\n", - " description = \"Google Universal Sentence Embedding pretrained model\",\n", - " workspace = ws\n", + "#set location for where to download google tensorflow model\n", + "os.environ['TFHUB_CACHE_DIR'] = './googleUSE' \n", + "# download model\n", + "hub.Module(\"https://tfhub.dev/google/universal-sentence-encoder-large/3\")\n", + "# register model\n", + "embedding_model = Model.register(\n", + " model_path = \"googleUSE\",\n", + " model_name = \"googleUSEmodel\",\n", + " tags = {\"Model\": \"GoogleUSE\"},\n", + " description = \"Google Universal Sentence Embedding pretrained model\",\n", + " workspace = ws\n", " )\n", - " print('Registered googleUSEembeddings model')" + "print('Registered googleUSEembeddings model')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Write scoring script" + "### Retrieve existing Google USE model from Azure" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "embedding_model = Model(ws, name= 'googleUSEmodel')\n", + "print(\"Found model with name googleUSEembeddings\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6.2 Create Scoring Script\n" + ] + }, + { + "cell_type": "code", + "execution_count": 73, "metadata": {}, "outputs": [ { @@ -1388,7 +1450,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 74, "metadata": {}, "outputs": [], "source": [ @@ -1402,33 +1464,49 @@ " cefw.write(content.replace('<>', automl_model_name))" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6.3 Create a YAML File for the Environment\n", + "\n", + "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, pipeline_env.yml, which specifies the dependencies from the run." 
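Looking back at section 6.2: the body of score.py is written by a cell whose content is unchanged in this patch, so it does not appear in the diff. For orientation only, a scoring script for this deployment typically follows the AzureML init()/run() convention along the lines of the sketch below. The model name placeholder, column layout and feature construction here are assumptions, not the notebook's actual script (which also substitutes the registered AutoML model id into the file).

import json
import os

import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_hub as hub
from sklearn.externals import joblib  # or plain `joblib`

from azureml.core.model import Model


def init():
    global automl_model, tf_session, sentence_input, embedding_op
    # Load the registered AutoML model (the real script injects the registered model id here)
    automl_model = joblib.load(Model.get_model_path('<automl-model-name>'))
    # Point TF Hub at the registered Google USE folder so the module loads from the local cache
    os.environ['TFHUB_CACHE_DIR'] = Model.get_model_path('googleUSEmodel')
    use_module = hub.Module('https://tfhub.dev/google/universal-sentence-encoder-large/3')
    sentence_input = tf.placeholder(tf.string, shape=[None])
    embedding_op = use_module(sentence_input)
    tf_session = tf.Session()
    tf_session.run([tf.global_variables_initializer(), tf.tables_initializer()])


def run(rawdata):
    try:
        rows = json.loads(rawdata)['data']
        df = pd.DataFrame(rows, columns=['sentence1', 'sentence2'])  # assumed column layout
        emb1 = tf_session.run(embedding_op, feed_dict={sentence_input: df['sentence1'].tolist()})
        emb2 = tf_session.run(embedding_op, feed_dict={sentence_input: df['sentence2'].tolist()})
        # Assumption: the AutoML model was trained on the concatenated embeddings of both sentences
        scores = automl_model.predict(np.concatenate([emb1, emb2], axis=1)).tolist()
        return json.dumps({'result': scores})
    except Exception as e:
        # The test cell later in the notebook reads this 'error' key when something goes wrong
        return json.dumps({'error': str(e)})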
+ ] + }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 32, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'autoenv.yml'" + "'pipeline_env.yml'" ] }, - "execution_count": 14, + "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80', 'pandas', 'tensorflow', 'tensorflow-hub'],\n", - " pip_packages=['azureml-sdk[automl]'], python_version = '3.6.8')\n", + " pip_packages=['azureml-sdk[automl]==1.0.43.*'], python_version = '3.6.8')\n", "\n", - "conda_env_file_name = 'autoenv.yml'\n", - "myenv.save_to_file('.', conda_env_file_name)" + "conda_env_file_name = 'pipeline_env.yml'\n", + "myenv.save_to_file('.', conda_env_file_name)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6.4 Image Creation" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 75, "metadata": { "scrolled": false }, @@ -1438,17 +1516,17 @@ "output_type": "stream", "text": [ "Creating image\n", - "Running................................................................................\n", + "Running......................................................................................\n", "Succeeded\n", - "Image creation operation finished for image pipeline-image:48, operation \"Succeeded\"\n" + "Image creation operation finished for image pipeline-image:58, operation \"Succeeded\"\n" ] } ], "source": [ "#trying to add dependencies\n", - "image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n", + "image_config = ContainerImage.image_configuration(execution_script = script_file_name,\n", " runtime = \"python\",\n", - " conda_file = \"autoenv.yml\",\n", + " conda_file = conda_env_file_name,\n", " description = \"Image with aml pipeline model\",\n", " tags = {'area': \"nlp\", 'type': \"sentencesimilarity pipeline\"})\n", "\n", @@ -1461,45 +1539,101 @@ "image.wait_for_creation(show_output = True)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the above step fails then use below command to see logs" + ] + }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "#Set the web service configuration (using default here)\n", - "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", - " memory_gb = 8)" + "image.get_logs()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6.5 Provision the AKS Cluster\n", + "\n", + "This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it." 
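Since provisioning is asynchronous and the cluster is meant to be reused, a retrieve-or-create variant of the cell below is a common pattern; this sketch mirrors the notebook's names and adds a wait before deploying (ComputeTargetException and wait_for_completion are standard azureml-core APIs).

from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.compute_target import ComputeTargetException

aks_name = 'nlp-aks'
try:
    # Reuse the AKS cluster if it already exists in the workspace
    aks_target = ComputeTarget(workspace=ws, name=aks_name)
    print('Found existing AKS cluster.')
except ComputeTargetException:
    # Otherwise provision a new one with the default configuration
    prov_config = AksCompute.provisioning_configuration()
    aks_target = ComputeTarget.create(workspace=ws,
                                      name=aks_name,
                                      provisioning_configuration=prov_config)

# Block until provisioning has finished before deploying the image to the cluster
aks_target.wait_for_completion(show_output=True)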
] }, { "cell_type": "code", - "execution_count": 17, - "metadata": { - "scrolled": true - }, + "execution_count": 81, + "metadata": {}, + "outputs": [], + "source": [ + "# create aks cluser\n", + "\n", + "# Use the default configuration (can also provide parameters to customize)\n", + "prov_config = AksCompute.provisioning_configuration()\n", + "\n", + "aks_name = 'nlp-aks' \n", + "# Create the cluster\n", + "aks_target = ComputeTarget.create(workspace = ws, \n", + " name = aks_name, \n", + " provisioning_configuration = prov_config)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## 6.6 Deploy the Image as a Web Service on Azure Kubernetes Service\n" + ] + }, + { + "cell_type": "code", + "execution_count": 83, + "metadata": {}, + "outputs": [], + "source": [ + "#Set the web service configuration\n", + "aks_config = AksWebservice.deploy_configuration()" + ] + }, + { + "cell_type": "code", + "execution_count": 85, + "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Creating service\n", - "Running..............................................\n", - "SucceededACI service creation operation finished, operation \"Succeeded\"\n", + "Running........................\n", + "SucceededAKS service creation operation finished, operation \"Succeeded\"\n", "Healthy\n" ] } ], "source": [ "# deploy image as web service\n", - "aci_service_name ='aci-sentence-sim'\n", - "aci_service = Webservice.deploy_from_image(workspace = ws, \n", - " name = aci_service_name,\n", - " image = image,\n", - " deployment_config = aci_config)\n", + "aks_service_name ='aks-with-pipelines-service-5'\n", "\n", - "aci_service.wait_for_deployment(show_output = True)\n", - "print(aci_service.state)" + "aks_service = Webservice.deploy_from_image(workspace = ws, \n", + " name = aks_service_name,\n", + " image = image,\n", + " deployment_config = aks_config,\n", + " deployment_target = aks_target)\n", + "aks_service.wait_for_deployment(show_output = True)\n", + "print(aks_service.state)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the above step fails then use below command to see logs" ] }, { @@ -1508,36 +1642,44 @@ "metadata": {}, "outputs": [], "source": [ - "aci_service.get_logs() " + "aks_service.get_logs()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6.7 Test Deployed Webservice\n", + "We test the web sevice by passing data.The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated." 
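For clients that do not have the AzureML SDK available, the same service can be called over plain HTTPS. This sketch is an aside that assumes the aks_service object from the deployment above and reuses the JSON payload `data` built in the next cell; the scoring URI and keys come from the standard AksWebservice accessors.

import requests

# AKS web services are key-authenticated by default; fetch the primary key
primary_key, _ = aks_service.get_keys()
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer ' + primary_key,
}

# `data` is the json.dumps({'data': ...}) payload constructed in the following cell
response = requests.post(aks_service.scoring_uri, data=data, headers=headers)
print(response.status_code)
print(response.text[:200])  # JSON string with a 'result' (or 'error') field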
] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 89, "metadata": {}, "outputs": [], "source": [ "# load test set sentences\n", "data = pd.read_csv(\"data/test.csv\")\n", "train_y = data['score'].values.flatten()\n", - "train_x = data.drop(\"score\", axis=1).values.tolist()[:500]\n", - "data = {'data': train_x}\n", + "train_x = data.drop(\"score\", axis=1).values.tolist()\n", + "data = {'data': train_x[:500]}\n", "data = json.dumps(data)" ] }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 90, "metadata": {}, "outputs": [], "source": [ - "# Set up a Timer to see how long the model takes to train\n", + "# Set up a Timer to see how long the model takes to predict\n", "t = Timer()" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 91, "metadata": { "scrolled": true }, @@ -1546,35 +1688,63 @@ "name": "stdout", "output_type": "stream", "text": [ - "Time elapsed: 44.0914\n", - "Number of sentences encoded : 500\n" + "Time elapsed: 18.0523\n", + "Number of sample predicted : 500\n" ] } ], "source": [ "#print time here\n", "t.start()\n", - "score = aci_service.run(input_data = data)\n", + "score = aks_service.run(input_data = data)\n", "t.stop()\n", "print(\"Time elapsed: {}\".format(t))\n", "\n", "result = json.loads(score)\n", - "output = result[\"result\"]\n", "\n", - "# embeddings will print the error message incase error occurs.\n", - "print('Number of sentences encoded : {0}'.format(len(output)))" + "try:\n", + " output = result[\"result\"]\n", + "except:\n", + " output = result[\"error\"]\n", + " \n", + "# output will print the error code incase error occurs.\n", + "print('Number of sample predicted : {0}'.format(len(output)))" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 92, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[1.778579367510987, 3.50046288707411, 3.2538821258681896, 4.069305058515004, 1.8320877577432195, 1.954834972650947, 3.5939983298935054, 1.897463629391319, 2.5197130778016654, 1.8823957738334811, 1.8823957738334811, 3.6895210831755922, 1.8599267741539272, 3.1787909087040562, 2.7087431329327916, 1.5672046737532321, 4.451727638080559, 3.6796594501884523, 3.544795325159195, 1.9030900379648785, 2.8150889983631284, 2.183746733671711, 3.601602614164534, 4.206702507830564, 1.6307750689271787, 3.234565416414611, 1.867143843466714, 3.4361162329783883, 1.8026588109513297, 2.9688938044152415, 3.8929035816615896, 3.219924328188107, 2.0385247308394234, 3.798599603279599, 2.829788590045488, -0.3670312508170009, 1.8917018009786544, 2.5162131407684174, 2.5313683231655797, 0.7973264807516905, 3.262854998024572, 3.445451775581639, 3.313722165367336, 1.493230130992738, 2.5124437403792377, 1.3942647183703063, 1.2066715622591626, -0.028170077204135217, 3.6913052029330533, 1.8416574429556754, 1.393522016496808, 2.1291520226739578, 0.8828945371654574, 1.0923645190907914, 1.5209416690403, 0.01411515817131176, 3.258076062326671, 0.3944014864215918, 3.9701938861436727, 2.65596017823039, 4.51027789495544, 0.8117156468744592, 3.8409064709009497, 0.9678984253828021, 3.6720236594806943, 0.46319548835196217, 1.4426295438413104, 3.472301928135604, 0.42877110586924716, 3.4961889679526936, 3.6509575980863804, 3.39986439948315, 0.5707791995828623, 2.869269733905783, 2.909406927853261, 1.069238870706429, 0.09037136322296623, -0.9221399392952758, 3.083637155759759, 2.938458926392478, 0.3601067645921665, 1.2931620160489248, 2.6363382703619926, 
... (the remaining few hundred of the 500 predicted similarity scores are omitted here for brevity) ... 
4.14816278837152, 0.2702178262854273, 2.4483758700395613, 2.8018509440076174, -0.8671041792185687, 1.4262309389339065, 4.056026973627861, 4.184003944047801, 0.6480440947173031, 0.19496594166971634, 4.9854162736412935, 2.774382992575682, 2.856655403018273, 3.707442049412853, 2.8787216882728783, 3.335011408456148, 1.4208730964286322, 0.8896856343077626, 0.3018756909266505, 1.3515810409904838, 3.047248733066036, 2.510384411750882, 2.367533824986736, 4.6934113328466545, 4.04909656881364, 0.250424560062975, 3.195284307013469, 4.1507606931298895, 2.2219526041281994, 3.2019339908690583, 2.6762653051251135, 1.4181207629812869, 1.1481837877008814, 2.757521500627893, 3.5763225613938237, 2.868749566822571, 4.955239253826305, 1.789880822641129, 3.417808767359762, 1.0614909475403267, 3.443165508984839, 1.5300799223198358, 2.86378978019271, 2.6725634246818317, 3.6959181046368355, 3.353453765270806, 3.853827294123369, 2.445840875315773, 3.6397662739956256, 4.6814294541661585, 2.925762000203469, 2.4988469180585176, 0.5439297118150208, 2.8627109987831845, 1.0139115362949667, 5.495696535722694]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "print(output)" + ] + }, + { + "cell_type": "code", + "execution_count": 93, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "0.8352580253296595\n" + "0.8200285709168692\n" ] } ], @@ -1582,6 +1752,13 @@ "#get Pearson Correlation\n", "print(pearsonr(output, train_y[:500])[0])" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -1600,7 +1777,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.7" + "version": "3.6.8" } }, "nbformat": 4, diff --git a/scenarios/sentence_similarity/pipelineWidget.PNG b/scenarios/sentence_similarity/pipelineWidget.PNG deleted file mode 100644 index 45cd68b02311b6fa123d6253236be015a2f671c8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 109142 zcmc$`d03KJA2;qy(=s)aEtZuFO-*BE=9sCWB2#I$jgFH`=E9gM=0=G^hCtIYR;E-| z=7N+~mK!OGxd4ro<_3`=0wR?PDw~RcfWQxH=2_n7d9Lesz5l%Tr5D~e_kGTNKIglg z@8^5G@{6DEw~N;;HZwE(_P~DcU(L)GUNkdXFn`ek(-r#O6VaxRFW|rW?lG&TTaTGe zzJl)ldAFHa4Q|P7)O^$V*QfS}!p+Q9q|W_*F&GOtV`gT2c))vi;F(B?9z;wFx!IpS zHIlnO1uERweeG4|w4HPI%_p{D*YgjjHneA2|MX_z{ztviwCJMU*-ySCWd4%<9q0V9 z{YSnW9;KB3^6>azz>m&%|NPCn@~ER|04>uS7yo{Ikfc6V6RGkNN#0Xt-wX83She@E zuXg|EPfN?pdxD;XRsHX;8OB)#VbS^Z|M_z{&k`{E3Nrg@<$q~;*;kWOA1}~myJ#63 z{_|1OmB`49i4d$Z#Q(p&*Rq#gS>^^U(Wt%hUpiiN{!IGhpMu#Qf#>r7`n02?*9^=k zrGfs}*P9ou{BIAoyqS3qGpbNR*=0|;+_REUhyVo4`-M!}1!|&HSEKN2VcAVfd;(VPh z7@z%R*@ANkpU^o)KfHS;m_VD!$n-__o`3psBly;_EsGBQQdtxY7&vIU(d@l;R)@5E zFq+=AAxY%?DyaYXQ~I~_qs-0#&-}K?%jXEIKstW3~K-?ckSY z=O~QKhS{H^7Pn?Bn18O~{qNt+p=xpQcXaRk59QD8+qFi_fCE6wKPLL3(BLMRg}wge zouqW`wTC7y9sX8xZAQj#22e763Eyr?R%))F|IyUzv&FV+OoCCgclVlpWZn90C4x1R z=8xRXtM{2tCt4L8nSOIx&hgqf8flHL-g_AAuWfAp=;B3kjJ%qNM)1oc z%)mS3qaE=gxdJON)>^$;fsr+HCr&`)>8LqshkbL5n? 
zIu~u?clDS!p{K-gPW38j9gvKRkYqf&R6u%e;tXr|yXq1OQ%~Uh8TP<4^n;?g3j!xl zNV(&CbK$z}xev{WZXeE3NcKp^j-^lzvZ)$cyGa5+??)rn4zd`v+!`kh8jDfD3MsQKK zZ^{Q+u+e;OFczHbd)%K6?uy-Mgx32UAQ5|XxXye8?AfUD9~mR@2b)j9UW0`t7+3k% z!v3PzUp)4gRQ|717hMdjhRHb5?S;T5Dm0p@P#CGL6LHRvPiw=(OmXZ^CbdsS@&2Sq zsZ9={pGAL@#QcY@hQioBYf!(k+MA?G<>wyVtjG+~`OFHC34%6+x8z1ZJMZ4lKG|`# z(fj^R5VFGPvz9(M4$Cq0XR|C%MjR8t#BunA37TAEIOcb%Q~b!!VoP3M!l5S*_W0NR zGoh|Ev;vf@%IG-m){p?AqUfRhfx#|9a-*~DW6l=V7cBL5HugvY3t78&){c}SDNu-Z z3hA}=V#3s}fk7oYm_9}+yh+~~0_H^GizHO&;$L<(p$+tz0Tce;YCPfH-HcxkeKdU? zOuIv(y8rQDsMV=_l6n*wLJ;MI*37vZrmVngjBKK4`9r8exr{OFN8JOT{zYl2)oxfE z)&3u-bzghlFx%Z@BRN&t`jmRUOHjYMKOrBD$uoq8ie3jjWXN$$LWw}u5aeAq_nNA| z1j^H;TY8#c21DiQvb{#@^gDxOlPLP<2ucT@C!?llR`x{kF{6QXnj2%a4&a4Ep);HK zA5Rv>xW+8%;MDXr+Nb%33Mk9}@BvQm|c$~<|6T`ozWW%>|#N9L24ulCd z-%V@i*GKd9Pc(-%tCIh0eQgqbR(u1K){uUJxD5mGR?&E!Bk8BvTXx9eIX+Wr7|<#W zvtmOL_FXuvHu%kwj58#u3fWF{Bs9c7mr{5MfeCyhJ!CeHIdd&U`ANn_5#gVMQi`gK z`7kDhhVsO4A0gcqFyk`#`hqt>0yJj7r3SdgQr~>lQy7S+Qa5e z*!lO5cCqL>2_N(X)Uo09hEB1(NzEx?&{gk5(xqgZY77U*XJDU4QLwj#blv{2xqQhf z33o3wxwWzs#u!uvFGIBak7En1-=(ezz$MRCn=@r&?yf$ffA$whFxHu_&c25uU!+4K7NJdu%U#Ckbyed%YqZw_*O9sgU`lOSBvjT&riN8nJ zS5AB|*J2QgOG`s_CwkTgdEcEo>)0@ZQ0kdU@B7lHDNTn3ByEM&&z!ocs)wLr%aI#D zx!mB{uU9Zht>s+ibV5KWDEUGkAPl(UcP4<8!QJ|y8x^(hsc20AhZ>WhL3v^kdyH(W z%6mLmedxIh5pGL0s0!Tg#}G^N_VPa?b1sJCvzD_(G=`8{Dr}h8-owOepK!I{R>OiK zq4Grp{K*^xrmYw+WsKUpDb1;SJseQ_&r$me5=XPO|w;9Q5rB%OO0wSGXt{}nr3 zG!-3|bJL-_1zpC!~%e`#rw{o zmppwViCTfbVUGqc7Cr`bse9bHfbRWILCGCq%n`2*>Rk+)2`HntZ}=Txivix zNQpIVai3%~NgtEMOPTxW;$ie8xU@6<eUu}P`fu(-s~9=)HV`zJ7XfUY@sQ9>zTwOUY* zfgKqYJK_Y+rD)YXKZ_3L2v@J`9E$8dJKa-3>;ZL=YEZs>+|Yzprm<3*XsG1dlRYR( z?8K0QA>tEtor=ar?cNIPjq!Tase*A>p_~I6@$e$)CsZL_uTqw_q6WL1rhg@EBE)G> zb2sv03WYhu_c$Yv*_P0;y zKC3=i`^9;e#a~<>zL%q<@bhTqcJ5wTYx97YZ_A0;?$5LA3aHxok*N4 z8gysMZ{WA0ak{>=F2&|rQ1DyrKqeBO#gvPO!izYG13T9QWu4jW~y? zMK2?=XuO-O_auC`xSMBB@ggdAoAYX0!YjlQVIE(7ipWrzFSsPMcgWx@ZJXZ@ zO7{=VS!M{T8nibYTgiqIDk&yQn7vz7MI|LLc&q!poPp>3Jz*tqSns6Xu(=*o83i*7 zAI_>I3d(rDT8>`gR+~4%P2QT(D=ZfZWQSu9a6c$JNg)Y~&=`UyDob*opg}V4K1Cb& zoFE=ib%N~`(>tDeEPik=VL!R%i`40)B>l0uk|cUUmXLP%U7eUNncrY5kE);~5K^aa zTTm_|6c5Z3Na*QS*)#13mEdNsZf1G zN6b34Mn#45gd5?DS?@VPp`T&BD-d{#@G_}V*QmDEFyf15x zvY+A*)5N>&bjL>6)#EAQ7dC8dt_{e%#h=qZlsuYL*H{0LuryhK&=Z*!oS5d@KnV2d)hrWWnrh& z@RT8<#`aN&SW2zab@IAxqIA)#C!B`5((cv$ zLT$KAz>rhD*TF%2y)}oBB&1TcNs&F8&<3Hj zL8u!U?iH(g%^5dQ{N51ioW@b_AdpDADTZXR)a*ZV+cZV5E*F6%C1ow*y>)DUpBHjs z8S#ChW2m|xz1^U=ZPRVRm=kI9xs2oD8yuwFWP_%r``r7ar?&R(GlV7sZYNgpTEm!~ zME^+9H?rgehN`EaD;-ho)2F||y}N#3;Zf6<$T?9`H;qy~N3)!)G#unj9rP zZbUpzNR8FPB{Sy(iY1dWR3wL@L+gdnsHZ!LOcH76Oq5o4a!lU5l4Vc|46h_gK4Tay zOIpB!c8$gf^SVDumkeOF_Ecoj2!AB1JCM!$#$4<4Hx^%zj%4`E4M!xMgflkRCLMz! 
zb#vcAO2fq7>uc&U3x2Crcw}aBlz6CAGx{mo;G=vUak?56=-u93H}$dOzS#x|!9eS# zl4_D{zY(xInN!p3NH%KfYD`_y_hQVimMH-U<=D&(G#(Lcc%4%d1B=iwggVJ83x}2I4$I&CGk~)2KiA@N#l(5W!N43nvEiImuU@#Pg zuwUYit1fhIf0C^fHTb z^O{j4HLN43qbPo)m5Ki1gwIGhFx{|8cLu5keX;jfoBfc}vbOoEs)D$u^LYzBx-hotf1N*7{3d zIN4UBnS%W_WKtNHFlCKgThMJN?y<$qq`FtgUC~p0eBmrShLI1KvcbC5ED+MKPnbo`ss> zxa`+88JvHB|LQCL=N59-J{bR#Ft6MGaMPbu?Dne<+aZlLdlce^rGbf1xboqjsPWfO zMsxHRtsq$O7c>0D4FATMe=)=Vo6PX;X-d(L<{;Db^HB(Z-FJkPb^N3BQGO8iAf%Ub zI};?Y`P+;i+RuL&EVsLEn-9+6p=}|Fq(p-p+MR4j?{_>O-H^W@3njcB=F($-`r}6q zg`vNFL{nXv;{OP$xNPQ6bz>;QV`rY%!Q3A`fuzzF5*d;lLVC-cu>Bje9H(d?QvruE zQhn*@0FwZQsy7amx*_DTOIxURTW+W8y_xSBYqtl~s7{`)SIW?x%HY{LCl(u0H*uIruqw!>wD)+-J9+1G=XeslO{DXXCaw z3-*uZNm=Zo>4%5jd2HCepCt7PpX=Wpd5oh|mwnjMS}!BTg8P2rZK1<-*=OB=s*R7y ztvQm2JvsQ+B}%_g{Q;C+6ossL{C4;r!t+bzhviM~en;6~xc;J|FKe4vbq$$V#>_{L5N2F4j-tNO`_Fb;- zVYjWY^3(l~LKckjk?U`nBttwKt}GsGN*8X;w7g>FdFse2*AqWG#NZ@!O? zcIEpHtp?SCcPcHRg6QLAmPz4n=!0oiU0rwmx@9A+aZVF4W1#Mi%<`ChyT=T$@Pc@2 zXfF%0_T~>*rA~L-#0dZdvZLUiK(5y5?C|*owPf4mvHHbSCh=NJ|KVlXmFL0ucdq?5 z4RCOf)!+$pr<^A)fp`Jj=>GrqZ;rq#$v>?KuJN!tZ}B z6|jVxlgB!K$#Yq(oaDNjH~QV&AYy<+Td*nBxQv$EZx8*`_8hftG}{sF!-W(qOSf>lvNFD|Amv;FSK%dgBDq1XLv&sjJect?P${o@zS@6t6zHtfCF ze3i@bYl{Xe+c(*pH!fMRVeF4{eRuRnFc)KP9GVNEneblBrXZPrm`nWJ1okF%`q`O< z?vcw7pg2*sy1%EV=Z|wGjK2kQS$=yE%mrOyXWkfqP#;kO^|7F1CB(rCTza2IcFR`d zd-euJcdf>QClZG@t1j9V{%Ju@;akCiYKJa?1vxfMH+nBd>Q#b)95BN75#K_679IPX zBig}&nk`WI_jcUA%G~2|iB#yFaw^v)@~2!6%wg<@GbUZNK`Mb2q}MG8Igqi4h-_IvjX(I;X4>yd2GZsW%$YgQyLjD zcN`&bM|?S_;G)?uD;xf&)^O>I<#m8vzjIHT>hC}M@sd9*By1xiLRCSV`qBB=n|Ye% zHW~Smx`OX)ojMIDUq6MMx&?P$ z84qygMCGwIK&`hA_WXXJ>C8H?%dSU$eCsiW3;o7pqHU*yIsL` za8_M}SKV;~37ipO9yl-MI;#jDlp`R+NmJu0*tbFx->fQx8?~U{p_Lrhz0)0DPWjUamZ{Tw3fotEN3`eT)2b>jfi3Q{njieP zkE~(NbinBzrI5^5LH_E3M`7j&)xlNY9oeRD=Z{r$WBlZ!ZDNMoU8Z)#QSB8k0eUBX zwkJr)?N1hDsogN|KMy)^#W92Xluxxh$0Eb{^(d_Ya9ln5@#`WoA{RzY)+guRc8*9y z(O!op4OI2X^FfhdlxZ1;G9)BpYwchwCra>=ySXyzyEXQd=58AYL*cHV8v}yc)Q5R^RxlNR2poY0Gj`~_-e&|c$ z6S~cBdq5#s|9tIyxl;`33NKir?shD`Lc5`ABf+h;J8gS>>0q-CR zg0O#0bY`gu3?Iwz4nuhd_9GVc9X^<~Tm7`#-w&wmBw}ImP8N)WXY6#r4873qvy#cf zi0Tn_-BOPar|~)`mi0AKwVBO1_u<(zk0_4{`!L@I_mV>F2?hhrXQF{9(QxQQ$5jx6$CTR1j;)AX~2@&)@dtDPpMz!9dnWQiT?hOgpB zPxOz1+OqZ>Gh1&Eck?;ww4+6LxGd8-Q{aY4i^{y^bZSKt##;hJC)WWe92cN4@NSs? zQ5I5zy;Kyl zmxZV?Z~Mg(mvN>lIp6Ze5^3%wXI#eSWC9ZH)Y#H7pvN~~rZYCH^co5O4##S!+Gz#L z_eXP}+$L-IM73uOQ>Vb^g~Zey&C;Bk9WXA}sA!&T`n)LyUlzwcoV+1sIsfB+gj>r| zsnbiiV}|oyuE}7b(tc}ulR45GWeZEO4nKy^jAIL7ZYTHEt4H(vd^=Uy1B%uR*7`!) 
zvIs-VNV9SURU*tHhi9`UJ++{Z>-OCo<oJ8&o`YVK{@5RWr!oAW@nTu_nc__QP3itxlsv!}(gZ zJcP_-bh;vrVSU2TQp5}2edLwSUm;1E!;|dYZi~?`b&$1z8}(<|9_j8@DgBxB%B$UY zYiV@NK<_FG_oK_Q$r0J)qYk#p%RWoDgPuaNEOwQ0hn z@FLn46Yi2NKCIe+7_GDP6A13&5dN4zbbp2FYtgK(K}v7N+>FDUPnXy5a;ao8j@Kc9 zcrV114cRcU8=O+4J2?*e2J8lac&Zg*8UWi1=;=o2k+8}u@#H4i%Qf<($(DUq&8$yZ z2ZJ*+^1fTlTG=S=Y)Q9C+^4@F0xH0c^ag|JjWxXw)CNT^XU#UtVR3Jq%2~GF$+7yv z{@R`1?Vs4?cWer;vD_!v@!rGUJ?&j`zDhV$)?XXyv=}#e2EIzA?_>P|@I{pik17Mutd^f_c`EOI`tS&1a#S9!!9WH{)p*Tm8+5dzN)A(`_X zkhosKi)~gB(=7>74@~vR129qyGt@gk8uvl~pXu;P`2jw1eb0V=x^=Ho-XgXS3+Q*S zbq`C@ri{ORFi^9f2G$ElFVD$=`{sXaowG%{$!&%RmN&E12-2`sb#cPcN=%0HIrsEW z*K8doKX-_FeSQ5v4FHR$H?rrAbD(J>CUJ7~r~Ju~o0ajvT;0EFJd3bsUPDUCvKYh5 z<6y|+|Mi=h4hk~nGfuY{4Od5L8wH6pZwzN;PyWGfgHgjo%VrcV~o$BFV7=dYW4BXU*nYnNmfS4u=cmLI4vgI z!uyQF35@7?tY;qS8B-Uh!=j}_%>bmjp13{_Y>kS$qjY2NleOlJYkad^jMIDkHvL_{ zoYBq6FD`sKk8oMPE%W62d4!S$MHCm@39?@_2poj@h0McwdaiN!uOJ6(0T>>|@HTH$ z_Q#R`Fl+N->(tmI#xWx2;SAO|QLfRE{GMtNJ`D`@{3MwF^aXdkY8oK%km@ocIzB#+ zvgy1FpRSqT^>60}5J8SZXrB=tyUauSu=%szV?y~*q>^x8O|1sXbJ%N8skDeLpoTuumd8)b}dvoh| zNdI4ZDKswW-aKoGvuwQF%o{^PL;o*k`~Aj}TXy8RZ8Wp|b~U70BAYLLK9CcGSZK2J z4Vej^L*&FN-ba)`zCl}oSB<8tB*Lc<9U4QQ?Zq1&>?uh!x=?v7&;p-1kKPQ4+3qE2 zhy&QKvijxfhE@k!F@*mj)v}R+oU#YWX3V)Bs$0r3-eqA(4rr`MQtRCZ;=5O*Knh}v zV5G5k__!OmyJ+~@Kn?=VezDpcctr6{X70tJ?vfK|Zoz%!*ge$F&3uHr>A%lt8 zs<*4oQIdx)PK87>gdus!sHs{(Hs0_xk6)M?+dm4>M-1IjYQWHj5)J?tL-eR=X{*vx zRD4YqJ>+%8r-3&#>nl}$N78HYHemn1>GRooMPHPB+A?)yx3A3&BDf3mo`8&GxFQPU z+C5WDb=?&Gb$^e+bMXMY+#5*Z!)=oKF)5keqqo9kM`@GQA$p-_+~~08^VC@HQQ)fG zf{}nH6`+}@3?RNatjfg5AuBuwp;DN0%yO#T*5TSVfJ<)rj!+C*p*jU@u4=~mcnR>* zvf+w%cp4vEi=cMO2JjJ|r`Vhv2j|znxMfAQYL2?d`5c*~S?x{Ks`LXEQ_*xV|7LxtI@dtDJ4I5F;%F3bn(_C=L5cEO&?R$~@!L z9+>5PNHZ(WVX^%j>eiZPHm|mBI-Y-VRRlvn$6@+IhPDe>RbKrBu)D7WBS4O2x=Wiq zTHaU|gulsb9b!0r&H7*+QPmRuB6{e}U^1M8A`uvftzF6ahnmvv!VD^w+O;LFS#SV! zbVivzC1@0FIZ%_a+D)2!_}eV*i-W6t&>DGj)cfnunZ8lo5)MUN(y;c?z&=Jb7OB(k z9*4_EineCGmf#wulCu$nxnXg-Ijqn`RU4o!cp>nf8!J+K4oSY!A8616_2kN=T74G? zsX1$-Jom0=6uE7E2?6wSGR$Gohr_Ek>hbE493e8@a--l$92;Jf@cwp0;cdr^%5cFG z5Y5{caZZbCkSOVa0HwP3UUC4Sj-gHf&j$O6ZD@zWBjy}w*ypK< zroGL&OG<_?X)(*!k98WRv;q#6?W6EJ?vSFMahTu11KkAQ@}FGVhqFs$i-X;MT#-{n z+JV8mPZ1X6I09v_QjI8_Ds_pNMbX;0{GD96x~LY_Q2pc!H^e{dbh3XrjHU3P{7+^6Yzgeh#!AYGD@!0 z18=8(GZi!&o<8=Qa{+n_ciUPE1efdr3iFX?YmbBIliKZQ(Qv(kJ)rWk?B|t$j101S z`^hZwmTXdX_hXZ4_A>Q#iaJ<0! 
zD?pO5>f#shLILP@^AiiR4{c(|-KS6u5ujVxQMR&8`uHv8t2q2jTt6CQaHY`cC#b_q zip&VnK)_uxjH)`}(sLKYH=YcGJL0Sd;w#TfWT!#|0n2E+x3p0+S%b2LXRtP=btG$5 z{{1RB=+>;nYlhT<1Q4NeKu)QJ6bj5PtwTi|s%6xc5&jOJc+@1L2#W3l{Pp-X%Q|ic zqYTtaP$az}=z(Iu!>W}6F9Mg}t+JdJhU%Vyb0vJtRO^Tr2Du=o-S`vnojf%upf-XYwPhd^+TS}-TW1;2 z1Ui1-WAl?UK~S&+JhB2Okmj*`IQ` zrEhcsr7E=+D7t>z(|S_9;BLLo}K>a0SSanUfWZ51fd8C z4i5I0T8r!}Y8zQULOOT%JaF5e{|Wx}`jTSMN$eUL`~2fN(7gOD6f6vl@7z-Ez)%gv zzgm}5?B_^-x;OL17V!837IIYO!B7;mWBqS~;CIBEHbxXA1nReqeU11M3m}B(jEU{G z<#zsWcILXQ-&yGGx%2jS>y#f-&F=wxG7>}x#4y`E)Uj``wBXrzFn#X<7Q~h-*%b;sT+#;7{Oa76)*`pn_pDj4;Z}gS# zbdI*=<(LctdyxbJxMvocjBfTo_^$#JZ8~q%G+V{v;~@42+FL8Pr%vD5oGM#w&36VU zX+c5b{7N?Nd$igd+EFhENvE2ax2H}+KAAD(I5&^Yk1w%s0D)UzkLti6m%s3lt@Z{{ zp?O}=&l9Ha*DVaM}l)?5KE6a*w#My5t^Lq-ZJ{fF#PBSBd_5@@GPypDk`RMn3q>+KM zR;&SW(aw9Ojqd%iK46S&8}9GNXz>Ec1^F!+UM-vQbK_;X^KQYy-`sMV$IQQRTtl1F zVt#!0PPNk%fHZ&DZWx{vn^`9%G#(u)D$Y3UlFi-Xm*da|HdyQm!AR^i1Qt4mhep@s ziFX>fL&0^11h&4@j`Cz&P$=*^UE~zb8b-2KGM!{w0hsu4jc?YDHCr=Zn!ym>1J2sQ zcqKfkM4vOuCV$FtL_Kbp?9{Oht5nQh(#!`Wa9B_KM*zR*blCAN`wKyuDu2aB@sIKx z?<=E@gbi&W+T*!2)u#`xgvNF6h4MYyIXv&x2Ki+x>JCoX$!W8klFc0I$&m!HczntL zb3b}S6t`{XH$=M^k~Cws#~uuMHT^J!ei+KJJj1AULA|Y9(n%4rBcke zk+mJ9-G1C7%)Y#Ky}|zi-^`p1%Q@CYxeU^dH`9lj?MdkyP`mY=Vj#$HNRaPXTp-tM zO#9%w!uz21##(s!VQb>yPM;Krt>MN4~!rx&SzqSk08;Iu%K<>=X zd>AC@9yHqpyzt$|D%lOrW)?b|(~G`Ae1$#6dDpd21yY$6hd-OzFS}1BgHLjeMNtbk z?ss#9_q$fiJOi1~Jeo>1*4!#gGOPgLb=)Y;X3HZ?ko5;o<+>#u5p4#k$m$;|gY;9u zjKjf7^ct`h2jg14#UxJ8)Pc-yFIdRkONM0+qF#j$Ftflx)0=eqTvs8+&2bEia33j(~#W^*<9q&KUIBH3Hv$xSDe zD^F?eMxNlRY!(VJCFS)oahc^-Az%UG!i%L_*`P#wo5g{YAM1~Fh$0&&yn2)KvqYnv zK_jeH$=x4Y>6|4>F1$N1B>F}nxJ3ulBGw-}G1W5_vZa~5occ@I;Tdn5VY{eYlDr42#PP4hSYoz_!lRFCY-@2? zF_ONXFdhf79FrQA1VXy{A|9BrGEiScP3f$fQbYXOH`Ah#Ko# zye1p@t*&Q@;hSj9uhCu4@*Lje+<^1aW@gcZ_2h>nLvGN$S7dZQ`{c5~y?-)%Jq@5=0mk#Y`_1qloPDb?4oXsi|8o|qqR!XX$nraOcGj7)mb3+NBWsz^tz(j^b z9mOTFr7g<4y(PY7i(N4luZt6XSfdP;_{J#TtS)R`kG?q$MJQM3{n-xJ({QoSzOzvfsd$U#y|BPJ!C^$w_6;m{MA5))0{d%!|M z!VuW3G0(-U=sq+`f!_j$;X+C5-3NmDtCMX&FUGY$A(6Hys7yV*i$vffh0aL^4Gz_| zhELX1%3Km@5>dio&9o6B*!)PX2D}M=%=Y3e^MSu*b%2C<+oYsWOpdlTN}jbhINy5 ze20`si7+HlMBZya5{9GYlB72kpk5wzU zcgq7AWJkh#{)l9k?I~_^`Q{oS;SxoC=6zD}H8$cKorXkoqbH6qVW1afXJ6t`E?s+9 zq@n5AuQ`?D>OK?;bk%&NJI=uHKWVlwXGV;+4`XEZ8HNB%!Aky(}j2PvF+I z^zheNyCtu!+FMimrZ4(*^XW6|QjT!LDX=okR#D5+#uB9$f*?X7uYvZmR@n%bK6E!l zr5PekT%QAa`Jpi1#f2cJp3~n*3X(KVl#=8S)0|u>xG|am2{M2iKvTSxof4ylVN;~c^s_kCd8WCdAyt1}A zYT)u_)e@9Cey)Z+af{DV#f(@qeE3)`qY~xvNZD)Uo#gf>o0C86w7ADfAUVpj4T>`} zOr%~iqq{*kH6kDgyihR%OkDzgEZnO&N#Dwq$(tv`WJ#rrQcpc^>kivqEW@KowT6;R z0vGIIpPwMpo6-%gWI8%UYzUbQXPSGSU_PC#cH99~v~<(>ns%+K)5|b2DMIm-?KjxW zaiq()i`~8cY|!>PU1yBLs@W06D{@G4aj@1L)-CsAed7__n2FT~4g`T`qSPu)5Uyg( zoV3YhffmK z_SJsSiuM;NZ(5(@vd1^}b^P)qIaIm|TP?6dbYS^zM?mcKPBIK_hvmmiGHVxx%q^iX-6)mF`G1F-yW1>L%BDQ(G$PA5- zW;lHJ9|+Su_Q3sVOSOkjTvvVx;#XA?Yl1)bj$a4}a}iWP^4(irgQr#CBAzH9>&6y? 
zZeTAzsz2_{gy%#{vUSrby!Rz$fnJsFeI1Vh8I0Ct zK_AZnhj*t8Sl4g?8&zO@fXsMwyR|>d7B3o@d+U29$E;B*WwTSJ9+D61br(%WoYQ>a z$+}fYAC1^!NVPDKhF~CTRY`+)nv}0N&6?$~Pi=At!gLf$uwkr*F4(DF(~Q)q$s^2C zkJN_bOt~ZSXqJ#Kd^YeS-^A5eAN_md-Tg{v-Lr~|&5mPwB$KsLIwn-}zPj_K<++S6 z;>m<0|7K-(wZ$57C5>sHI9;yW&p*SBdza<^<0WZLMDzRN$p~;) z#i?aH<>>)GU%Kk+p&<;_5_CMd9c`LQB2CX~y0Nh199}Cmc}xPPK1#bM;!Vth z%_@MrMxEd--HO8<4p#NJ>S|6;CFqLu#RN_(Nm`_uMnEak_VNYVn>BPN-CPG<)?r%{ z%vo(ct4--cHMui%BR3>%F;NXclae5fARs@E0~))k47fx8K@%2vvCJX~@e(iIZ0}4n zb?WI>FFn^JB)osUQ1{rix{4fMbPqABYs3&JH)=w--$Medf44Nsw2@RZ;3p$6qy^OC zS7dmC+o{jUpRPHCKbL@KP0ytb5EuIUk#ro=4P@Ay2~r)%;Dc5~=Tf-v=BjhnHmNIOzzqsW~k~!DTDXzkPJTn)UQ(qQd!4t%dyc3`J-ezG?VaFZ<%hbd&@e& z_ZYg=1E5QVN_jY%O&v7`jedU=n`Gw}?!i-IOe0J3<9-%hUJ*fKjBVkLa`B=l%Fb&v zR$~*{>khaSf}%@znHuEE9woKX7j^|P^y;dm=O}tDOP+uxa;>_jw>QZMm&8nIf#XQ! zRl&*O4C+)~%MRvx6msy2SJw>=C6A}ib|29Y=i*8kA=Sxmo_yqrc<8Ye6b`nu*7DUk zl!m^j_m_cmr-H;%eMEC~%?}AH&k2y07bsOKuS*6pQ;av17v7jN_jE=v9)k|Ufrh~> zDrlOVE|U29#L$vQ_|i$Wv?#fK3D-8Fr`EtxE1ahI@WH-nl4H9cnk3AVCq&_hu6BRY z!<(2Uw)Dyb1)5F%X(|}tf|`t96xEM|gsnrFojHt&b!u4>T}bH~Wa&4NUPuf&6|-Ax zkh(_Mj3w$tNwP63PV?;oyL@8uD$oF@B|MYfGN@WOBbA%^I$l!oLb57_p}!z7-{D0Z zTBs&caZRQ2ef|1dQC7RmJx)q`Z{LMfQ8&Fi(4c>Bw(=ZlE~B8kP&4@i2t|BZ>uP-C zYbKjAsVvea#ex>$QAwVjDj~X($Z>d69p(&6cA`_Pg6P#RW+Qe=OIFmk)EVAcN$;WDc~@qN_-D0)C##R=^~@;y;bCs^N;3 zzykPlOlrAY-IM=^_+hGSTzmLnhGen6F$hu6PM;`F$Ug2=wxT8)b|n05#z#skzWx+r z3P;lh)sLsiyC2aUq6Ua#1vwScWP+o2MFNFN68nj+ZZ4rJv`tft4L0~;Mmg)kQV+aD z`|c=J&Ns-N)ovw)hPO%976cN<(9XFrkQA-V;BfT)N6VwW=fCZpl|SX^H*xtw%VCaU zlYll7N1t2DyJW>9c0059XMv!%IU|Snkf> z%;56)=m9rwBgN5=^&p&x}e}yyu3j8kkD?|?EA^wWnFX&kPSB7K3|IS>{ z%eQrFI3E8aBjsG*a?|z})O2~T&8kY~ZY-4;H|EvNWYb#dT@!FQ>BEP&LzM}JKqy`K z3`)71eTREqJfB$WS+m4;OkWPV$ljhYX4Zx;ZZW^{TOTLNWlj0?74SmUF01+M8t4=3 zGA_geq{dvcb}Y$h7ytgpUomKpwX30a&Fb5s;oX;>HfY>I^Vu#FLwB*> zJlWT9MRakCGmT^grTdHlptRt}hf{Jr(I<1~j;KE}CNa$&Epu*e2zfi-mbnB%d@dc@ zyX3nf`PLlUN8sAiIQMAY{kK&|{FUf0K%Udzn5f$Yl9)R|Qg{PA7ECt!4dR=*&hzG@ z0rx@5b=+7UZ1%ayXoTaf>-X3;d$9{Z?sn%!eNz!=VJ$vte>2gb0E-%*PkUA$0clSh zNHw>8yDSlJASwV-1cbaK$&=`>?;FZ53rV^ucvuRFg$^fvyP84IAHe;)rD rnh^T?-Uosk#=lknH$(pPqXG6D=41Bg?zJ<}54Lad|Doca2haW=`!3&} diff --git a/scenarios/sentence_similarity/pipelines.png b/scenarios/sentence_similarity/pipelines.png deleted file mode 100644 index 38f1cd5aa89e2c5d0e18b9df39439462a6b638f5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18681 zcmafaQ*7Wm6ClPfgC0kP`R|5wV zFacv*LlYtiYXdVAMH2&Kcl%KjZZI(M3<+TYWw({HEI2Rp`OZr=D8UVI;;@4t1XP&c z;V>~!Rf^(PG;=o#Kvs__9x~OYpC(RdcfS{{J1Y{4|1r7mOWh_rW`M`HXrNxUYR^iC)1bf7hXOWd@ETj5yU?p z`&k8{0fA3HA)lc^pU9Lvpo3t^9q0g^3q<>OPEzjwJ&}|h`yuFHC4Pj=$NOz(UO42H z2^S4?C59v+rgjazziPZ23tP~O?aRZ_zz9DttY9Je0C*S)1mNL)OtMNkW^i~rSK>N1 zBzb*G8?dFY<bCpt zsOi)7PhBn5kmYehNkIwGA*Q#9PhY_k^6NXrX-tblQV5wSZI(-ZgnyD?g>+2qi|bFg z`TG=tb=B?FWp?9GJzG;FC9(lDTFUaxt10qq4Mlx0M^}{{ve$top&K2F0%Z>*Lu-Zm zhkWO;7Um}1xSVXSa{c3m3<61P1`gf=A9^?#e7*^)3k>J;ODj%ZGDc*LGRS}pJ+8_@Ne`V@@`lPIxA)W%f^KW$>>4n> zzgu~Aw>8a zGIct~n`VnI?PEjwAo>aspJoP7MpUGV*F&>PM2OV@!&11Wpf}fOJHNGB)Qkd9S5(%2 zIkMwu*v`_l7excWiY9m_E}itCt&ykPm^*q!R>&ak+oQ6A5jUGi+u1~zH098PDVcP z=S^Ob8Z-_u!(ej}(G!K$i-Mm3T$1UZLe^(b`~v)MhWZL_`>4tu(-6_h=AIgYioJo8 zPZD9q_B6LyHgTf31*hY;UCf7vgyVoExKsj;Kz;Z6o%vi)hQtTd5@JhXvsjS`jQ|Fc zzhe`TX=$dFozS%|J%?A&6-ucqDMz8%AGWNmUH2=H+Nmx1bk~ilU!4U!cS!At7$WtD z6fAE?qce?iR{-iTD$t$mRXidaXT7i&V8LT+6Nn+X)DD>rjE}&(evnHMLNR80%MMZC zZajZC0VfW3fEsKZ^LK-`rSAG7mgK<4Z4ae9fJJ8eAimb29Bc_m>dJSmLEe;e^HSbE46~l(VEr=^8%^EZDE2xgUo{X1{XYI znrd#Dm)mW`(Qs&u<&Ga^@Rb}YY5V+}KJe=kKk zai@K&J^zf=j?Q9dUjBPTuBN$K@C!g`pV5XoF+AY2z9{DLBtv}J3)%BcwYsFD%MpQQ 
zM3z{8Cy^*uCb}YQ28=H=Z*>!P9(jL3-#jUy@T8_`JxA2aZY3V?UVvy(qP0in!Y&;W z1uX7oQc$A^fRd8S)~Q{=OSoqLt9+th$WtLX;{3k*K(I@<5>U% zw;PgB*(n_{K|xC0&`8xt!@A9?4z*;9)*AXp1e8#w4mLn4r|X-*q*I};hLB=zSE+-# z&?$Oj)C&=CgZa1Uy)R+8H3Lid7+~&_U=iPDH&k^$Vti=tX{H9On`aZ>k};@?_4eyi zDX&!Gk2(4pu@@A1O}y+KLwdcusFuYp`SeZhj< z%52tJrqS@s?|M)|;KG?V461N*VGc)0&w6!jRP*p~<9^*nR?0`KRvi~~Is8hVw|PL> zfC$cJ<)T2*D*E}%%!0%Q?;%2zPq1Z^yIhjtVw6(c4?xtTzHz=(W8IM(U4mGQ=%zC<;&%Ki z5@)fP!{?B4sLxSRA8ZxDA)?E8Z11}KfVc{yT&|uIgJg2* z?TVvE9{vlDjOLx$gRhX4hH6e3=b=)RC8g<=Ff0C99&6!TY)aB!_?q7*$)i>!koIf- z#3Rj)IK`)sXjJ1XGWZm?Rr^_@F+(t+u2ewpMwk%Y`9qcu8q7v2mW+ANS=E|~vMLg` z-wyFZD#cWhkSr|k2cnY*g5kN^GM4Xi?t?$d6iw17_fBzX z?XZMom~VAd7al(C*RkA)NX0+jaj@aTertoLtXGDYp8l8GvxWAc*8%rutdtG=M<`72 zz`-PedC5CMN|mrVN@Z2|&r526xlpQ4q*#a=9Yff~7B=iMUI${TAZ;o@1pL2;sMX;J%r!3#g@(5A1^^)Hsx#n!_3 zoVqA&SQxf>%&g+Xxx<5K!YZ~6b~kY0%TF7{YQfNp@GIAAZ)K|03qyszxfQ;g~nRJ*MNL_*$!zqhMFwAT|=Ct^H0Gt<_VP97!~^ zCIgLR%}}1cd{atDcIy{7KGmr3vQ-JqOydT#SB+u=<3aL%o~;rK5q@1S_l*=pnsy!C zA9b(?BJ62R#8fPlmXe8VW88_-Djl)3bd51QqGNq)C8VcUBJ9S+D#x{`8xli8Uleq| z#W!c~{m8@SqwxO#G?&CF+0<@-`ux^PqLr)kbJ|2kHrAOvby@tpoJvqUwMK)l94%|> z)b~NfnOP_?wR7I5*@t4OfQ#KTprq0fD+uVp|Zu20)| zayBfDU9+P{5jd5@_T-BkFWXDet zrRTuk7eC|sSO;~&8Z>eoJ)$$1ucV)^2H{R}Mjo~CcQko-x+jxphRPncqeB+aA+$q~ z=!fu|S$>s|1$=^O!v#+>1touE@x!pPlOEQCw-_E!)c=muFzEM12BmDyu`V&n>PNzd1a4(m|p%E<{!>4{C5ZF zq-@aP|2`4>F-}B2e1X&J;V0!~cVO`-IzoQQB(Z$H#%ZO0MMRcpX5TibI`1_9?L*vZ zq-TV`s2J&h){K>8!WS&ZA*Y92uEKAE$m&!RQdN0#M!C#AF53hz!`Pj&2fWvj0Yqe1^gPsB?2?fGqWm&g}6UkXs ziM)XV;)&XeF$7dJ3iazdGCfjOJ%M8Y)76wAN5v9`Rl zl-m5jL2elHU3k>kdGeOSPQTlVpH87pu)ojaCA9C~KCgsZH-6@r!p`l}rhvwcI<%E{)x#;L#{mom! zcYiiXAG&!QGt;g1+}hqSeEuM?O?*)nd!TWk_U&7Qv^?Qd3Gvm=J|UOtnAy&~B+^hM zb~#nsLfxa?fuVWHXsSCK6ak@mQNVPdYFZOZQFBVhwbu+cfbEgQuHs&|Sd z^^Cy^As#eDj720w`OL8%vwC7&@WncgT z5w*f{#BC@H#g~T(KC@vXnFqgpL3_QQVllJ60p5l$$=A})({Mr+VY3BG?N*U~J{o^kJFsSkl^l46xAaBd;NlA@prBd0Y$)CW5%-)6p8ut! z4I_Yaxz?y~hh|cI8rggf-@B5**Yu^+&=@drRfw!!zB)O<-!`4brPnHKNUMW~7UEba z2(}2z>hT(#Rke9Vu&&eZJ*K0TTuMRt|B7-Mm*VKPIRu1F%Do{*lVSH)J4P$9X! zra_Pey|48Axu41`FM*PqV+NlI^{SfY zm+%P0_%lZ#1$4@ zE<0Xym71%AUPo&;Ym83%R`iZ3W`Ai~BkH#w5I&!tY&SE!7n^=#%3nP(Lm#skJLyVG zTe*&M(;!`YmE3ucf=+!pFX;HbuU9|+gg#c7i$p{+qa{d4ywjuWmK+xvpY_EW5SwY% z{$=5IznT28|Ja?L^S-x~Sn5R-cn9@zwGs1#_sjigHi0+4;i+?8!%_Er8_2{`47wel zfqNjs>t!f7I5N$3To{|jBNtSr^kbRQ~X9W6kF|SofgOve2};3yI&OAK5O?(>1Qu+(6er$kX>mze?!Hxuewxw+^<#0#x+n*YLWi z=*s_=!1A!ZjAoK%K{9S~6h2~WYd6g?UAF~scDCnyEY+f75xLuHVbq@_thyZ(cRN2O z#tU6mO{E*HxIZouX6C#X%)`;IM|W`R=;*AkmKIeD{lxJL=k9dBbhv0|6EucAZ1hK< z!+bjF$8$ezkxh-J?J_xpNghrF*B>f;ITEoIa$6V7>MXmv!6~0`BatK-RBwFky%=|( zxFU!|2nLR<%`C-&df0p*7CsskT1B70+O6tPWZGld172FMOctCNDH++q$w@Z9qJ!pT zOoYO;zm(0SE#gqH@>w*{#X3P_a2_n&zSqkw5txRRSp4X zFaTXCJXx!z>6nuHXi|Cq7&hj~Sq-!swD+tB(jhLAnqK^ocL8#Ck=x$TC_CVGI68^A z^v@bJnH(N2GL}ZDCXb;NuBT<}l@y_#qLNOO<2D^J>TI3nQ2C=f1XNHU&2uwC$;zXt zK>}O!mAJ_Q)BV~N8j_49aFJX%;meU&uvAei@^L-hi!ga8e)kPh9 z#!+1P7O5B~1#*{I8W>7iHsX3lk`?Z&9va7wWBn>jSX43c7P>n>^jAlr5jEP>$=FzF zP4}-(E~-+OJ13@f?KgMM{>)tIc2)JqQ8OwvRC;GSY6L1H&Nnn)Nr7<-6-?C{{Rkc2 zhIOW-3=^Xfl{Z>f(r?5`QU}*a*Tg7I6oHed2C!I}55^rdAEh>7u+MToCqGy6U@2
?ky_Mz838L z=?ea>php^T%(SN#vH3fP(Z6l)M|rttqJ>)&upLpcZ!RoC8HCgLCJ=zY@>f^eF6TAh zi?=^=oI~0L{jGv=7{KsCi^c~gaVe=tr^a8=M2mzsf~51tUG@cyC5=y_Hc=_bkJPc& zrVZ6HeJteW?pDDjQiFctS-@XHRc5(EqAD^@(aNvL^&=cUZK=zd+Uw=VueFamrOlNo zg}$rsmS_@}aC3t9Symt*K|#JCR%XPacLzVGC<4qOj{?tr2uHM-C7`+;LZ7&Tk1C&( z6pE;T4GOmop_-E@n@nr(S*o|u4w+FecWl8ymvP)okze;^Y$_yCBkD9QFl#|a@^Z=q z<#GT#30XVWP1$bHeQ2{f!ERlltZG`qiXT>?MDV>u))K-1S=4z|Gk0s(P?mkJEM=z@ zk&3bLKw_9H4&hq*p&mbj?)@C>Sx5tshzTwsbO*I4lopB5`CJRVdCu|`u>5H%)&22} zqkcG2LQt7{yDET~{N2E9uIy3Rk6}K(v;k^f_j?VZT1mhV3E++m@M+aBWq~hfq#shQ zz`(&x$~O}3QunY3x>2BhGtHNSx)z+-b@b*5^#5Wv=Ka6rmX}ry5n$)op5Qds;Q|d~ zIvUV46T!%pAbI06sLDPkCW@XN2=h?}ShU?I6OVh;yx*{=RFIn6yrO@%WTt@ldtOppj5itvg(GxPXeT4I+ zSJTD%^b?*(=)RJzI`4UKm%>X`cmHB1sKZZtUyZwW`>YBTS(1VH7&oD94w2qcSrN;sAm6O^2&}`gZIMz_g zb1)T-(R>A=V8DLQf;z_H31EV_!TtX{q|q4M%OjcSbjJ-n&usD*8f#kIBrj{e&Oey zl|5S?%5}d;{8=jOd2s#%9!Vbu1!ybq&g*G8*vCa2Zxq1urrJ5D?(T8=9yIjA_g2@l z4LIwzwP)RIaYXrEu)b{6j8|1&VyVIv@&!?l2d7cl3R8`RZbdyfzXbV@PgKrNx7Kz_ zM^j<7IUQk`f$T^%DOe0LeZDe8>@O>I#804A!Q!$rz=g&hNKR zeiH^u#Cs0ATGpZr{(>L~cIN74`vj+H5P=nsj6(G2ShdfHmV!)pB`V)Dix!fn-ESBw z)8q1x?BLbU{)ZLJ2%`Q^g+C|eLaF}t&(a^E=JB%zaszO5>aj#$k;SgjL?MeH7soQt ztKl(aLwfxUdjGCvCADOBHufT5MI~W6_ z4XRy;&4e^U_`H$RaF=oR1@3S;aB*T{<-NaGuIZQz7N?J^$@F|%=JA!g&oyZ#nI#c4 zcBPPI$G?ZnG^sLZ_|`+AIQ?@#j1J%-wmy=0FLrm{)a5>ZrSEJzYTw(|qsP%;<)vz& zOfHJs@wmaI8X`HRJ_Y#emN#`%5er=|)SJ6aVvB!hp4Q7O4>_%Ep+-$sfaEU*=!3Te zs|T|W>iiw{i|VNskiYPY%mh$(g33F)qv<(ivLD5zIppIX zyo}eZ`@_3GU4WIYC?oVYW|q?Nr&LLwTJYIIkvq0PW(=l0MZfgN9ojq45vb*iWDBk$7uLP)G<&hxQt z+T%-^V8B`*iq-~%G5YZrkr4g-4ysoY+@M#BK^%QZ&GISKU6Q_P4%An<{*IoxKO00F z&|OLkgy>oTQv{EgoOp&BgcyjFV0ZtX@&nPcGYm_i!b>dimcNq8cpk3K3z?VPwZZup zTv{eJBJ=1|wR2hPgI!tN9IRNV(XfFQw2sh{njSmy*Xx5CDRJzK6J4{u#k1!RTCQ(X zCo^ITyzd7w)ZQ;5uAC3Yn@`u22W`9qa0>F?ec5CgYpZSxoj82uUFP< z3p8^uB)_w6EtANG7|7#}{QRjh)s|>Ru{s2_q*T-CU?VRml&R-*DqR8o4rHBI$H&P) zwN_ENr__P}smEb3w}Lbnff}L?6nFG647h$X@Qp(T>Qb)fjVP3y0K@>?O^&7me^$t& z_wJH%>O zF@5)Z?HCR57VDT5AV z*MH`H6Sj~nho4}0_ywDeQ}_}9=8_KIFc9o$58Z^di0VJwx&%oFdyRYQg$gM+_Nq35 zRkJ>nKf=dI{hi@RNKQ07N;VonzzW)L?RR@TCEU*6ul`N0?)eY_TZp%aE3czi~;u7_9Fm!U{S`&z=_g>kSXMGAsQ_g;H1qL_*91 zDX5ogso0b&!}>e8L*ExED=g>Vn{t8l^wf6pT9{Vt)dzNOVp`;u%kCL22`up`4yL~t zi0JbKJ8LFI$}Jc_G{|+jCtwH+Q>4>oH&Db*GQLJpDg{{Svk1%HgpFPR2+RKBjzwa* za@Mkv8Hs>YOgHTG#V&gAco?3h%b)ldM`o6nyNS7fMGq$oRDcAX3HOYFcu8TPw+wB? 
zhZJ^uO{iKaHif&L<@%1g3E3*Xfp(BzQ+uC{YzSp@aWF9u-WE~D@b#jup}G7o>XRzM zR@KY~{LBMv;l}Lcx87tQ*RZwk8|)wbNmAcI{WKlboG{6$v~O@RB6^pp+mY{+HbdvWeq zD0uNIQp22HBkY2wv6c?D2@U%9IhX=>ep@}Bm4~7*R%tp1P-YdLszKl``Vr~+C#VvK zl$djaB-~h^@Y3$tIioU)+FVFn#9E2*^Z9l6i1${wgDRPk8gH#OhOe*Ln!8)5uZU{a z3ry{4zL7lDvc#kVhX$uSfIUveZmdHrvVU%mYnB2IoeXGq@^1_%J4!Y88{;LT*!>~= z9uP+5EqSL2+g$Ex;ch77#p`}ZIgs8ULD@d$CJC*>@+;~;=SZxNBxsEFo^g%OXmK8* z?H~NlR5Ac3jz!QD!XIkZ=i?zLuBLE0tgV%j-dRY@L5tN#=l#Z5T}cgVGrO1~6$<^c z$|AWj0$De?r1xv4r}^k`a0h&pqsdo?G^61L7ev6oX_Aln)}LMevy;U;)=x$WnKs=SJ-P_<*iIcd-v6Gre(u0HL>UhzL$jMr>bY2)y&n zH(MU?bM#ujVs1Ol9v;&E+U%ly`};hcYS|9Gsdq#*XUKtU`#QLrd%*}p+m-tw?*WBI zPT&kdJ{}RLtG~{bQ|bB0#2Z3j_&k^G-I#lsL;&GyX-ovgyR_N+$3I@~v)O8{IhjSz z%5&1y&-zMc`*FlY`0A--Mq&x;j14Sqi{up~VyNVFE35d%b@2i^Bb#8S>{NQ*@u2KD znPRB(@~5MGH8fy&RRlW}kB$1@hc~|)J8fFxA}P;VTmGWxbb40eTm8}P@1uH{UH0nv z8vNjWe9{Qs^at#-<>>0JmhD3>3@@j>3d+~3Xn%K>X3kU4qho=9xkP8A`j^f<3b@@` zy<&d29E&S4tJ^2=m!5)at-b&RAss-s=4b zwG}1T#c(`n+@$zq?BjY_stFv0P0R}|9!bo1ElQv{8hXvQ4n>!DHY1k6aD1CHv@LRk zYE7;EDmoMtT*lk&Q|^1AHFmz0k-M|Q;59kf$d?KpBX1Lbx5tt`$HUGJ79f2fzfz`M z5{JUd4wkm5jZJ^|dR{mOeAEWqhY!AzXHZT#d=V1#fb#y*dOE(eL}zD~X(Lz3RAjo( zBs)#s|6&!4J}R#^l8*YL@+A=2^*G@17|>+*-)C>7dHTyl`Zte7iVeg0>^F<;I6%%$JycI(h72afV zR~Wm$xZTk4*O!9Hg>AgE0zGiPkF%4oI)tcZz5Qf23J}=6eN-ODgTLtu%5T$wpK#8p zV6fFbOO@Q@(eA`JWBXGmMu&pJt!tg)JGAgx)w=S+{fme6RuO$yhztVCuaSqa=*X?} z`}0oOIP(|Yk~?1t+ZHqBR4cje`$bz{?rGmGbmzOVd8)}2EgV`_-37LqZ>zF~aJZ=9 zmv*1A0G|(mfPmlMzUjuCtjrq#9>=)c`o7Ii8WBUjS{wEoEo>?yR+Y3vFKUpnpk3;b zk&mXMh6!D*piuA5qp~ZOxSAD;Zhz0%Ft^D3R@qYa4?1%M&KAP-y1``dVtK0n(GwH~ zI~f7B8CEm0o!Ge}>z5NUAQXA=xSTeznW)pl0<7?lV1mb3HZEOJ=h4;{Av_hOGIOCG zc2`b-?mtSQZ%`oiwNQ7Au_H9{x1u`ObJ(SVB_FrzHluWibQY9W+^0+9#a(ld!h^71 zu3sxX!S6|hWXXrU$?%kDNUU-YdrG|TpWLcK7BzKt`h{wcKM-vY)1eH!zbEikI{sX= z{j7cbyV?iYKzob)xl|p)S93|ok;BrZvv@O_2IoAP(|))k#lt`U^((?1E(X1m@D@UU zz>u7bdd!o`NU=-STf_D3N5vg$=QEGVyMq1L>LE+SXa);o;aR9|45A+a$u5DW3)e$F z{2!?g-7YUVF<)I`u<>wZ%+Wap%TzS<#Fj z8rPzQi#!R)^Egnqzh@(&7s~;8HOe)tPZZVmJ4YofgyHx7?>%eSVo2=tVQ{GOw;)H; z<3M39kZUXda_1T~b@qe1v+5w$yZ|@*#*k11~~Q zf2JgGowf3QvE5$28+lnM-*9M#BO9Xi*n5qr>$;?2!^7lFC0?ibsfJ5gj=5){fhSNA z{tg;J)68fpxpt{si&@#xZxIlIxr}unE}z>vid)sF0h9meakibeIai9N42_YFy}*rK zY&VtOO$~fZIg1fwv+rznp|UxsYxz#h^kiJZdp%7Zh3u1)cHiIL=CJw{_~01vZ{>os zHCrF8KDoG6f(&<6-sNSv_porOD3#e-sxj#l0|f5tPRgBAq$Ig}>%ZJivbr+gUlkPV zr#3YGSC7WT7aeJAiqD$xyPG*1npYT`oINco5NUD<&X*Q__i#JYikd|{mdjyhrW&4z zFor&0tl-MdU{HjJo=fh8K63w*Nhtt${pR745hJ>p(R<)9tNWh=BW*P3XD8V+KjS2A z4;ll^uF{xn*hxQecY{|sj?(pb$sA-4iTPaGzR12s=R422dB*Bp$H7wXHT7SuQ17RK z4>2$@;=LPcjy&E(Ob!)7ey*vNQzxBaUgC)r-s@IqsFq1J8dutO zm2ucEj5O)KO-+3+btme&-P)R6_YU1D7|hzj3_oTu^a3L;X}?is5M32EV#ccdyqdbD zD(fr}i!i$+-2;&k*3K;0?YO}NBOzsYpvez#c{l0FSA60hc*P`|Dt@<*b4Nyw#M`Ik zlwWA0vJ{txo0Jh#Lj&C?%5n%qh0eZ`wtDSDt$*6ougd>YCtnHy6`??uo~90mFr030 z+;)^kU@g{q&Qp4cfKDActC#qkFJ*8ULu%^!M-eNkZ{bUF;kN2f$KL)Q6`{5Dr zoXi?nfAA{nJ&a8E^gLMu*`ib2d}MeWfnay`;AT%)T}AotdWo^$MG5e z+TLhK%*8rKbrb(~APo}_yT*7GTI)EPw?{~(ZEHt!hXA+F?#BR1CNoH^5+}^R*DMm< zzo@tiZ{OF94UFB`it$~#pGNXMgtOU+xxJ1%uc@hl3_%PDs@Ds4Ya^`I?H^xPIi_K9 z2YCaosbziMn!DdV)ZprJ;9>76mS0c2>lJ5m)_#DbvHu2FOUheDzURw_3!H0w4^8$++%2gRa{s~OfuN#bHlMdUB_vroj=QSq zx6;+2D80KLHvBm-)oPP)G>{vjfX|`s?6SQ6pyJi%AT3>qcXuHqC?wpVNYK-?8mSne`Vs0*}}3&$|V^PF*I|y|;tKjIy!@RoD_AZEfybn161J zj-nYOIw2y8EVO-z_@LkQHyFTgi> zJkYfvd>6KJL7=X#NSJ{M8N`v0^^{-!ZV#$V%D>QroMu9er=cVuGmv^piMZ;xf{^}= z3Yy{qVP%Qspjpq2w=~@J_P}#F`P41LOfh-j6VwWDrCpY6_^XgdR@=Hz+hbe*@2?!) 
z#DG9vo?}yem+7kPR0VlOWz|iP-WYs2MZooT9B`;fiz|ddE#1Sm)(%?A&Es4uhUpN6 z$nYr$OmTH*h6l=Z(cyb^vfxrglLlzffvmPPYrdL_@``ATbWy8M1dR{<2L3)@;KXtr zhz^OT#l>|x-cgAmkdYXN4km;HN7q%tN@~#lj)~Nofza%8sa`n-pM6`){g6p_RCL$v z)BeVd>9Ewfz`3w;sQksP$G)j#qH*_5*})mj{*$RJk+KJn&M&hcrzi%oyXr2#-{7|l zqb-!^rj8B;NDnkq*yRyC77M|=C`Sh2I3 zD#(XhfGcxwu#)c1$5+h#gr=d*T{$d#3FR628td-08+*blrp*3@flFDB{F;3Di%^HX z=M$Vm*iIQ)DVz8{I>WfiRTdM8Car(Id%dWnOi=6xnAT;O3|{+bm7GQ5U$hfWn10Bo zSRPoKu$V=vLf1kLojb(_LSlqVy?t!qAf8azMxheAuEI;gF7^{B`JMW5a=`$T7?nxL zskWqqb5ftuBuSYs;5AVoRYqUvrpCg!Pj+nrBw^}lrBDVSr6NFkdjMnn?gLX2@NnUk zsJ`L{qkh%Yv@tN=R@l}G3aR`aB9ZBgC7%gBJno{+Wx>vZk#aP1meiyELEWiFKD#eFAEW1U z8pW*rj;WD8 z2G|hSXt~E{I~0cco|GjvQHIpTBg?ldrwxZfT+r@)Si*9VjS|pJRQcHAyGOi6Z-Mos zq2P-0EGdYg#xgo}VO>oOqWT?Q)uk0oLd$>oE%b75YI|LT*H=Ci^T%7CSLUxQRk=W} zY=;pOK*!d7gtnZuw#$NWX!}pt$gWgR;UyuN3Qz^w>_p+z(5I!~m;?y{yT4$#?Xdp_ z7YN^IGhM;VTM@fo;GpXx za%wWJVnLSZQ05+&9bs}B1fDyy=8LDS`P`Ta@0UqJ4cPuXF8tR!+c&qQZ$XI!Q=s%&SrOY~fB}ttB;`Dim8eR* zm5RM1EJ353SGC%6IpHiKlXn#7aI0VEZKspxZ9_$xUG5-D;X1@7s9OHd=AIoy3+|3~ zJ8urbHvE6Sz!$9DaCbZ;IZ&zzhF15sn#1IRT!`014H8G7B8A&dO_+j*+rh!4uu(g) zu3?T>Z0Wp6XOlVM~Pt%A%9-jU{5>+VbV+n)gtC!{>l1SM7O(GbARB9AmA#Ms5- z5bXztc0rYO(dxtg*%iWkF%WGhOtSke`{>X`EsNW=Hl_NtvB02q`$O76uq|3T&FgPwVcpoz9y3P6?d8W6^pGcnzzMl%_DADbip71Z8)UXWhQj zGM#@Ojq-ojDUwj!20CHNUs#?hUQ2{R&tNt!2CwW-geGT!_H9J9+4%nSEMFk>b-5Ql zi&K5?^KL9*5Dz?e4s8-Ssd03DP;V5+2Na;)?T2oyy?THeE8uLd9m zY+&US_mQxUAbXZc;aZF!M7towX2i1Tr9lDz?gkn&4jd@hE(_dTx7tp~Kf~18b~&MM zV$$m9Nmbt8%%k)MM47FBKOm%c)3-|X<%@ebdNZ16)Cs(#`xkTB#BZ#XDLk=Z!l1Zy z!eO-s1>J8Y!X+HuZ6l%ry;l$jXm(@esq4BgYhFA59cCUu@cfZy>$9n4!%pi(e~a~R zH`+`5DlufoX>e|q`L%#FJLqog4OmgT9`;<3$nrPqPTKp{I}0{cOw%eei1wBT=7%cz2( zY|nST%O5L*tTwUdJ+U>%w*+lw8~t9OVInnUIv%doY%ytqiPV^yY^;Pt#MlWFtmK|q zZ(m?0_Fqs;)&O+d@4o1BfrhUOi6c8@rW*OuF2t$QnzTLk!^d^se0)U(%t3pe%xk+r z*@D<9ZB<1V@8W&!Fwpw1Nl6t$QsT9solUqk1|-ddesefc{=r+s^RHRFa9lK98*FcT z$p^YM00XoxQs3zKJig$5`{?k#Y+5WElj?3H-YnCUe>A`E5L0VK_mxuWhm-26by}wj z78|6$=KPkZ!un@;W^Sjf2&5hS`4f^H6|x|>UdVwTuGM7$0fof}c$j(Wdv6rI{>Ymp z^wWe;*7&|s)I1~=&_V6`wNcfRXte}eyb|JuE~P(cPk1d}vjYzRhe7)1L4S6UZ~n^% z>;)4Q%a~8L@*+c7L|8mive6>0+?+->I_kFFCupGiM_zK#Sx1ll(PuY%viljGw+%KvoGpMdH&fgFrXas$9WB24g|8Uv_lO5*87- z5YWSK)X>CfTguP}W>nR$yvQp$;M8ZvCbOPX>AkPyd^~L;#_+a#(BOmcxAgH~cgRu> z`x|fKN6^{p^nxl?vF|ID<8Ek;<<|SST;OMlnXlp9V(sTUpLI1Y-5{$VF@e<< zE=!nuOw~*O5CSNS(q%5GpBBQog`;zz2_2#l+Vk^<_Wt z)s@wa+}^Ukx~x|;xO}hM`7_6_j45SUL_M3DaeO`&ZQ*L~?!ksBqE|gMMt&R_CT~^D<%=#m}XaId@XD5X(uKr^9fS}VXM{7JpD72Z9 z$Lo64IWE{sP=1ib)|{2CTgWMfke8^<^29F$>~j5MWqF12wbxs0gC_kg$$iIn_h{q1 zI)~kh-%y?fa@H9IiJ5?qLnPA^Ph?-e|Hig?UiM`cG<(w_2b2RHk*(A+8X6C~L-J)pQ#7j_MhXh`{JJ|zSPpNi^(cv*+_nj0_nKec{as6UM$Cz*DN zt;0pUg1I}x0#aW4=^9vO%Lxkzag8pqBUaw5#I5PR2r3e7xp$ab+Kfr}pC1t*yxo$a zYmaEdedG^2ADnL)Xu(l8v-E}8Xo+rV3#`rJ$B$QV6GhV1(HXSuc59_z?Q*}I4-5+V zJZfzm?|O7wovxwhybHWz=DFwPQFeKiTj1}86zX&GMo93nKW4F=)m(4odo^c?j?PK< z7us1^bsWb}uv5;WBKCfrbD9sL>%50HLk7ZeUv2Ho zHI&wMdWj%@+d#S@iSAxBdtME<=?&a={};xna-J-P zH$1ll$yq(@)PBf7H2y8jiB3LN){D4`%NuiK;4J*oU2odQ^R!kvj?djW-8etDTzfOf zaH|@UcN@d7=n|Kn$)FnYpcylw)x78+ym52f$wquxg#y|C_KltU?s2wKw;_353io4o z9Ars%$ymIb84f~f(wwR)dtM2RD)wi{CN!KL5YNml!rl^Cf67VWJDw#)j^JlXcmUPCM&#L%VP(%UV53pj;m2(?NEnZGI4!1+n8G zvrSdF9CW$}-0h>hC=(Hr6+@8>CbS=ZZ^|lA*NY0yBO8h~+KdV_m`b7Xv34`lIxQWZ zqv{&cAa@i?8O{r8D2Yko?imd_sut)$+8t4y(1I8_sCe~G{?W~QmxdpvG3n6!4Y%E7 zFU5}U87^nO*qD=@7W*MY*szhIT2yP!bc)aJ^VDfpK!1P=C?)gF^+M=JSN>O6FeXP$0G457lDulStnm z1CgZ)H`U-`xNTj$u1igiJV1^aOgZL*%G z2v}oD7W`3yi8C@5!xEhloj8^kD5lT5CJLq91T!T}$4!oyxvji#A5XbR25pmFZu070 zKp!&UB;Kr1NoN~wlYas)j!(M;6nsE5LL4Km5yNNUF 

From ef03948287bf4fc2bafcace46eb869444fd5e3bb Mon Sep 17 00:00:00 2001
From: Janhavi Mahajan
Date: Wed, 26 Jun 2019 12:08:41 -0400
Subject: [PATCH 084/108] Added missing automl_local_deployment_aci

---
 .../automl_local_deployment_aci.ipynb | 1112 +++++++++++++++++
 1 file changed, 1112 insertions(+)
 create mode 100644 scenarios/sentence_similarity/automl_local_deployment_aci.ipynb

diff --git a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb
new file mode 100644
index 000000000..df005bf90
--- /dev/null
+++ b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb
@@ -0,0 +1,1112 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Copyright (c) Microsoft Corporation. All rights reserved.\n",
+    "\n",
+    "Licensed under the MIT License."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# 
Local AutoML Model with ACI Deployment for Predicting Sentence Similarity
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrates how to use Azure AutoML locally to automate machine learning model selection and tuning and how to use Azure Container Instance (ACI) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and utilize AutoML's text preprocessing features." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Table of Contents\n", + "1. [Introduction](#1.-Introduction) \n", + " * 1.1 [What is Azure AutoML?](#1.1-What-is-Azure-AutoML?) \n", + " * 1.2 [Modeling Problem](#1.2-Modeling-Problem) \n", + " \n", + " \n", + "2. [Data Preparation](#2.-Data-Preparation) \n", + "\n", + "\n", + "3. [Create AutoML Run](#3.-Create-AutoML-Run) \n", + " * 3.1 [Link to or create a Workspace](#3.1-Link-to-or-create-a-Workspace) \n", + " * 3.2 [Create AutoMLConfig object](#3.2-Create-AutoMLConfig-object)\n", + " * 3.3 [Run Experiment](#3.3-Run-Experiment)\n", + " \n", + " \n", + "4. [Deploy Sentence Similarity Model](#4.-Deploy-Sentence-Similarity-Model) \n", + " 4.1 [Retrieve the Best Model](#4.1-Retrieve-the-Best-Model) \n", + " 4.2 [Register the Fitted Model for Deployment](#4.2-Register-the-Fitted-Model-for-Deployment) \n", + " 4.3 [Create Scoring Script](#4.3-Create-Scoring-Script) \n", + " 4.4 [Create a YAML File for the Environment](#4.4-Create-a-YAML-File-for-the-Environment) \n", + " 4.5 [Create a Container Image](#4.5-Create-a-Container-Image) \n", + " 4.6 [Deploy the Image as a Web Service to Azure Container Instance](#4.6-Deploy-the-Image-as-a-Web-Service-to-Azure-Container-Instance) \n", + " 4.7 [Test Deployed Model](#4.7-Test-Deployed-Model) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.1 What is Azure AutoML?\n", + "\n", + "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", + "\n", + "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](https://nlpbp.blob.core.windows.net/images/automl.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The AutoML model selection and tuning process can be easily tracked through the Azure portal or directly in python notebooks through the use of widgets. AutoML quickly selects a high quilty machine learning model tailored for your prediction problem. In this notebook, we walk through the steps of preparing data, setting up an AutoML experiment, and evaluating the results of our best model. More information about running AutoML experiments in Python can be found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train). 
" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.2 Modeling Problem\n", + "\n", + "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence). The sentence pairs will be embedded using AutoML's built-in preprocessing, so we'll pass the sentences directly into the model." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "scrolled": false + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Some hub symbols are not available because TensorFlow version is less than 1.14\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Turning diagnostics collection on. \n", + "System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]\n", + "Azure ML SDK Version: 1.0.43\n", + "Pandas version: 0.23.4\n", + "Tensorflow Version: 1.13.1\n" + ] + } + ], + "source": [ + "# Set the environment path to find NLP\n", + "import sys\n", + "sys.path.append(\"../../\")\n", + "import time\n", + "import os\n", + "import pandas as pd\n", + "import shutil\n", + "import numpy as np\n", + "import torch\n", + "import sys\n", + "from scipy.stats import pearsonr\n", + "from scipy.spatial import distance\n", + "from sklearn.externals import joblib\n", + "import json\n", + "\n", + "# Import utils\n", + "from utils_nlp.azureml import azureml_utils\n", + "from utils_nlp.dataset import stsbenchmark\n", + "from utils_nlp.dataset.preprocess import (\n", + " to_lowercase,\n", + " to_spacy_tokens,\n", + " rm_spacy_stopwords,\n", + ")\n", + "from utils_nlp.common.timer import Timer\n", + "\n", + "# Tensorflow dependencies for Google Universal Sentence Encoder\n", + "import tensorflow as tf\n", + "import tensorflow_hub as hub\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "# AzureML packages\n", + "import azureml as aml\n", + "import logging\n", + "from azureml.telemetry import set_diagnostics_collection\n", + "set_diagnostics_collection(send_diagnostics=True)\n", + "from azureml.train.automl import AutoMLConfig\n", + "from azureml.core.experiment import Experiment\n", + "from azureml.widgets import RunDetails\n", + "from azureml.train.automl.run import AutoMLRun\n", + "from azureml.core.webservice import AciWebservice, Webservice\n", + "from azureml.core.image import ContainerImage\n", + "from azureml.core.conda_dependencies import CondaDependencies\n", + "\n", + "print(\"System version: {}\".format(sys.version))\n", + "print(\"Azure ML SDK Version:\", aml.core.VERSION)\n", + "print(\"Pandas version: {}\".format(pd.__version__))\n", + "print(\"Tensorflow Version:\", tf.VERSION)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "BASE_DATA_PATH = '../../data'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 2. 
Data Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## STS Benchmark Dataset" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As described above, the STS Benchmark dataset contains 8.6K sentence pairs along with a human-annotated score for how similiar the two sentences are. We will load the training, development (validation), and test sets provided by STS Benchmark and preprocess the data (lowercase the text, drop irrelevant columns, and rename the remaining columns) using the utils contained in this repo. Each dataset will ultimately have three columns: _sentence1_ and _sentence2_ which contain the text of the sentences in the sentence pair, and _score_ which contains the human-annotated similarity score of the sentence pair." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 258KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 294KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 252KB/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Data downloaded to ../../data\\raw\\stsbenchmark\n" + ] + } + ], + "source": [ + "# Load in the raw datasets as pandas dataframes\n", + "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "dev_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"dev\")\n", + "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# Clean each dataset by lowercasing text, removing irrelevant columns,\n", + "# and renaming the remaining columns\n", + "train_clean = stsbenchmark.clean_sts(train_raw)\n", + "dev_clean = stsbenchmark.clean_sts(dev_raw)\n", + "test_clean = stsbenchmark.clean_sts(test_raw)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "# Convert all text to lowercase\n", + "train = to_lowercase(train_clean)\n", + "dev = to_lowercase(dev_clean)\n", + "test = to_lowercase(test_clean)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training set has 5749 sentences\n", + "Development set has 1500 sentences\n", + "Testing set has 1379 sentences\n" + ] + } + ], + "source": [ + "print(\"Training set has {} sentences\".format(len(train)))\n", + "print(\"Development set has {} sentences\".format(len(dev)))\n", + "print(\"Testing set has {} sentences\".format(len(test)))" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
scoresentence1sentence2
05.00a plane is taking off.an air plane is taking off.
13.80a man is playing a large flute.a man is playing a flute.
23.80a man is spreading shreded cheese on a pizza.a man is spreading shredded cheese on an uncoo...
32.60three men are playing chess.two men are playing chess.
44.25a man is playing the cello.a man seated is playing the cello.
\n", + "
" + ], + "text/plain": [ + " score sentence1 \\\n", + "0 5.00 a plane is taking off. \n", + "1 3.80 a man is playing a large flute. \n", + "2 3.80 a man is spreading shreded cheese on a pizza. \n", + "3 2.60 three men are playing chess. \n", + "4 4.25 a man is playing the cello. \n", + "\n", + " sentence2 \n", + "0 an air plane is taking off. \n", + "1 a man is playing a flute. \n", + "2 a man is spreading shredded cheese on an uncoo... \n", + "3 two men are playing chess. \n", + "4 a man seated is playing the cello. " + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "train.head(5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 3. Create AutoML Run" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "AutoML can be used for classification, regression or timeseries experiments. Each experiment type has corresponding machine learning models and metrics that can be optimized (see [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train)) and the options will be delineated below. As a first step we connect to an existing workspace or create one if it doesn't exist." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.1 Link to or create a Workspace" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "WARNING - You have logged in. Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n" + ] + } + ], + "source": [ + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.2 Create AutoMLConfig object\n", + "Next, we specify the parameters for the AutoMLConfig class. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**task** \n", + "AutoML supports the following base learners for the regression task: Elastic Net, Light GBM, Gradient Boosting, Decision Tree, K-nearest Neighbors, LARS Lasso, Stochastic Gradient Descent, Random Forest, Extremely Randomized Trees, XGBoost, DNN Regressor, Linear Regression. In addition, AutoML also supports two kinds of ensemble methods: voting (weighted average of the output of multiple base learners) and stacking (training a second \"metalearner\" which uses the base algorithms' predictions to predict the target variable). 
Specific base learners can be included or excluded in the parameters for the AutoMLConfig class (whitelist_models and blacklist_models) and the voting/stacking ensemble options can be specified as well (enable_voting_ensemble and enable_stack_ensemble)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**preprocess** \n", + "AutoML also has advanced preprocessing methods, eliminating the need for users to perform this manually. Data is automatically scaled and normalized but an additional parameter in the AutoMLConfig class enables the use of more advanced techniques including imputation, generating additional features, transformations, word embeddings, etc. (full list found [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-create-portal-experiments#preprocess)). Note that algorithm-specific preprocessing will be applied even if preprocess=False. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**primary_metric** \n", + "The regression metrics available are the following: Spearman Correlation (spearman_correlation), Normalized RMSE (normalized_root_mean_squared_error), Normalized MAE (normalized_mean_absolute_error), and R2 score (r2_score) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Constraints:** \n", + "There is a cost_mode parameter to set cost prediction modes (see options [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl/azureml.train.automl.automlconfig?view=azure-ml-py)). To set constraints on time there are multiple parameters including experiment_exit_score (target score to exit the experiment after achieving), experiment_timeout_minutes (maximum amount of time for all combined iterations), and iterations (total number of different algorithm and parameter combinations to try)." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "automl_settings = {\n", + " \"task\": 'regression', #type of task: classification, regression or forecasting\n", + " \"debug_log\": 'automated_ml_errors.log',\n", + " \"path\": './automated-ml-regression',\n", + " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", + " \"iterations\" : 50, #Number of algorithm options to try\n", + " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", + " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", + " \"verbosity\":logging.ERROR}" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "X_train = train.drop(\"score\", axis=1).values\n", + "y_train = train['score'].values.flatten()\n", + "X_validation = dev.drop(\"score\", axis=1).values\n", + "y_validation = dev['score'].values.flatten()\n", + "\n", + "# local compute\n", + "automated_ml_config = AutoMLConfig(\n", + " X = X_train,\n", + " y = y_train,\n", + " X_valid = X_validation,\n", + " y_valid = y_validation,\n", + " **automl_settings)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3.3 Run Experiment\n", + "\n", + "Run the experiment locally and inspect the results using a widget" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running on local machine\n", + "Parent Run ID: AutoML_ad20c29f-7d03-4079-8699-3133d24d3631\n", + "Current status: DatasetFeaturization. 
Beginning to featurize the dataset.\n", + "Current status: DatasetEvaluation. Gathering dataset statistics.\n", + "Current status: FeaturesGeneration. Generating features for the dataset.\n", + "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", + "Current status: ModelSelection. Beginning model selection.\n", + "\n", + "****************************************************************************************************\n", + "ITERATION: The iteration being evaluated.\n", + "PIPELINE: A summary description of the pipeline being evaluated.\n", + "DURATION: Time taken for the current iteration.\n", + "METRIC: The result of computing score on the fitted pipeline.\n", + "BEST: The best observed score thus far.\n", + "****************************************************************************************************\n", + "\n", + " ITERATION PIPELINE DURATION METRIC BEST\n", + " 0 StandardScalerWrapper RandomForest 0:00:11 0.0606 0.0606\n", + " 1 MaxAbsScaler RandomForest 0:00:47 0.2127 0.2127\n", + " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:21 0.2173 0.2173\n", + " 3 StandardScalerWrapper LightGBM 0:00:15 0.2905 0.2905\n", + " 4 StandardScalerWrapper RandomForest 0:00:12 0.0669 0.2905\n", + " 5 MaxAbsScaler ExtremeRandomTrees 0:00:31 0.2224 0.2905\n", + " 6 StandardScalerWrapper ExtremeRandomTrees 0:00:17 0.1769 0.2905\n", + " 7 MaxAbsScaler DecisionTree 0:00:14 0.1186 0.2905\n", + " 8 MaxAbsScaler ExtremeRandomTrees 0:00:17 0.1891 0.2905\n", + " 9 MaxAbsScaler SGD 0:00:10 0.1448 0.2905\n", + " 10 StandardScalerWrapper RandomForest 0:00:12 0.0199 0.2905\n", + " 11 StandardScalerWrapper DecisionTree 0:00:14 0.1245 0.2905\n", + " 12 MaxAbsScaler SGD 0:00:13 0.1310 0.2905\n", + " 13 MaxAbsScaler DecisionTree 0:00:12 0.1370 0.2905\n", + " 14 MaxAbsScaler SGD 0:00:12 0.0572 0.2905\n", + " 15 StandardScalerWrapper RandomForest 0:00:19 0.1924 0.2905\n", + " 16 MaxAbsScaler RandomForest 0:00:11 0.0187 0.2905\n", + " 17 MaxAbsScaler ElasticNet 0:00:10 nan 0.2905\n", + "ERROR: Run AutoML_ad20c29f-7d03-4079-8699-3133d24d3631_17 failed with exception \"Primary metric spearman_correlation is not available.\".\n", + " 18 MaxAbsScaler ExtremeRandomTrees 0:00:11 0.0972 0.2905\n", + " 19 MaxAbsScaler DecisionTree 0:00:13 0.1686 0.2905\n", + " 20 StandardScalerWrapper LightGBM 0:00:26 0.6102 0.6102\n", + " 21 MaxAbsScaler RandomForest 0:05:00 0.1617 0.6102\n", + " 22 StandardScalerWrapper LightGBM 0:00:25 0.3608 0.6102\n", + " 23 StandardScalerWrapper RandomForest 0:02:32 0.2200 0.6102\n", + " 24 MaxAbsScaler DecisionTree 0:01:13 0.2027 0.6102\n", + " 25 TruncatedSVDWrapper LightGBM 0:00:31 0.3707 0.6102\n", + " 26 StandardScalerWrapper ExtremeRandomTrees 0:00:21 0.1498 0.6102\n", + " 27 MaxAbsScaler DecisionTree 0:00:11 0.1748 0.6102\n", + " 28 MaxAbsScaler LightGBM 0:00:18 0.4395 0.6102\n", + " 29 MaxAbsScaler LightGBM 0:00:23 0.4191 0.6102\n", + " 30 TruncatedSVDWrapper LightGBM 0:00:43 0.4102 0.6102\n", + " 31 MaxAbsScaler LightGBM 0:00:27 0.5077 0.6102\n", + " 32 MaxAbsScaler LightGBM 0:00:44 0.6012 0.6102\n", + " 33 MaxAbsScaler LightGBM 0:00:48 0.4611 0.6102\n", + " 34 MaxAbsScaler LightGBM 0:00:39 0.5135 0.6102\n", + " 35 MaxAbsScaler LightGBM 0:00:24 0.2219 0.6102\n", + " 36 SparseNormalizer LightGBM 0:00:24 0.2888 0.6102\n", + " 37 StandardScalerWrapper LightGBM 0:00:38 0.5663 0.6102\n", + " 38 MaxAbsScaler LightGBM 0:00:31 0.3793 0.6102\n", + " 39 MaxAbsScaler LightGBM 0:00:40 0.3672 0.6102\n", + " 40 MaxAbsScaler LightGBM 0:00:21 0.2507 0.6102\n", + " 41 
MaxAbsScaler LightGBM 0:00:37 0.3352 0.6102\n", + " 42 StandardScalerWrapper LightGBM 0:01:05 0.5460 0.6102\n", + " 43 MaxAbsScaler LightGBM 0:00:36 0.5104 0.6102\n", + " 44 SparseNormalizer LightGBM 0:02:38 0.4208 0.6102\n", + " 45 TruncatedSVDWrapper LightGBM 0:00:37 0.2362 0.6102\n", + " 46 StandardScalerWrapper LightGBM 0:00:41 0.4394 0.6102\n", + " 47 MaxAbsScaler LightGBM 0:00:46 0.3982 0.6102\n", + " 48 VotingEnsemble 0:02:21 0.6408 0.6408\n", + " 49 StackEnsemble 0:03:01 0.6409 0.6409\n" + ] + } + ], + "source": [ + "experiment=Experiment(ws, 'automated-ml-regression')\n", + "local_run = experiment.submit(automated_ml_config, show_output=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The results of the completed run can be visualized in two ways. First, by using a RunDetails widget as shown in the cell below. Second, by accessing the [Azure portal](https://portal.azure.com), selecting your workspace, clicking on _Experiments_ and then selecting the name and run number of the experiment you want to inspect. Both these methods will show the results and duration for each iteration (algorithm tried), a visualization of the results, and information about the run including the compute target, primary metric, etc." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Inspect the run details using the provided widget\n", + "RunDetails(local_run).show()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![](https://nlpbp.blob.core.windows.net/images/autoMLwidget.PNG)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 4. Deploy Sentence Similarity Model\n", + "\n", + "## 4.1 Retrieve the Best Model\n", + "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the get_output method which returns the best run and fitted model across all iterations. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", + "\n", + "The different steps that make up the pipeline can be accessed through `fitted_model.named_steps` and information about data preprocessing is available through `fitted_model.named_steps['datatransformer'].get_featurization_summary()`" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "best_run, fitted_model = local_run.get_output()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.2 Register the Fitted Model for Deployment\n", + "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered." 
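+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a hedged aside (not part of the original notebook), `register_model` also accepts `metric` and `iteration` arguments for registering a model other than the overall best one. The sketch below assumes the AutoMLRun API of this SDK version; verify the argument names before relying on it.\n",
+    "\n",
+    "```python\n",
+    "# Sketch only: register the model that scored best on a specific metric,\n",
+    "# or the model produced by a specific iteration, instead of the overall best run.\n",
+    "rmse_model = local_run.register_model(\n",
+    "    description='AutoML model (best normalized RMSE)',\n",
+    "    metric='normalized_root_mean_squared_error',\n",
+    ")\n",
+    "iteration_model = local_run.register_model(\n",
+    "    description='AutoML model from iteration 20',\n",
+    "    iteration=20,\n",
+    ")\n",
+    "```"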
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Registering model AutoMLad20c29f7best\n", + "AutoMLad20c29f7best\n" + ] + } + ], + "source": [ + "description = 'AutoML Model'\n", + "tags = {'area': \"nlp\", 'type': \"sentence similarity automl\"}\n", + "name = 'automl'\n", + "model = local_run.register_model(description = description, tags = tags)\n", + "\n", + "print(local_run.model_id) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.3 Create Scoring Script" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting score.py\n" + ] + } + ], + "source": [ + "%%writefile score.py\n", + "import pickle\n", + "import json\n", + "import numpy\n", + "import azureml.train.automl\n", + "from sklearn.externals import joblib\n", + "from azureml.core.model import Model\n", + "\n", + "\n", + "def init():\n", + " global model\n", + " model_path = Model.get_model_path(model_name = '<>') # this name is model.id of model that we want to deploy\n", + " # deserialize the model file back into a sklearn model\n", + " model = joblib.load(model_path)\n", + "\n", + "def run(rawdata):\n", + " try:\n", + " data = json.loads(rawdata)['data']\n", + " data = numpy.array(data)\n", + " result = model.predict(data)\n", + " except Exception as e:\n", + " result = str(e)\n", + " return json.dumps({\"error\": result})\n", + " return json.dumps({\"result\":result.tolist()})" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "# Substitute the actual model id in the script file.\n", + "script_file_name = 'score.py'\n", + "\n", + "with open(script_file_name, 'r') as cefr:\n", + " content = cefr.read()\n", + "\n", + "with open(script_file_name, 'w') as cefw:\n", + " cefw.write(content.replace('<>', local_run.model_id))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.4 Create a YAML File for the Environment\n", + "\n", + "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, autoenv.yml, which specifies the dependencies from the run." 
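Before building the deployment image, it can help to confirm what the local environment is running; a minimal sketch, assuming azureml.core.VERSION reports the installed SDK version, to compare against the dependencies retrieved in the next cells:

# Sketch: print the locally installed SDK version so it can be compared with
# the versions returned by get_run_sdk_dependencies() below.
import azureml.core
print("Local azureml-core version:", azureml.core.VERSION)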
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "experiment = Experiment(ws, 'automated-ml-regression')\n", + "ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "No issues found in the SDK package versions.\n" + ] + } + ], + "source": [ + "best_iteration = int(best_run.id.split(\"_\")[-1]) #get the appended iteration number for the best model\n", + "dependencies = ml_run.get_run_sdk_dependencies(iteration = best_iteration)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'azureml-train-automl': '1.0.43.1',\n", + " 'azureml-automl-core': '1.0.43',\n", + " 'azureml': '0.2.7',\n", + " 'azureml-widgets': '1.0.43.1',\n", + " 'azureml-train': '1.0.43',\n", + " 'azureml-train-restclients-hyperdrive': '1.0.43',\n", + " 'azureml-train-core': '1.0.43',\n", + " 'azureml-telemetry': '1.0.43',\n", + " 'azureml-sdk': '1.0.43',\n", + " 'azureml-pipeline': '1.0.43',\n", + " 'azureml-pipeline-steps': '1.0.43',\n", + " 'azureml-pipeline-core': '1.0.43',\n", + " 'azureml-dataprep': '1.1.5',\n", + " 'azureml-dataprep-native': '13.0.0',\n", + " 'azureml-core': '1.0.43.1',\n", + " 'azureml-contrib-brainwave': '1.0.33'}" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "dependencies" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Add dependencies in the yaml file from the above cell. You must specify the version of \"azureml-sdk[automl]\" while creating the yaml file." 
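A minimal sketch of deriving that pinned requirement from the dependencies dictionary shown above, so the value written into the YAML file matches the training run:

# Sketch: build the pinned "azureml-sdk[automl]" pip requirement from the
# dependency versions reported by the run.
sdk_version = dependencies["azureml-sdk"]                  # e.g. '1.0.43'
automl_requirement = "azureml-sdk[automl]=={}.*".format(sdk_version)
print(automl_requirement)                                  # azureml-sdk[automl]==1.0.43.*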
+ ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'automlenv.yml'" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80'],\n", + " pip_packages=['azureml-sdk[automl]==1.0.43.*'], \n", + " python_version = '3.6.8')\n", + "\n", + "conda_env_file_name = 'automlenv.yml'\n", + "myenv.save_to_file('.', conda_env_file_name)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.5 Create a Container Image" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating image\n", + "Running..................................................\n", + "Succeeded\n", + "Image creation operation finished for image automl-image:8, operation \"Succeeded\"\n" + ] + } + ], + "source": [ + "image_config = ContainerImage.image_configuration(execution_script = script_file_name,\n", + " runtime = \"python\",\n", + " conda_file = conda_env_file_name,\n", + " description = \"Image with automl model\",\n", + " tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"})\n", + "\n", + "image = ContainerImage.create(name = \"automl-image\",\n", + " # this is the model object\n", + " models = [model],\n", + " image_config = image_config,\n", + " workspace = ws)\n", + "\n", + "image.wait_for_creation(show_output = True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the above step fails then use below command to see logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(image.image_build_log_uri) " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.6 Deploy the Image as a Web Service to Azure Container Instance" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [], + "source": [ + "#Set the web service configuration\n", + "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", + " memory_gb = 8)" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating service\n", + "Running.......................\n", + "SucceededACI service creation operation finished, operation \"Succeeded\"\n", + "Healthy\n" + ] + } + ], + "source": [ + "# deploy image as web service\n", + "aci_service_name ='aci-automl-service'\n", + "aci_service = Webservice.deploy_from_image(workspace = ws, \n", + " name = aci_service_name,\n", + " image = image,\n", + " deployment_config = aci_config)\n", + "\n", + "aci_service.wait_for_deployment(show_output = True)\n", + "print(aci_service.state)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Fetch logs to debug incase of failures." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(aks_service.get_logs())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.7 Test Deployed Model\n", + "We test the web sevice by passing data. The run method expects input in json format. Run() method retrieves API keys behind the scenes to make sure that call is authenticated. 
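The scoring endpoint can also be called directly over HTTP instead of through run(); a minimal sketch, assuming the service's scoring_uri attribute and that key-based authentication is disabled (the ACI default), with a placeholder feature row:

# Sketch: POST to the ACI scoring endpoint with requests instead of
# aci_service.run(). The feature row below is a placeholder; real rows must
# match the featurized columns the model was trained on. If authentication
# is enabled on the service, add an "Authorization: Bearer <key>" header.
import json
import requests

payload = json.dumps({"data": [[0.1, 0.2, 0.3]]})
headers = {"Content-Type": "application/json"}
response = requests.post(aci_service.scoring_uri, data=payload, headers=headers)
print(response.status_code, response.text)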
" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "test_y = test['score'].values.flatten()\n", + "test_x = test.drop(\"score\", axis=1).values.tolist()\n", + "\n", + "data = {'data': test_x}\n", + "data = json.dumps(data)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "# Set up a Timer to see how long the model takes to predict\n", + "t = Timer()" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time elapsed: 2.7085\n", + "Number of samples predicted: 1379\n" + ] + } + ], + "source": [ + "t.start()\n", + "score = aci_service.run(input_data = data)\n", + "t.stop()\n", + "print(\"Time elapsed: {}\".format(t))\n", + "\n", + "result = json.loads(score)\n", + "try:\n", + " output = result[\"result\"]\n", + " print('Number of samples predicted: {0}'.format(len(output)))\n", + "except:\n", + " print(result['error'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll calculate the Pearson Correlation on the test set.\n", + "\n", + "**What is Pearson Correlation?**\n", + "\n", + "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", + "\n", + "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", + "\n", + "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0.6038286237427414\n" + ] + } + ], + "source": [ + "print(pearsonr(output, test_y)[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 1055ee9f00d2e4a12c12c9a92f126988c8f46d3c Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 26 Jun 2019 12:28:39 -0400 Subject: [PATCH 085/108] text fix --- .../automl_with_pipelines.ipynb | 40 +++---------------- 1 file changed, 5 insertions(+), 35 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 10aadb125..535b63860 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -1156,24 +1156,9 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "861bee4292c5496f91d41f6e875ce9ec", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_PipelineWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', '…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "# Inspect the run details using the provided widget\n", "RunDetails(pipeline_run).show()" @@ -1650,7 +1635,7 @@ "metadata": {}, "source": [ "## 6.7 Test Deployed Webservice\n", - "We test the web sevice by passing data.The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated." + "We test the web sevice by passing data.The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated. The service has a timeout which does not allow passing the large test dataset. Timeout is based off a few things.It is set to a default of ~30 seconds. 
It depends on the error but is usually resolved with batching if so much is sent to the service at a time " ] }, { @@ -1713,24 +1698,9 @@ }, { "cell_type": "code", - "execution_count": 92, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[1.778579367510987, 3.50046288707411, 3.2538821258681896, 4.069305058515004, 1.8320877577432195, 1.954834972650947, 3.5939983298935054, 1.897463629391319, 2.5197130778016654, 1.8823957738334811, 1.8823957738334811, 3.6895210831755922, 1.8599267741539272, 3.1787909087040562, 2.7087431329327916, 1.5672046737532321, 4.451727638080559, 3.6796594501884523, 3.544795325159195, 1.9030900379648785, 2.8150889983631284, 2.183746733671711, 3.601602614164534, 4.206702507830564, 1.6307750689271787, 3.234565416414611, 1.867143843466714, 3.4361162329783883, 1.8026588109513297, 2.9688938044152415, 3.8929035816615896, 3.219924328188107, 2.0385247308394234, 3.798599603279599, 2.829788590045488, -0.3670312508170009, 1.8917018009786544, 2.5162131407684174, 2.5313683231655797, 0.7973264807516905, 3.262854998024572, 3.445451775581639, 3.313722165367336, 1.493230130992738, 2.5124437403792377, 1.3942647183703063, 1.2066715622591626, -0.028170077204135217, 3.6913052029330533, 1.8416574429556754, 1.393522016496808, 2.1291520226739578, 0.8828945371654574, 1.0923645190907914, 1.5209416690403, 0.01411515817131176, 3.258076062326671, 0.3944014864215918, 3.9701938861436727, 2.65596017823039, 4.51027789495544, 0.8117156468744592, 3.8409064709009497, 0.9678984253828021, 3.6720236594806943, 0.46319548835196217, 1.4426295438413104, 3.472301928135604, 0.42877110586924716, 3.4961889679526936, 3.6509575980863804, 3.39986439948315, 0.5707791995828623, 2.869269733905783, 2.909406927853261, 1.069238870706429, 0.09037136322296623, -0.9221399392952758, 3.083637155759759, 2.938458926392478, 0.3601067645921665, 1.2931620160489248, 2.6363382703619926, 0.11964411998628588, 1.0572862960365454, 2.844225439387958, 3.761174189987671, 4.022002929758196, 3.3617845329434126, 1.1598854829034062, 1.8305925299376244, 3.010449519213801, 1.5489415586274968, 3.0812518140815306, 0.676443308258562, 3.399204958974649, 3.39309775397624, 3.0641143777928077, 3.1282233227697516, 2.7204313476996553, 0.29152788619753, 0.779047535753565, 3.1687422653361224, 1.9176853344927747, 3.3321058405690747, 1.1355139054063599, 0.19445404142194367, 2.444253058126319, 2.018812357170213, 3.183675072930556, 3.943197137837966, 3.1776859187919704, 2.9883945395962472, 3.165448855970407, 0.011844066343963096, 0.8109493587815171, 3.6147243041057626, 2.935130768261376, 0.5927827346386821, 1.110259638056291, 2.560192324588774, 2.188104367329256, 1.8181030925038584, 1.5512957862163064, 1.176937367605996, 3.837496263533031, 1.1488700723496037, 2.609085088117903, 2.593289335087266, 3.1256889825726555, 2.197362681402585, 2.95073836833081, 3.6276470533672205, 4.160257728988088, 2.337142722917102, 5.189557474922617, 3.5079713967109316, 2.672819812348777, 1.8666935068270534, 3.6082266940857393, 4.005789113761765, 4.320823713979545, 3.1445787773470815, 4.338162586904133, 2.6285780131335343, 1.425600353770486, 2.28287482851972, 3.5047161825585107, 3.0199188671351362, 0.673381065561002, 1.6627484112582618, 1.084023477955743, 3.302986567953453, 3.515407342295263, 3.5413629060270777, 2.67418314447512, 0.8219920717592755, 2.489964350440359, 3.8003392361106454, 0.9813882025136177, 3.785274109889692, 2.5363700284519477, 1.5221492900900118, 3.812134025177163, 3.311918195339916, 
3.6679683056591044, 3.5379166832852555, 2.854685946626756, 3.3897548617691635, 3.0473535373619876, 1.7845902076699358, 1.640145290482612, 4.474406326535217, 3.1099724212964626, 3.5252712800230164, 2.5140617464751185, 3.4851093057575624, 3.3316070545920877, -0.32247731924334333, 0.7470290419632193, 1.0025240287346724, 2.3587528771640867, 2.976263481225407, 0.749772255004769, 2.064102498898625, 0.5499526537455514, -1.1100450199843115, 0.9001430425514165, 1.1813195378269288, -0.3075496293353543, 3.3485033392977095, 4.249854521376428, 1.2047193426571745, 3.211374916497645, 2.9371099207645934, 3.7537103912169796, 3.086459491426497, 1.7175081883640178, -0.8367950773792318, 3.4958657234585466, 0.5584281748843918, 2.323993453802556, 1.1340020590641742, 0.2356620764047892, 1.4538762747953384, 1.911838161497753, 3.1099496853938677, 0.23857139517780146, 2.882212411206142, 3.332557151158329, 2.686762264721659, 1.2029051997428413, 1.9195652913137002, 1.2927466451887675, 2.6193458669682803, 0.8779286643399858, 2.4582824470374387, 3.5708840021174066, 3.1162068762394908, 3.5079043258209213, 0.38212743871609245, 0.8316898599713287, 2.244775240629253, 4.237750716421176, 0.2833692168000635, 1.4837853266557446, 1.9043627021605718, 3.2824894314338167, 1.5846240742393658, 1.347688268997379, 0.9355077911484706, 0.6393522345073741, 1.376999844855894, 3.520482105383186, 3.142535035520448, 3.0606485216654824, 0.6195833191332852, 0.0036039967120473203, 2.123771981524081, 0.4896998835349229, 2.038955063484078, 1.2394798988996667, 0.45110671257032176, 0.09946523480007285, 3.1744291393034407, 0.9724493800881352, 1.7487825757156128, 1.9900931658230014, 1.0748908587344606, 0.8851361122230214, 2.426671086111677, 3.4500926535694045, 4.079383374711955, 2.5497050943659794, 1.5901068224247974, 2.2474845113909816, 1.8102377929728803, 2.0354997057634896, 3.438190616794957, -0.8409201319618589, 4.4152553864820145, 3.329262298809403, 3.6723305059381746, 3.137159574582116, 1.01194997368975, 2.248942340047155, 1.801456034326579, 3.2163405831583156, 0.7882770357141339, 4.1288542122606104, 4.208285691235985, 1.2939999107965408, 3.6734639227976733, 3.3344765766819613, 3.3185377881225326, 4.3640700487132, 0.6568143533256078, -0.14954943159733056, 3.948994639949973, 3.8288724151269977, 1.436171570299873, 1.5419234570110598, 3.352340308369982, 2.8689506243032192, 4.434773405337667, 3.7987107535985736, 2.224143497403188, 3.5729735148951542, 2.73056653522326, 0.9034274079367253, 3.9939259482412806, 1.881260125599088, 2.8326671662420484, 3.8035985440038242, 0.55680347687452, 0.9661744368188914, 2.522422499786577, 2.598167760485692, 3.324533010905306, 2.785942034321825, 3.7914037683391437, 2.42984399264765, 1.4544035058832208, 3.17947473327575, 2.672935737507584, 3.6418322103370295, 3.6813814563363234, 2.2088651952686464, 4.239600673608598, 3.5912350066040086, 1.1744558100140334, 0.20841975791091144, 0.7211884545157704, 3.5725948619348498, 3.3912555987880237, 3.1179802841775976, 2.6485560905583503, 2.7134727882698813, 1.3790661164523925, 1.58398237205808, 3.087249141869335, 3.056483502700675, 3.4316755635363947, 3.0829216101095955, 2.4888850621085092, 1.7312292640253861, 3.2978667011291734, 1.9594851357992347, 3.501338745474774, 2.4572497315382997, 4.122385804728194, 3.5817915826451214, 3.76214425759962, 3.2238514698767076, 2.546189741738114, 3.588995377424493, 3.4673166462763314, 2.9383263946396747, 0.7182739285820456, 2.447474086847735, 3.1341213548950275, 3.6140627531634566, 4.135000756513951, 2.1590189159692263, 2.266793858319305, 
0.6449591249502928, 3.15248167992217, 1.4182645073945896, 2.635548518432059, 1.423133889474279, 4.180928482953819, 4.234189274960988, 0.4966707872612917, 3.6912995494240084, 1.8449468831611793, -0.35009998531577413, 2.7468657115379256, 2.843140454129701, 0.27835314679370826, 3.678106851850904, 3.4059432307665958, 3.627707779696665, 2.9911358871717546, 2.3443291460356437, 3.749331720399955, 1.4343394020212272, 3.0688989720140354, 3.473973728603321, 4.051035413553973, 4.264851433051465, 3.063234134998642, 4.1989173726447975, 0.065581094872943, 0.9685839226664584, 1.4946268979965747, 3.6826951062750473, 2.8709584392926, 0.6243856036049147, 2.0371924532634247, 2.4341002586518985, 4.368599248431015, 3.0403320338819055, 2.1176754853409645, -0.27862101289514013, 3.703637526452533, 2.848593499974329, 3.4966677604257432, 1.227404300795719, 2.251568046282213, 3.2004004637285544, 3.07847752322984, 2.2237632085951766, 0.41587967523391006, 0.9748130956419503, 3.3212648765536077, 0.395991093813953, 0.6279202330260872, 4.008528518464035, 3.3536558405650245, 1.5731798732464972, 3.1891170928041856, 0.36113351200360905, 3.0864733891075313, 3.8392719759792913, 2.0288861058829877, 3.9706279749754034, 1.273236566674368, 3.3611135103194205, 2.1578221335401317, 1.9774519227544791, 2.448611378799904, 4.016633044765161, 1.6902305448696988, 3.812007616595003, 1.3340641549055285, 1.1415530027719636, 0.0760641525897117, 3.7273208351489906, 3.672385332583226, 3.7476533205977027, 3.89809414169982, 1.9992880687939183, 4.088823387943097, 1.4160064713276737, 3.0283405725031405, 3.448796410363719, 1.2956608140291663, 4.135488026727736, 2.752502149973333, 4.3360230156644715, 1.404887628226119, 3.9958126999501675, 0.7466476427621315, 2.9695500039330076, 0.3169057141567397, 2.8811440689905687, 3.0801109677349783, 4.248441094968972, 3.7933000884479067, 3.206528023602644, 3.622602285006396, 3.1141315987569596, 1.4651911730579796, 3.236369487465259, 4.14816278837152, 0.2702178262854273, 2.4483758700395613, 2.8018509440076174, -0.8671041792185687, 1.4262309389339065, 4.056026973627861, 4.184003944047801, 0.6480440947173031, 0.19496594166971634, 4.9854162736412935, 2.774382992575682, 2.856655403018273, 3.707442049412853, 2.8787216882728783, 3.335011408456148, 1.4208730964286322, 0.8896856343077626, 0.3018756909266505, 1.3515810409904838, 3.047248733066036, 2.510384411750882, 2.367533824986736, 4.6934113328466545, 4.04909656881364, 0.250424560062975, 3.195284307013469, 4.1507606931298895, 2.2219526041281994, 3.2019339908690583, 2.6762653051251135, 1.4181207629812869, 1.1481837877008814, 2.757521500627893, 3.5763225613938237, 2.868749566822571, 4.955239253826305, 1.789880822641129, 3.417808767359762, 1.0614909475403267, 3.443165508984839, 1.5300799223198358, 2.86378978019271, 2.6725634246818317, 3.6959181046368355, 3.353453765270806, 3.853827294123369, 2.445840875315773, 3.6397662739956256, 4.6814294541661585, 2.925762000203469, 2.4988469180585176, 0.5439297118150208, 2.8627109987831845, 1.0139115362949667, 5.495696535722694]" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n" - ] - } - ], + "outputs": [], "source": [ "print(output)" ] From 85f74c5580014b9bb0caa9e12e21c15b7cd08998 Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 26 Jun 2019 12:32:10 -0400 Subject: [PATCH 086/108] fixed title --- scenarios/sentence_similarity/automl_with_pipelines.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb 
b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 535b63860..578399a9f 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -1635,7 +1635,7 @@ "metadata": {}, "source": [ "## 6.7 Test Deployed Webservice\n", - "We test the web sevice by passing data.The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated. The service has a timeout which does not allow passing the large test dataset. Timeout is based off a few things.It is set to a default of ~30 seconds. It depends on the error but is usually resolved with batching if so much is sent to the service at a time " + "We test the web sevice by passing data.The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated. The service has a timeout which does not allow passing the large test dataset. Timeout is based off a few things.It is set to a default of ~30 seconds. To overcome this you can batch data and send it to the service." ] }, { From b3e85d4a5934588f3900a1b68c22c8121a5b825b Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 26 Jun 2019 12:36:46 -0400 Subject: [PATCH 087/108] fixed text issues --- .../sentence_similarity/automl_local_deployment_aci.ipynb | 4 ++-- scenarios/sentence_similarity/automl_with_pipelines.ipynb | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb index df005bf90..2aa69ea0a 100644 --- a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb +++ b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#
Local AutoML Model with ACI Deployment for Predicting Sentence Similarity
" + "# Local AutoML Model with ACI Deployment for Predicting Sentence Similarity" ] }, { @@ -986,7 +986,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(aks_service.get_logs())" + "print(aci_service.get_logs())" ] }, { diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines.ipynb index 578399a9f..b9769a3dc 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#
AzureML Pipeline, AutoML and ACI Deployment for Sentence Similarity
" + "# AzureML Pipeline, AutoML and AKS Deployment for Sentence Similarity" ] }, { @@ -24,7 +24,7 @@ "1. PythonScriptStep: embeds sentences using a popular sentence embedding model, Google Universal Sentence Encoder\n", "2. AutoMLStep: demonstrates how to use AutoML to automate model selection for predicting sentence similarity (regression)\n", "\n", - "After creating the pipeline, the notebook demonstrates the deployment of our sentence similarity model using Azure Container Instances (ACI).\n", + "After creating the pipeline, the notebook demonstrates the deployment of our sentence similarity model using Azure Kubernetes Service (AKS).\n", "\n", "This notebook showcases how to use the following AzureML features: \n", "- AzureML Pipelines (PythonScriptStep and AutoMLStep)\n", From 3759b8e1f992feacc8cb199bd8a145ce4ddbd482 Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 26 Jun 2019 13:22:05 -0400 Subject: [PATCH 088/108] revert file version to staging version --- scenarios/sentence_similarity/baseline_deep_dive.ipynb | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/scenarios/sentence_similarity/baseline_deep_dive.ipynb b/scenarios/sentence_similarity/baseline_deep_dive.ipynb index bfeb82a08..e99de3109 100644 --- a/scenarios/sentence_similarity/baseline_deep_dive.ipynb +++ b/scenarios/sentence_similarity/baseline_deep_dive.ipynb @@ -163,12 +163,8 @@ "outputs": [], "source": [ "# Produce a pandas dataframe for the training and test sets\n", - "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", - "test_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")\n", - "\n", - "# Clean the sts dataset\n", - "sts_train = stsbenchmark.clean_sts(train_raw)\n", - "sts_test = stsbenchmark.clean_sts(test_raw)" + "sts_train = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", + "sts_test = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"test\")" ] }, { @@ -1432,7 +1428,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.7" + "version": "3.5.5" } }, "nbformat": 4, From 84e5e042988f39b566ce74e71d353a79796a9083 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Wed, 26 Jun 2019 13:33:05 -0400 Subject: [PATCH 089/108] Fixes based on Abi's PR comments --- .../baseline_deep_dive.ipynb | 129 ++++++++++-------- 1 file changed, 71 insertions(+), 58 deletions(-) diff --git a/scenarios/sentence_similarity/baseline_deep_dive.ipynb b/scenarios/sentence_similarity/baseline_deep_dive.ipynb index e55cad900..922862120 100644 --- a/scenarios/sentence_similarity/baseline_deep_dive.ipynb +++ b/scenarios/sentence_similarity/baseline_deep_dive.ipynb @@ -81,7 +81,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -133,7 +133,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -165,7 +165,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 8, "metadata": { "scrolled": true }, @@ -174,28 +174,28 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 160KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 247KB/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Data downloaded to 
C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpzq_k_pn9\\raw\\stsbenchmark\n" + "Data downloaded to C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpp2a0cw_t\\raw\\stsbenchmark\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 211KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 243KB/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "Data downloaded to C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpzq_k_pn9\\raw\\stsbenchmark\n" + "Data downloaded to C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpp2a0cw_t\\raw\\stsbenchmark\n" ] } ], @@ -211,7 +211,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 9, "metadata": {}, "outputs": [ { @@ -230,7 +230,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 10, "metadata": {}, "outputs": [ { @@ -350,7 +350,7 @@ "9 A man is playing a trumpet. " ] }, - "execution_count": 5, + "execution_count": 10, "metadata": {}, "output_type": "execute_result" } @@ -389,7 +389,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -414,7 +414,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 12, "metadata": {}, "outputs": [ { @@ -546,7 +546,7 @@ "4 [man, seated, playing, cello, .] " ] }, - "execution_count": 7, + "execution_count": 12, "metadata": {}, "output_type": "execute_result" } @@ -564,7 +564,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ @@ -592,7 +592,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -621,7 +621,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -631,7 +631,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 16, "metadata": {}, "outputs": [ { @@ -640,7 +640,7 @@ "11498" ] }, - "execution_count": 11, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -665,7 +665,7 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -698,14 +698,14 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 18, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████████████████████████████████| 1.61M/1.61M [04:45<00:00, 5.63kKB/s]\n", + "100%|████████████████████████████████████████████████████████████████████████████| 1.61M/1.61M [01:08<00:00, 23.4kKB/s]\n", "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\smart_open\\smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. 
See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function\n", " 'See the migration notes for details: %s' % _MIGRATION_NOTES_URL\n" ] @@ -735,7 +735,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 19, "metadata": {}, "outputs": [], "source": [ @@ -783,7 +783,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 20, "metadata": {}, "outputs": [], "source": [ @@ -819,7 +819,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -849,7 +849,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 22, "metadata": {}, "outputs": [], "source": [ @@ -900,7 +900,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -931,7 +931,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -966,14 +966,14 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 25, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████████████████████████████████| 2.13M/2.13M [12:20<00:00, 2.87kKB/s]\n", + "100%|████████████████████████████████████████████████████████████████████████████| 2.13M/2.13M [01:58<00:00, 17.9kKB/s]\n", "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\smart_open\\smart_open_lib.py:398: UserWarning: This function is deprecated, use smart_open.open instead. See the migration notes for details: https://github.com/RaRe-Technologies/smart_open/blob/master/README.rst#migrating-to-the-new-open-function\n", " 'See the migration notes for details: %s' % _MIGRATION_NOTES_URL\n" ] @@ -985,7 +985,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 26, "metadata": {}, "outputs": [], "source": [ @@ -1014,7 +1014,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 27, "metadata": {}, "outputs": [], "source": [ @@ -1055,14 +1055,14 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 28, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ - "100%|████████████████████████████████████████████████████████████████████████████| 2.56M/2.56M [13:37<00:00, 3.13kKB/s]\n" + "100%|████████████████████████████████████████████████████████████████████████████| 2.56M/2.56M [01:46<00:00, 24.0kKB/s]\n" ] } ], @@ -1072,7 +1072,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 29, "metadata": {}, "outputs": [ { @@ -1104,7 +1104,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 30, "metadata": {}, "outputs": [ { @@ -1155,7 +1155,7 @@ }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 33, "metadata": {}, "outputs": [], "source": [ @@ -1169,8 +1169,8 @@ " Returns:\n", " list: predicted values for sentence similarity of test set examples\n", " \"\"\"\n", - " stop_word_param = 'english' if rm_stopwords else None\n", - " \n", + " stop_word_param = \"english\" if rm_stopwords else None\n", + "\n", " tf = TfidfVectorizer(\n", " input=\"content\",\n", " analyzer=\"word\",\n", @@ -1178,17 +1178,30 @@ " stop_words=stop_word_param,\n", " sublinear_tf=True,\n", " )\n", - " all_sentences = df[[\"sentence1\", \"sentence2\"]]\n", - " corpus = 
np.concatenate([df['sentence1'].values, df['sentence2'].values])\n", + " all_sentences = df[[\"sentence1\", \"sentence2\"]]\n", + " corpus = np.concatenate([df[\"sentence1\"].values, df[\"sentence2\"].values])\n", " tfidf_matrix = np.array(tf.fit_transform(corpus).todense())\n", - " df['predictions'] = df.apply(lambda x: calculate_cosine_similarity(tfidf_matrix[int(x.name),:], tfidf_matrix[len(df.index)+int(x.name),:]) if \n", - " (sum(tfidf_matrix[int(x.name),:]) != 0 and sum(tfidf_matrix[len(df.index)+int(x.name),:]) != 0) else 0,axis=1)\n", - " return df['predictions'].tolist()" + " num_samples = len(df.index)\n", + " \n", + " # calculate the cosine similarity between pairs of tfidf embeddings\n", + " # first pair at index 0 and n in tfidf_matrix, second pair at 1 and n+1, etc.\n", + " df[\"predictions\"] = df.apply(\n", + " lambda x: calculate_cosine_similarity(\n", + " tfidf_matrix[int(x.name), :], tfidf_matrix[num_samples + int(x.name), :]\n", + " )\n", + " if (\n", + " sum(tfidf_matrix[int(x.name), :]) != 0\n", + " and sum(tfidf_matrix[num_samples + int(x.name), :]) != 0\n", + " )\n", + " else 0,\n", + " axis=1,\n", + " )\n", + " return df[\"predictions\"].tolist()" ] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 34, "metadata": { "scrolled": true }, @@ -1227,7 +1240,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 35, "metadata": {}, "outputs": [], "source": [ @@ -1248,7 +1261,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": 36, "metadata": {}, "outputs": [], "source": [ @@ -1306,7 +1319,7 @@ }, { "cell_type": "code", - "execution_count": 30, + "execution_count": 37, "metadata": { "scrolled": true }, @@ -1336,7 +1349,7 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 38, "metadata": {}, "outputs": [], "source": [ @@ -1356,7 +1369,7 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 39, "metadata": {}, "outputs": [ { @@ -1376,11 +1389,11 @@ " 'fastText WMD with Stop Words': 0.5177829727556036,\n", " 'TF-IDF Cosine': 0.6749213786510483,\n", " 'TF-IDF Cosine with Stop Words': 0.7118087132257667,\n", - " 'Doc2vec Cosine': 0.5236274769065202,\n", - " 'Doc2vec Cosine with Stop Words': 0.45176043696294416}" + " 'Doc2vec Cosine': 0.528387685928394,\n", + " 'Doc2vec Cosine with Stop Words': 0.45572884639905675}" ] }, - "execution_count": 32, + "execution_count": 39, "metadata": {}, "output_type": "execute_result" } @@ -1400,14 +1413,14 @@ }, { "cell_type": "code", - "execution_count": 33, + "execution_count": 40, "metadata": { "scrolled": true }, "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAg8AAAEWCAYAAADhFHRsAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nOzdd7xcVbn/8c+XgLRQpBpUiHSBQIAQIAYEwYYNBQVEpKiIXOGCgvITr2C74AXFAsLlqvSOYgGUDgkpBBJCEkqQEqkivQWQJM/vj/UMZ2cy55yZ03JO8n2/XueVmV3WXnvNwF6z9trPo4jAzMzMrFlLLOwKmJmZ2cDizoOZmZm1xJ0HMzMza4k7D2ZmZtYSdx7MzMysJe48mJmZWUvceTCzHiXpR5KekfTPhV2X/kDS+yT9XdIrknZf2PUBkHSApFsr71+RtO7CrFMrJIWk9ZvYbidJj/VFnRY37jyYLeYkzZL0Wl5AnpJ0lqTBXSzr3cA3gU0i4h09W9MB6wfAqRExOCL+WL+yrv2fl3RVtmOfybo91NPlSro5L/Rb1C3/Yy7fqaePaX3DnQczA/hERAwGtgK2Ab7bagGSlgTWAZ6NiH91cf9F0TrA3Z1sU2v/IcBTwK96vVZ9537gi7U3klYFtgOeXmg1sm5z58HM3hIRjwN/BTYDkLSSpN9KelLS43lLYlCuO0DSOEmnSHoOuBm4Dlgrf0Wfndt9UtLdkl7IX6LvrR0vf3V/W9I04FVJS+ayoyVNk/RqHn9NSX+V9LKk6yW9vVLGZZL+KelFSWMkbVpZd7ak0/LX/MuSbpO0XmX9ppKuk/Rcjrp8J5cvIekYSQ9KelbSpZJWaa/dJH1F0gNZzp8lrZXLHwTWBf6SbbJ0J+3/OnA5sEml7I9JulPSS5IelXR8Zd0yks7POr4g6XZJa3b22TWo/1u3AZpos40rbTZT0uc6OifgAmCvyrH3Aa4A/l0pc2lJP5f0RP79vNpW+X14MtcdVFf3pSWdLOmR/AzPkLRsO+f57WyLl7Puu3RSd2uHOw9m9pYcLt8NuDMXnQPMAdYHtgQ+BHy5ssu2wEPAGsAHgY8CT+Qw+AGSNgQuAo4AVgeuplxI31YpYx/gY8DKETEnl+2R5W0IfILSofkOsBrl/1uHV/b/K7BB1mEK5WJVtQ/wfeDtwAPAj/NcVwCuB/4GrJXneEPucziwO/D+XPc8cFo7bfYB4ATgc5SRg38AFwNExHrAI+TIQkS80aiMSlnLAXsBEyuLX6X8cl852+lraps7sT+wEvBuYFXgEOC1XNfZZ9eR9tpseUoH8UJKe+8D/LraYWvgCeCePD55LufWbXMsZTRiOLAFMJIc/ZL0EeAoyvdhA2DXun1/QvmeDM9zfSfwvfpKSNoI+DqwTUSsAHwYmNVBva0jEeE///lvMf6j/A/0FeAFyoXv18CywJrAG8CylW33AW7K1wcAj9SVtRPwWOX9fwGXVt4vATwO7FQ59kEN6rNv5f3vgdMr7w8D/tjOuawMBLBSvj8b+E1l/W7AfZVzubOdcu4Fdqm8HwK8CSzZYNvfAv9TeT84tx1aOZ9dm2z/OZSL7bAOtv85cEq+PggYD2xet00zn92tlXUBrN9Em+0FjK071v8Cx7VT15spHZYvUDqRGwH357rHKt+DB4HdKvt9GJiVr38HnFhZt2GtvoAonav1Kuu3Bx6u/z7m9v+idD6WWtj/3Q30v0X1HqOZtWb3iLi+ukDSMGAp4ElJtcVLAI9WNqu+bmQtSocEgIiYJ+lRyq/Djsp4qvL6tQbvB2cdB1F+FX+WMrIxL7dZDXgxX1ef+phd25fya/3Bduq9DnCFpHmVZXMpF+XH67ZdizLiAUBEvCLpWco5zmqn/Hq7R8T1eT6fAm6RtElE/FPStsCJlFtJbwOWBi7L/c7L87hY0srA+ZRf8evQ+WfXkfbabB1gW0kvVNYvmfXoyB+AnwLPtrPtfN+TfL1WZd3kunU1qwPLAZMr5ylggdszEfGApCOA44FNJV0DfCMinuik7taAb1uYWXsepfx6XS0iVs6/FSOiOkTdWVreJygXHABU/g//bua/AHcnte/nKRfbXSnD90Nrh2pi30eB9TpY99HKea8cEctEmRNSr/4cl6fcQmi0bYciYm5E/IHSURmdiy8E/gy8OyJWAs4gzy8i3oyI70fEJsAo4OOU2wLNfHZd8ShwS127DI6Ir3VyXrMpt5e+RuPOw3xtCKydywCepHxnqutqnqF0Jjet1GelKJNPG9XjwogYnccKyi0P6wJ3HsysoYh4ErgW+KmkFXMS4XqS3t9CMZcCH5O0i6SlKI9xvkEZau8JK2R5z1J+gf53C/teCbxD0hE56W6F/JUP5QL9Y0nrAEhaXdKn2innQuBAScNzkt9/A7dFxKxWT0bFpyhzDe7NxSsAz0XE65JGUjpMte13ljQsRyxeotwumdtDn10jVwIbStpP0lL5t40qk2A78B3g/e20y0XAd7OdV6PMWTg/110KHCBpk5wTclxtp4iYB/wfcIqkNQAkvVPSh+sPIGkjSR/Iz+h1SqdjbrMnbvNz58HMOvJFylD5PZRJg5dT7v83JSJmUu53/4ryK/ETlMmD/+5wx+adSxnGfjzrOLHjzeer28uUSXifoAzT/x3YOVf/gvJr/1pJL2e527ZTzg2UuR2/p/xKXg/Yu8Xz+IukVygdgB8D+0dE7fHOQ4EfZD2+R7mY1ryD8pm8ROls3ELbRbdbn10j2WYfopzfE5R2+wnlVkpn+z4REbe2s/pHwB3ANGA65TbQj3K/v1LmedxImbx5Y92+387lEyW9RJkEu1GDYyxNuf3zTNZ7DUqHxrpAEd0ZMTQzM7PFjUcezMzMrCXuPJiZmVlL3HkwMzOzlrjzYGZmZi1xkChbLKy22moxdOjQhV0NM7MBZfLkyc9ExOr1y915sMXC0KFDueOOOxZ2NczMBhRJ/2i03LctzMzMrCXuPJiZmVlL3HkwMzOzlrjzYGZmZi1x58HMzMxa4s6DmZmZtcSdBzMzM2uJOw9mZmbWEgeJssXC9MdfZOgxVy3sapjZYmzWiR9b2FXoMR55MDMzs5a489DHJJ0i6YjK+2sk/aby/qeSvtGN8o+XdFS+PknSfZKmSbpC0srdq32nxz4qjzdD0l2SvtiFMg7pyn5mZtZ33Hnoe+OBUQCSlgBWAzatrB8FjGumIEmDOtnkOmCziNgcuB/4fy3XtkmSDgE+CIyMiM2AHQG1Wk5EnBER5/Z0/czMrOe489D3xpGdB0qnYQbwsqS3S1oaeC9wp4qT8lf8dEl7AUjaSdJNki4EpueyYyXNlHQ9sFHtQBFxbUTMybcTgXfl9rdJeqvDIulmSVtLWl7S7yTdLulOSZ/K9YMknZz1mCbpsAbn9R3g0Ih4KY/9YkSck/vvkuVNz/KXzuUnSronyzw5l1VHTm6W9BNJkyTdL2mHSn1OynpOk/TV7n0kZmbWCk+Y7GMR8YSkOZLWpnQiJgDvBLYHXgSmRcS/Je0BDAe2oIxO3C
5pTBYzkjKi8LCkrYG9gS0pn+cUYHKDQx8EXJKvLwY+BxwnaQiwVkRMlvTfwI0RcVDe4piUHZIvAu8BtoyIOZJWqRYsaQVghYh4sP6gkpYBzgZ2iYj7JZ0LfC3//TSwcUREB7dUloyIkZJ2A44DdgW+BLwYEdtkR2ScpGsj4uG6Yx8MHAwwaMUFMsqamVkXeeRh4aiNPtQ6DxMq78fnNqOBiyJibkQ8BdwCbJPrJlUulDsAV0TE7PzV/+f6g0k6FpgDXJCLLgU+m68/B1yWrz8EHCNpKnAzsAywNuWCfUZtFCMinqs/BBDtnOtGwMMRcX++P4dyS+Ml4HXgN5I+A8xuZ/8/5L+TgaGVen4x63kbsCqwQf2OEXFmRIyIiBGDllupneLNzKxVHnlYOGrzHoZRbls8CnyTckH9XW7T0XyBV+vet3fhRtL+wMcpv/wDICIel/SspM2BvYDasL+APSJiZl0ZHXUOiIiXJL0qad2IeKi+Cu3sM0fSSGAXysjJ14EPNNj0jfx3Lm3fVwGHRcQ17dXJzMx6j0ceFo5xlAv6czmy8BywMuXWxYTcZgywV97fX53ya31Sg7LGAJ+WtGzePvhEbYWkjwDfBj4ZEfW/7C8GvgWsFBHTc9k1wGHZWUDSlrn8WuAQSUvm8lVY0AnAaZJWzG1WzNsG9wFDJa2f2+0H3CJpcB77auAIyi2aZl1DufWxVB5rQ0nLt7C/mZl1g0ceFo7plHkMF9YtGxwRz+T7Kyidibsov/q/FRH/lLRxtaCImCLpEmAq8A9gbGX1qcDSwHXZH5gYEYfkusuBXwA/rGz/Q+DnwLTsQMyidHJ+A2yYy98E/i/LrjodGEyZm/Em8Cbw04h4XdKBwGXZ+bgdOANYBfhTzokQcGSnrdbmN5RbGFOynk8Du7ewv5mZdYNyJNtskTZixIi44447FnY1zMwGFEmTI2JE/XLftjAzM7OWuPNgZmZmLfGcB1ssODGWmQ0EAyV5lkcezMzMrCUddh7kJE6dldFnSZwkXS1p5fw7tLJ8J0lXNrH/dhmWeqqkeyUdX9l/VCe7N1vHKyTtXnk/U9J3K+9/nwGhulr+2ZL27G49zcysezobeXASpw70ZRKniNgtIl6gxIM4tLPtGzgHODgihgObUaJMAuxEW66N7qp+X1YFXqE8blqzPW0RNDtUiylhZmb9T2edBydx6oMkTpK+JenwfH2KpBsrdTk/X8+StBpwIrBejiCclEUMlnR5jqRckLEP6q0BPJnnOzci7pE0FDgEODLL20HSOpJuyLreoJKDo/ar/wxJY/P8Pt7gGNXvyyjgSmD1/H68B3gtY1UsI+msbOM7Je2cxzhA0mWS/gJcm/udmu1+VZ5Drc0W+DzMzKxvdPjrzkmc+iyJ0xhKeOpfAiOApVWiJ45m/qBPAMdkew7POu+U7bkp8ATlAv4+4Na6/U4BZkq6GfgbcE5EzJJ0BvBKRNQ6RH8Bzo2IcyQdlHWq3YoYCrwfWA+4SdL6EfF65RiTgc0kvY3yfbkFWJfSydyStlGq/wCIiGEqQa+ulbRhrtse2DwinlO5xbERJYz3msA9wO/yM+3085ATY5mZ9YpmJkw6iVPvJ3GaDGydHZs3KG08gtJe9Z2HRiZFxGMRMY8SaXJo/QYR8YMs81rg85QORCPb0xb58jzKZ1tzaUTMi4i/Aw8B9dEu3wDuBrYCtsvzbe/7cl7ucx8lMmat83Bd5TPbkbbv1RPAjbm8qc/DibHMzHpHM52H+iROEykXmOp8h55O4rRvNYkTUE3idHHlmHtExPD8Wzsi7qXjzgHZaXlV0rqNqtDOPnMoIyi/p/wKb+/C21ESp1o93xMR19aV/yYlFPSBlPYeC+xM+YV/b3vn0uC49ceuP48HI+J0SjKqLVTmJXQm2nnd6D2U+u9IGd15nvJ9qXUeeuT70sLnYWZmvaDZkQcncer9JE5jgKPy37GUuQhTa52oipeBFVo4Pnncj1XmQmxA6WS80KC88ZRbSwD7Mv/tj89KWkLSepTbEfNl30zjKFk678r30yijEGtTRiWgnOO+Wa8Nc12jssYAe+f3agilQ0U3Pw8zM+umZma0O4lT3yRxGgscC0yIiFclvU6DWxYR8aykcZJmAH8Fmo18tB9wiqTZlNtC+0bE3JzjcLnKhNPDgMMp8wqOzroeWCljJuWW1JrAIXXzHWrGUzoWJ2R950j6F/Bo3lYB+DVwhqTpWZcDIuINLTjP8wpKmu7plCdwbsnlK9D1z8PMzLrJibGsKZLOBq6MiMsXdl26womxzMxaJyfGMjMzs57gQDzWlIg4YGHXoTuc28LM+puBkseiEY88mJmZWUvceehHtAjmEpG0Rca4qL3fR9LsytMnwyRNy9c3S3qk8lQIkv4o6ZV8PVTSaypRKe9Viea5f2/U28zM2ufOQ/+yKOYSmQ6sk4/mQjmH+ygRJ2vvq+f0AiVCJtmhGVJX3oMRsWVEvJfySOmR+YSMmZn1EXce+pdFLpdIPp55O7BtLtoaOI35c2BUk2VdTFucic/QFrVzARHxEPANyuOlZmbWR9x56EcyBHN9LpHbKDE0RpC5RCgX1VoukV2BkzKIEpTIi8dGxCaaP5fIZ2gLGV7vIErMCGjLJYIquUQoMShujIhtKMGaTspgVwfTlktkc9rCileNB0bl9vMo4cSrnYfqyMMNwI45crI3bTlO2jOFujDZNZIOlnSHpDvmzn6xk2LMzKxZ7jz0P4taLpHqOY0Ebs+kZOurRCMdnCMINXMpUS33ApaNiFntN1U5hfZWOLeFmVnv8KOa/U99LpFHKRk3XwJ+l9v0dC6RXaq5RCRVc4nUUojXconMrCujw1wiaSKlczOatpDmj1FGFsY32P5iSnTJ4zspF8qoSjP5P8zMrId45KH/WeRyiUTEy5RO0AGVc5hAyUvRqPMwlhLe+qIG694iaShwMvCrjrYzM7Oe5c5D/1PLJTKxbtmLdblEplFyidxI5hKpLygiplDmDEylZKCszyWyAiWXyFRJZ1TWXU4ZFbi0suyHwFKUnCEzaMsz8hvgkVx+FyXddyPjgKUj4tF8P4GSA2OBzkMUJ1fOt2q92qOaWb9fRcRZ7RzTzMx6gXNb2GLBuS3MzFrn3BZmZmbWI9x5MDMzs5b4aQtbLDgxltniaSAnn+rPPPJgZmZmLely58FJnPouiZOktSRdnq+HS9qtsu6tduqkjIMqIaRnVMJLHyBpra7Uq658SXpG0tvz/RBJIWl0ZZunJa3ajWPMkrRad+tqZmbd052RBydx6qMkThHxRETsmW+HA7t1tH09Se+ihJcenW24HeVRTyixF7rdecggU7VQ2lDa6k7aviMbAc9ExLNN1tm31MzM+qnudB6cxKmHkjhJujojOpL1/V6+/qGkL+coxgxJbwN+QAkQNbXWlsAmee4PSWqUJGoN4GXglazLKxHxsKQ9KTkzLsjylpW0S9Zherbh0lmXWZJ+kiMokySt3+A41e/EKOBnzN+ZGJ9lrSPphvwMblDJ5YGksyX9TNJNwE8krSrp2qzP/5KRNfPzv
UrSXdkue2FmZn2my50HJ3ECei6J0xhgB0krUvJMvC+Xj6YS2Cnb83vAJRExPCJqx9sY+DClPY9T3mapuAt4CnhY0lmSPpHlXQ7cAewbEcMpYabPBvaKiGGUCbVfq5TzUkSMpASY+nmD83hrNCrr8kfg3fm+2nanAudWPoNfVsrYENg1Ir4JHAfcGhFbUvJyrJ3bfAR4IiK2iIjNgL81qIsTY5mZ9ZLuTph0EqeeSeI0lhJiejRwFTBY0nLA0PpcEu24KiLeyIiM/wLWrK6MiLmUC+6elNs+p0g6vkE5GwEPR8T9+f6crFfNRZV/t2dBk4Ats+O1VES8AjyUoxTV78T2wIX5+rw875rLsr7ksc/Pc7gKeD6XTwd2zZGQHSKiYc/AibHMzHpHdzsP9UmcJlIuDNVfmT2dxGnfahInoJrE6eLKMffIX+fDI2LtiLg3l/dGEqdfMX8o5/a0l8TpdspozQ6UUYg7ga8Ak5soE+CNyuu5NHgEN0M+T4qIEyjnskeDcjr6rGD+tlugHTNHxgOU0aEpuXgiZY7GGkB7HaFqWZ1+J7JzszWlE3FC7TaPmZn1jZ4YeXASp24mccrbEY9SRk8mZplHMX8uipqXKTkpmqbytMZWlUXDgX80KO8+YGhlPsN+lJGimr0q/06gsXGUtqq23X8CE2udPko71uaK7EsZvWlkTK5H0keB2pMcawGzI+J8Sptu1c7+ZmbWC7rbeXASp55L4jQWeCo7R2Mpk0IbdR5uokyQrE6Y7MxSwMkqj7tOpVz8/zPXnQ2ckcsFHAhcJmk6Zc5Hta2XlnRb7ntkO8caR2mrWudhSp5Lte0OBw5Ueex1v0pd6n2fMqdkCuVW1CO5fBgwKet8LPCjjk/fzMx6khNjWVMkzQJGtNNJ6vecGMvMrHVyYiwzMzPrCQ7EY02JiKELuw7d4dwWZjYQDJRcHB55MDMzs5a48zCASVpT0oUZWXKypAmSPp3rdpJ0ZZPlHC/phLplw3OiZyv1OSonZc7I6I9fbGX/LOOQruxnZmZ9x52HASofQ/0jMCYi1o2IWoTOd3WhuItoewyzZm/aAjk1U59DgA8CIzPq4450HjdiARFxRkSc2+p+ZmbWd9x5GLg+APw7It56lDIi/hERC8SRqGqUuyKjWL4gadvKpp8jg25J+lCOakyRdJmkwQ2K/g5waEYHJSJejIhz2jtmLj9R0j2Z4+LkXFbNpnpzJZ/G/ZJ2yOWDVPKl3J77frVrTWhmZl3hzsPAtSltURybImkZ2s9dcREZuEnSdsCzEfF3lRTY36Xkm9iKkgvjG3XlrgCskKG8mzpmBuj6NLBp5rhoL1bDkplP4whKrguAL1FiiWxDiQb6FUnvaXBs57YwM+sF7jwsIiSdlvMMbu9gs45yV1wM7KmSXn1v2iJmbgdsAozLoEz7A+vUH572w363d8yXgNeB30j6DFAfObSmlql0MjA0X38I+GLW5zZgVWCD+h2d28LMrHf4Uc2B624q+Ski4j9ylKCjSEjtzkGIiEczENT7s9ztK/tcFxH7dLDvS5JelbRuXeKwdo8ZEXMkjQR2oXRWvk65FVOvlrejmrNDwGERcU17dTIzs97jkYeB60ZgGUnVlNnLdbJPZ7krLgJOAR6MiMdy2UTgfbV9JC0nacMGZZ8AnKaSVhxJK0o6uL1j5ryJlSLiasotieGdn/JbrqHc+lgqj7WhSiZPMzPrAx55GKAiIiTtTkmv/S3gaUpGym9XNttF0mOV95+lLXfFkpRsntXcFZcBvwAOqxznaUkHABfVJjpS5kDcz/xOBwYDt0t6E3gT+GlEvC6p0TFXAf6UcyJE+7kyGvkN5RbGlHzq5Glg9xb2NzOzbnBuC1ssOLeFmVnrnNvCzMzMeoQ7D2ZmZtYSz3mwxYITY5lZVwyURFV9zSMPZmZm1pI+6zw4iVP3SLpa0sr5d2hleVNtJ2k7SbdJmirpXknHV/Yf1UN1vCKfAKm9nynpu5X3v8+AUF0t/2xJe3a3nmZm1j190nlwEqfui4jdIuIFYGXg0M62b+Ac4OCIGA5sBlyay3cCeqTzAIyvlSVpVeAV2oJNka/HN1NQPtZpZmb9UF+NPDiJUwdJnCR9S9Lh+foUSTdW6nJ+vp6VESRPBNbLEYSTsojBki7PkZQLsrNWbw3gyTzfuRFxj6ShwCHAkVneDpLWkXRD1vUGSWvn8c+WdIaksXl+H29wjHG0dURGAVcCq6t4D/BaRPxT0jKSzso2vlPSznmMA/Iz+wtwbe53arb7VXkOtTZb4PMwM7O+0VedBydx6jiJ0xhgh3w9gtIZWAoYDYyt2/YYSgTI4RFxdC7bMo+5CbAu8L4GdTsFmJm3Fr4qaZmImEUJ2HRKljcWOBU4N8/zAuCXlTKGUsJXfww4I9urajKwmaS3UToPE4CZwHvz/bjc7j8Aso33Ac6plLU9sH9EfIDS5hsBw4Cv0Daq0dTnISfGMjPrFQtlwqScxKk+idNkYOvs2LxBueiOoHQo6jsPjUyKiMciYh4wtXLst0TED7LMa4HPA39rp6ztabsFdB6lA1NzaUTMi4i/Aw8BG9cd4w1Kzo2tKJ/FbXkuo/KvdstidJZNRNwH/AOohby+LiKey9c7AhflSMkTlJDc0OTn4cRYZma9o686D7ULClCSOFESIq3ewT4dJnECZtGWxOnSyj7X5a/o4RGxSUR8qW7fl4BXJa3b7DEjYg4wEvg9JQxyexfejpI41er0noi4tq78N/N8DqRcYMcCOwPrAc1MBH2j8rp67PrzeDAiTqe0/RY5L6Ez0c7rRu+h1H9HyujO85TcGLXOQ23koaP5Ja92dowWPg8zM+sFfdV5cBKnzpM4jQGOyn/HUuYiTI0F44e/DKzQwvHJ436sMhdiA0on44UG5Y0nbwkB+wK3VtZ9VtISktaj3B6Z2eBQ44CvAnfl+2mUUYi1KZ1IKOe4b9Zrw1zXqKwxwN45b2QIpUNFNz8PMzPrpj6Z0e4kTk0lcRoLHAtMiIhXJb1Og1sWEfGspHGSZgB/BZqNfLQfpf1nA3OAfSNibk5OvFzSpyhteTjwO0lHZ10PrJQxk9KBWxM4JCJeb3Cc8ZSOxQlZ3zmS/gU8mrdVAH5NmTMxPetyQES80WCe5xWUybbTKZ9hrfO4Al3/PMzMrJucGMuaIuls4MqIuHxh16UrnBjLzKx1cmIsMzMz6wkOxGNNiYgDFnYdusO5LcystyyO+S888mBmZmYtcedhEaV+kktE0hYZ46L2fh9JsytPnwyTNC1f3yzpkcpTIUj6o6RX8vVQSa9lVMp7VaJ57t9MPczMrOe487AIyotvf8klMh1YJwNgQYn3cB8lKmbt/bjK9i+QETIlrQwMqSvvwYjYMiLem/U4Mp+QMTOzPuLOw6Kp3+QSycczbwdq+28NnMb8OTCqybIupi3OxGdoi9q5gIh4iBJ+/PCOzsvMzHqWOw+Lpn6TSySNB0ZlcKx5wM3M33mojjzcAOwoaVAe85JOqj6FujDZlXNybgszs17gzsNiYCHnEoG2bJsjgdszKdn6klYH
BucIQs1cSlTLvYBlM3lXh6fX3grntjAz6x1+VHPRdDcl5wdQconkKEFHUZI6zCUiaRZtuUS2r+xzXUTs00l9JlIyio6mJMoCeIzSERnfYPuLKdElj++kXChzJ5qavGlmZj3DIw+Lpn6VSyQiXgYeBQ6grfMwgZKXolHnYSwlvPVFDda9RdJQ4GSgw7kcZmbWs9x5WARlMq3dgfdLeljSJMptiAVyidT+KL/ga3k9plPmJtTnEtmUnCiZx3ma0iG4KB+3nEg78w8oty6WzoyoUDoP69Kg8xDFyRHxTINy1qs9qknJpvqriDiro/YwM7Oe5dwWtlhwbgszs9Y5t4WZmZn1CHcezMzMrCV+2sIWC06MZda7FsfkUIszjzyYmZlZS/pF58FJnDqt11qSLq+cz25153xUE2UclGGnp0maIelTufwASWt1pV515UvSM5Lenu+HSHcQ6GwAACAASURBVApJoyvbPC1p1W4cY1bGqzAzs4VooXcenMSpcxHxRETsmW+HA7t1tH09Se8CjgVGR8TmlMiQ03L1AUC3Ow/5eOhttAWQGgXcmf8iaSPgmYh4tsk6+5aamVk/tdA7DziJE5KulrR5vr5T0vfy9Q8lfTlHMWZIehvwA2AvSVMl1TpKm+RIyEOSGiWJWgN4GXgl6/JKRDwsaU9gBHBBlrdso3bNusyS9JMcQZlUCSZVVQtDXWurnzF/Z2J8lrWOpBtyFOQGSWvn8rMl/UzSTcBPJK0q6dqsz/+SUTAlLS/pKpWQ2zMq7WBmZn2gP3QenMQJxgA7SFoRmEOOZlDCOY+tbRQR/wa+B1wSEcMjona8jYEPU3JHHFe7zVJxF/AU8LCksyR9Isu7nNIO+0bEcCBov10BXoqIkcCpwM8bnMd42tpqJGVE6d35vtp2pwLn5ijIBcAvK2VsSPmMvgkcB9waEVsCfwbWzm0+AjwREVtExGbA3xrUxYmxzMx6SX/oPMxHi2cSp7FZ/9HAVcBgScsBQ3M0pTNXRcQbGZHxX8Ca1ZURMZdywd0TuB84RdLxDcrpqF2hrS0vom1EoWoSsGV2vJaKiFeAh3KUojpqsz1tt5LOy/OuuSzrSx77/DyHq4Dnc/l0YNccCdkhIhr2DJwYy8ysd/SH+8pO4lRulYwAHgKuA1YDvgJMbqJMgDcqr+fS4HPNOQmTgEmSrgPOalDndtu1Vkw7r2vHmC3pAeAg2kaTJlLmaKwBtNcRqpb1agfrase5X9LWWe4Jkq6NiB90UnczM+sh/WHkYbFP4pS3Ix6lzM+YmGUeReWWRcXLwAoNlnd07LUkbVVZNBz4R4PyOmvXvSr/TqCxcZS2qrbdfwIToy0W+nja5orsSxm9aWRMrkfSR4HakxxrAbMj4nxKm27Vzv5mZtYLFnrnwUmc3jIWeCoiZufrd9G483ATZYJkdcJkZ5YCTpZ0X96y2YtyQYcyx+GMXC46btelJd2W+x7ZzrHGUdqq1nmYkudSbbvDgQPzc9ivUpd636fMKZkCfAh4JJcPo4ygTKU8RfKjjk/fzMx6khNjWVPyVtCIdjpJ/Z4TY5mZtU5OjGVmZmY9oT9MmLQBICKGLuw6dIdzW5jZQDEQ8oR45MHMzMxa4s5DPyXpcJWcGBe0uN/Kkg7N18NyYuVUSc/lhNSpkq7vQn3WlbR3B+s3lvRXSX/Pel8saY0WjzFIUqNJomZm1o+489B/HQrsFhH7trjfyrkvETE9I1EOp0RoPDrf79qF+qxL2+OV85G0LHAl5WmSDTKXx/8BLSXBioi5EbFDF+pmZmZ9yJ2HfkjSGZSL9Z8lHSlppKTx+djneJUkU0jaNPNMTM08ERsAJ1IeEZ0q6aROjnNM7j9Nbfk0ts993yZpsKR7JL03y90519Xnz9iPktjs6tqCiLghIu5VyZdxjkqujCmSdszjDJN0e6Xu60paUtILuX5XlbwXf5A0U9K5lXpvI+kWlQysf5W0JmZm1mc8YbIfiohDJH0E2DkinlHJebFjRMyRtCvw35TomYcAv4iIC1SSZg0CjgE2y9GGdqmk9V6bkgRMwNWSRkXEeEl/oyTgejtwVnYCjgG+HhG7NyhuM9qPhnk4JfHZMEmb5nE2oIyOnBwRl6gk32oU3XIrSkjxfwETVXKV3An8Avhkts2+wA+Bgxuc48G15YNWXL2j5jAzsxa48zAwrASckxfdoAR9ghKI6ViVlNt/yARgzZb5IeCjlIsxwGBKUqrxlIRUk4GXmD8xVleMBk4CiIi7JT0BrJ/H+a6kdbLuD2jBNNwTI+JJgAwINRR4nRIA7Po810GU8OELiIgzgTMBlh6ygQOamJn1EN+2GBh+CNyUGSQ/ASwDEBEXAp8EXgOukfSBFsoU8KPanIiIWD8izs51q1FChK8ILN1EWXdT0pe3d5wFRMR5wKcpeTmuq93OqNMoZ4eAaZV6D4uIjzZRRzMz6yHuPAwMKwGP5+sDagslrQs8FBG/pEyI3Jzmc19cA3xJJQMmkt6lkpAMyq/1Yyhhvk/IZR2Vex4lvPhHKnXbTdImzJ+f4r3AEOABSetGxAMR8QtKJtHNm6gzwD3AOyWNzDLflrdDzMysj7jzMDD8DyV75DjKMH3NXsCMHNLfGDg3Ip6lpB2f0dGEyZzceDllLsF0Su6NwZIOAl6NiEuBH1OSib2fcntjkEq69MPryppNGRE5Mh/VvAf4AvA0JRHYsnmMC4AvZiKwz0u6O+u+Lpl6uzMR8QYltfjPJN2V9dq2mX3NzKxnOLeFLRac28LMrHXObWFmZmY9wp0HMzMza4kf1bTFghNjmdlA1F+TZHnkwczMzFrSY50HJ3LqXZI+LenofP0ZSRtX1t0qqbOIkoMknZZPYUzPsNTrSFoio0f2RB23lnRH5f1+kl6RNCjfbylpSjfKXz+fzjAzs4WoJ29bHAp8NCIebnG/WiKnX0fEdGA4gKSzgSsj4vIu1qeWyOni+hVqS+R0eC0fg6RdKImc/tXsASJiLtAniZwi4orK288A84D7Wiji85Tz2zwi5klamxJBcglKTIcTe6CadwHrS1ouH98cBdwPbAFMyffjmi1M0pIRMacH6mVmZj2oR0Ye5ERO3UrklOU8lK9XkzRP0qh8P0HSUElflvRzSTsAuwGnZF2GZjF7Z9vMrO1bZwjwZETMy/N9JCJeyHZaIcs6N4/5rRyhmCHpsFy2fsZlOC/b5tLshL0lL/RTgJG5aEvgdEqngfx3fJb3wTzmdEn/p5KbA0mPSfovlZgWn862myZpAiWXR63NFvg8GpyzmZn1gh7pPETEIcATlEROp1B+Ee8YEVsC36MkcoK2RE7DgRGUnATHAA9mqOGj2zuG5k/kNBwYpZLIaQJQS+T0UzKRU5Z7U5b7y7rimkrkROlknJcXtloip+HANnm+9bYC/oOSzOm9krZTSfr0C2CPiNiaEgzph3XtNwd4KDtZo7NuO+TFeY2ImFXZdixwNXBknlttnSJiJHA0pc3rXQx8Jjt0J1ducxwDvJxlfVElcuO+lA7A9sChkmrRHzcBTsu2eR34aoPjjKd8NitQwku
PYf7OwzhJywG/yzYZRgmFXU1s9WpEvC8iLgPOBr4WEdszf4CsTj8PSQdLukPSHXNnv9igqmZm1hW9NWFyJeAySTOAUyiJjKAkcvqOpG8D60TEay2UWU3kNIWSXGnDXHcc8HFgGKUD0R2jKeGWiYi7KRelaiKnbwHvjojXG+w7MSKezNsZtURO76UtkdNUysX63Q32HQvsmH8nUG6HbAvc1mS9/5D/Ts7jziciHgE2Ao7NRTdJ2qlBOTsAv4+I2RHxMvBHSpsAPBwRE/P1+ZXlVeMonYTtgEkRMRPYSNI7gKWyHu8F/h4RD+Y+51LOu+YSKKMwwLIRUbvVcV5lm04/j4g4MyJGRMSIQcut1KCqZmbWFb3VeXAip9YTOY2lXLhHUOZjrEa5oI5p4nyqx64dt9E5vB4RV0fEUcBPgE812KyjtJz14UgbhSedQOn0vC9fA/wT+Cxt8x06S/35aifHaPbzMDOzXtCbIw9O5FQ0m8hpAvB+yi2TfwPTga9QOhX1mm2zt6g8CTEkXy9BGaX5R21CotrSYY+hzDVYVtJgSgejVof3SNomX+8D3Fp/nJxH8RTllk+t8zAROIKc70Bpkw0q8xS+ANzSoKxngNclbZ+L9q2cT1c/DzMz66be6jw4kVPbsZpK5JS3cJ6g7QI7ljKack+DYi+i3P6pTpjszDuAq/JW0nTK6M/pue63wDRJ50bEpCz/dspF//R8CgbKiM1XJE0Dlqd02hoZBwyKiCfz/QRKm43Pc50NfAn4Q7bzG8D/tVPWgcD/5oTJVyrLu/R5mJlZ9zkxljVF0vrA5TlBccBxYiwzs9bJibHMzMysJzi3hTUlIh4gA3gNRM5tYWZ9rb/mpegJHnkwMzOzlrjzMEBpAOUSkfQXSR+vvH9QlXwakv4k6ZMqUTpD0v6VddvksiPy/flZz7sk3a8SDXStVutrZmZd587DwHUosFtE7NvplvOr5RIhIqbX4k9QHp09Ot/v2oX61HKJNDKejDKpEpr7BUr0yprtaHvKZHpdOXtTcmZUHRkRW1Ce2JkO3ChpqS7U2czMusCdhwFIAy+XSC3qJPnvH4G1srwNgBcypgPAQ8CKKjk+BHyQEuNjARExLyJOBp6jRCA1M7M+4AmTA1BEHJIBrnaOiGckrUjJJTJH0q6UXCJ70JZL5AKV/ByDKMG0NuvskUvNn0tEwNUquUTGS6rlEnk7mUskb0N8PSJ2b1Dc7cDwDEQ1itIZ2ETShpQRiPpMm7+nxMa4lxKe+81OmmQKZRRivhmRkg4mc2YMWnH1ToowM7NmufOwaFgJOCd/xQdQG8KfABwr6V3AHyLi7+XHfFOquUQABlNyiYyn5BKZTEnp/bXOCoqI1yTNpDytsS0lfPkmlI7E9rTdsqi5hBIF9H5KwKrOwpi3F1L8TDKQ1dJDNnBAEzOzHuLbFouG/p5LBEoHYSdgmYh4iRK9clT+zTfyEBGP5/HfD9zcRNnDKaMUZmbWB9x5WDT091wiUDoIX6NtJONOSiKwd1BSuNf7L+DbETGvvQJVHAmsClzXxDmZmVkPcOdh0dCvc4mkcZRJnhOy/DeBZylpuxe4pRARt0bEn9up3imZJ6R2K+QDWZ6ZmfUB57awxYJzW5iZtc65LczMzKxHuPNgZmZmLfGjmrZYcGIss/5rUU4gtajyyIOZmZm1pE86D07i1P0kTpJ+LGnnfP0NScvk6yUlvdDE/kMkXZ11uUfSnztriy7U8ZuSTq68/21Go6y9P1LSz7pR/pcl/by79TQzs+7pq5EHJ3HqZhKniDg2Im7Kt98gA0G14EfAVRGxRURsAnw3l3fUFq16q+3SMGBVSbXv2QIBodqTMRw8MmZm1g/1+v+c5SROQMdJnCSNknRpvt5D0quSlpK0vKS/5/LzJe2eQZHWAMZWR10knZijChMkrdGgCkOAxyr1mZYv52sLScvmCMl0SVMk7Zjlf1nSFZKukTRT0ncbHGMyJWfF0pJWoYSvnkEJRQ2VUNSSvqUSa2KGpMNy2fr5/gxKvoohedz7Jd1M6bjVznfv3PYuSTdhZmZ9ptcnTDqJ0wIaJXG6Hdg6X+8A3ANsRcknMbG6c0ScIumbwA4R8ULWcyXglog4Jm8LHETpFFSdClwoaQpwfbbFk5Q2fqstJH0b+HdEDJO0KaUtN8gyRgKbAf8Gbpd0ZURMrdTt35Jm5Lm8Pev+KDBK0stZ7pOSRgL7ZnmDgEmSbgFmUzoaB+b35l2USJNbUSJYjqm0x3HAThHxlKSVGzW0nBjLzKxXLIxh4ZWAy/IicwqwaS6fAHwnL17rRMRrLZRZTeI0BVifksQJykXm45Qh9J92VlAet5rEaVLWrZaHoVESp88B+1CSOHVmgSROGR3xkbxIjwB+DuxI6UiMbaLM1yLir/l6MjC0wTGuBtYDfku5QN8padUGZY2mJKUiIu4GnqC0J8A1EfF8RLxKGZEZ3WD/2sjNKEq7NWq7HYDfR8TsiHi5rqwHI+L2fL0dcENEPBsR/6ZEuawe51xJX6ad73FEnBkRIyJixKDlVmq0iZmZdcHC6Dw4iVPjJE5jgY9Rfn3fQLnAjqb82u7Mvyuv59LOiFJehC+IiC8AU2l88e8o7WZ9ONJG4Ulr8x62p7TbDMpoRbXtOjrGq00cA+ArlI7hUOAuSW/voEwzM+tBC2vkwUmcFjSGMhFyfET8M4+1XkQ0Ol6z7VI9/i6Sls3XKwLvAR5pUNYYyi0Fcn7IEOCBXPchlSdglgM+RePJj7WRh5WzszKPMun0Y7SNPIwBPp3zKwZnWY1GWCYCu0haJW9l7VlZt25ETKS0/fPAO5tvDTMz646FESTqf4BzJH0DuLGyfC/gC5LeBP4J/CAinpM0Lm9x/DUijm5UYERcLWljShInKBfEz0v6JJnEKecGTFBJ4jSBTOIE/DY7LFULJHGS9Cylc9MwiVMH53uKpO8Dy2Z57SVxmkC5UNdGGmZQnjZp5EzgekmPAh/p4NhV2wCnZvsuAZweEXfW5pfU2gL4FfC/Ksmw3gS+mHMZAG4FLqTc/jivOt+hJue1vAhMqyyeSJnfMD23mSTpIspcD7Iu0yWtX1fWY5J+lPs/AVSTU5wi6T2UUYxrI2JGk+1gZmbd5MRY1pScW7BZRByxsOvSFU6MZWbWOjkxlpmZmfUEjzzYYmHpIRvEkP0dnNLMBo7+kPPDIw9mZmbWI9x5GKAkraq2XB//lPR45X1UXk+VNLTB/mdL2jNf35xRI6dJuk/SqdXAS5LmNlHehiq5Mx5QyWNyqUp471bPqz6OhpmZ9TNOyT1ARcSzlJgRSDoeeCXDXyPplc6icjawb0TckU9fnAD8iRK7AkoQqnbLU0nSdRXwjYj4Sy7bGVgdeKqVSkTEqM63MjOzhckjDzafjOT4LWBtSVs0udvngQm1jkOWc1NEzJC0jKSzVHJl3Km2zKCNcpkg6ZX8d6ccEbk8R0MuUD4vKmlrSbdImqySa2NIT7aBmZl1zCMPi6ZlJdViMDwcEZ9uZeeImJtxHzamZAntrLzNKGGxG/mPLHNYxuK4ViVPSK
      "image/png": "[base64-encoded PNG plot data from the notebook output omitted]",
      "text/plain": [
" ] @@ -1438,7 +1451,7 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 41, "metadata": {}, "outputs": [], "source": [ @@ -1448,15 +1461,15 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 42, "metadata": {}, "outputs": [ { "data": { "application/papermill.record+json": { "results": { - "Doc2vec Cosine": 0.5236274769065202, - "Doc2vec Cosine with Stop Words": 0.45176043696294416, + "Doc2vec Cosine": 0.528387685928394, + "Doc2vec Cosine with Stop Words": 0.45572884639905675, "GLoVe Cosine": 0.6688056947022161, "GLoVe Cosine with Stop Words": 0.6049380247374541, "GLoVe WMD": 0.6267300417407605, From f43ab86d7e5e8f8c4ece5199b2840eaa8d9c8f5a Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Wed, 26 Jun 2019 15:14:16 -0400 Subject: [PATCH 090/108] renamed automl_with_pipelines to automl_with_pipelines_aks --- ...pipelines.ipynb => automl_with_pipelines_deployment_aks.ipynb} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename scenarios/sentence_similarity/{automl_with_pipelines.ipynb => automl_with_pipelines_deployment_aks.ipynb} (100%) diff --git a/scenarios/sentence_similarity/automl_with_pipelines.ipynb b/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb similarity index 100% rename from scenarios/sentence_similarity/automl_with_pipelines.ipynb rename to scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb From ab2ce57a4927d18af560e1c0c1668cfaaef0374f Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Thu, 27 Jun 2019 18:36:45 -0400 Subject: [PATCH 091/108] Fixed most PR comments --- scenarios/sentence_similarity/README.md | 2 + .../automl_local_deployment_aci.ipynb | 331 ++++++++++++------ 2 files changed, 234 insertions(+), 99 deletions(-) diff --git a/scenarios/sentence_similarity/README.md b/scenarios/sentence_similarity/README.md index 0bfe52821..fcd5e27bc 100644 --- a/scenarios/sentence_similarity/README.md +++ b/scenarios/sentence_similarity/README.md @@ -22,3 +22,5 @@ The following summarizes each notebook for Sentence Similarity. Each notebook pr |[Creating a Baseline model](baseline_deep_dive.ipynb)| Yes| A baseline model is a basic solution that serves as a point of reference for comparing other models to. The baseline model's performance gives us an indication of how much better our models can perform relative to a naive approach.| |Senteval |[local](senteval_local.ipynb), [AzureML](senteval_azureml.ipynb)|SentEval is a widely used benchmarking tool for evaluating general-purpose sentence embeddings. Running SentEval locally is easy, but not necessarily efficient depending on the model specs. We provide an example on how to do this efficiently in Azure Machine Learning Service. | |[GenSen on AzureML](gensen_aml_deep_dive.ipynb)| No | This notebook serves as an introduction to an end-to-end NLP solution for sentence similarity building one of the State of the Art models, GenSen, on the AzureML platform. We show the advantages of AzureML when training large NLP models with GPU. +|[Automated Machine Learning(AutoML) with Deployment on Azure Container Instance](automl_local_deployment_ACI.ipynb)| Yes |This notebook shows users how to use AutoML on local machine and deploy the model as a webservice to Azure Container Instance(ACI) to get a sentence similarity score. 
+|[Google Universal Sentence Encoder with Azure Machine Learning Pipeline, AutoML with Deployment on Azure Kubernetes Service](aml_pipelines_deployment_AKS.ipynb)| No | This notebook shows a user how to use AzureML pipelines and deploy the pipeline output model as a webservice to Azure Kubernetes Service which can be used as an end point to get sentence similarity scores. diff --git a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb index 2aa69ea0a..6ef86fa02 100644 --- a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb +++ b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb @@ -13,14 +13,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Local AutoML Model with ACI Deployment for Predicting Sentence Similarity" + "# Local Automated Machine Learning Model with ACI Deployment for Predicting Sentence Similarity" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook demonstrates how to use Azure AutoML locally to automate machine learning model selection and tuning and how to use Azure Container Instance (ACI) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and utilize AutoML's text preprocessing features." + "This notebook demonstrates how to use Azure Automated Machine Learning(AutoML) locally to automate machine learning model selection and tuning and how to use Azure Container Instance (ACI) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and utilize AutoML's text preprocessing features." ] }, { @@ -58,9 +58,7 @@ "source": [ "### 1.1 What is Azure AutoML?\n", "\n", - "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning.\n", - "\n", - "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/" + "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to improve the productivity of data scientists and democratize AI by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning." 
] }, { @@ -168,7 +166,9 @@ "metadata": {}, "outputs": [], "source": [ - "BASE_DATA_PATH = '../../data'" + "BASE_DATA_PATH = '../../data'\n", + "CPU_CORES = 1\n", + "MEMORY_GB = 8" ] }, { @@ -201,7 +201,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 258KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 300KB/s]\n" ] }, { @@ -215,7 +215,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 294KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 265KB/s]\n" ] }, { @@ -229,7 +229,7 @@ "name": "stderr", "output_type": "stream", "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 252KB/s]\n" + "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 300KB/s]\n" ] }, { @@ -383,7 +383,7 @@ } ], "source": [ - "train.head(5)" + "train.head()" ] }, { @@ -520,9 +520,9 @@ "outputs": [], "source": [ "X_train = train.drop(\"score\", axis=1).values\n", - "y_train = train['score'].values.flatten()\n", + "y_train = train[\"score\"].values.flatten()\n", "X_validation = dev.drop(\"score\", axis=1).values\n", - "y_validation = dev['score'].values.flatten()\n", + "y_validation = dev[\"score\"].values.flatten()\n", "\n", "# local compute\n", "automated_ml_config = AutoMLConfig(\n", @@ -554,7 +554,7 @@ "output_type": "stream", "text": [ "Running on local machine\n", - "Parent Run ID: AutoML_ad20c29f-7d03-4079-8699-3133d24d3631\n", + "Parent Run ID: AutoML_7e7a2c36-8bab-4c25-9050-6f110036e7b3\n", "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", "Current status: DatasetEvaluation. Gathering dataset statistics.\n", "Current status: FeaturesGeneration. 
Generating features for the dataset.\n", @@ -570,57 +570,57 @@ "****************************************************************************************************\n", "\n", " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:00:11 0.0606 0.0606\n", - " 1 MaxAbsScaler RandomForest 0:00:47 0.2127 0.2127\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:21 0.2173 0.2173\n", - " 3 StandardScalerWrapper LightGBM 0:00:15 0.2905 0.2905\n", - " 4 StandardScalerWrapper RandomForest 0:00:12 0.0669 0.2905\n", - " 5 MaxAbsScaler ExtremeRandomTrees 0:00:31 0.2224 0.2905\n", - " 6 StandardScalerWrapper ExtremeRandomTrees 0:00:17 0.1769 0.2905\n", - " 7 MaxAbsScaler DecisionTree 0:00:14 0.1186 0.2905\n", - " 8 MaxAbsScaler ExtremeRandomTrees 0:00:17 0.1891 0.2905\n", - " 9 MaxAbsScaler SGD 0:00:10 0.1448 0.2905\n", - " 10 StandardScalerWrapper RandomForest 0:00:12 0.0199 0.2905\n", - " 11 StandardScalerWrapper DecisionTree 0:00:14 0.1245 0.2905\n", - " 12 MaxAbsScaler SGD 0:00:13 0.1310 0.2905\n", - " 13 MaxAbsScaler DecisionTree 0:00:12 0.1370 0.2905\n", - " 14 MaxAbsScaler SGD 0:00:12 0.0572 0.2905\n", - " 15 StandardScalerWrapper RandomForest 0:00:19 0.1924 0.2905\n", - " 16 MaxAbsScaler RandomForest 0:00:11 0.0187 0.2905\n", - " 17 MaxAbsScaler ElasticNet 0:00:10 nan 0.2905\n", - "ERROR: Run AutoML_ad20c29f-7d03-4079-8699-3133d24d3631_17 failed with exception \"Primary metric spearman_correlation is not available.\".\n", - " 18 MaxAbsScaler ExtremeRandomTrees 0:00:11 0.0972 0.2905\n", - " 19 MaxAbsScaler DecisionTree 0:00:13 0.1686 0.2905\n", - " 20 StandardScalerWrapper LightGBM 0:00:26 0.6102 0.6102\n", - " 21 MaxAbsScaler RandomForest 0:05:00 0.1617 0.6102\n", - " 22 StandardScalerWrapper LightGBM 0:00:25 0.3608 0.6102\n", - " 23 StandardScalerWrapper RandomForest 0:02:32 0.2200 0.6102\n", - " 24 MaxAbsScaler DecisionTree 0:01:13 0.2027 0.6102\n", - " 25 TruncatedSVDWrapper LightGBM 0:00:31 0.3707 0.6102\n", - " 26 StandardScalerWrapper ExtremeRandomTrees 0:00:21 0.1498 0.6102\n", - " 27 MaxAbsScaler DecisionTree 0:00:11 0.1748 0.6102\n", - " 28 MaxAbsScaler LightGBM 0:00:18 0.4395 0.6102\n", - " 29 MaxAbsScaler LightGBM 0:00:23 0.4191 0.6102\n", - " 30 TruncatedSVDWrapper LightGBM 0:00:43 0.4102 0.6102\n", - " 31 MaxAbsScaler LightGBM 0:00:27 0.5077 0.6102\n", - " 32 MaxAbsScaler LightGBM 0:00:44 0.6012 0.6102\n", - " 33 MaxAbsScaler LightGBM 0:00:48 0.4611 0.6102\n", - " 34 MaxAbsScaler LightGBM 0:00:39 0.5135 0.6102\n", - " 35 MaxAbsScaler LightGBM 0:00:24 0.2219 0.6102\n", - " 36 SparseNormalizer LightGBM 0:00:24 0.2888 0.6102\n", - " 37 StandardScalerWrapper LightGBM 0:00:38 0.5663 0.6102\n", - " 38 MaxAbsScaler LightGBM 0:00:31 0.3793 0.6102\n", - " 39 MaxAbsScaler LightGBM 0:00:40 0.3672 0.6102\n", + " 0 StandardScalerWrapper RandomForest 0:00:10 0.0764 0.0764\n", + " 1 MaxAbsScaler RandomForest 0:00:43 0.2217 0.2217\n", + " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:22 0.1523 0.2217\n", + " 3 StandardScalerWrapper LightGBM 0:00:14 0.2905 0.2905\n", + " 4 StandardScalerWrapper RandomForest 0:00:11 0.0820 0.2905\n", + " 5 MaxAbsScaler ExtremeRandomTrees 0:00:27 0.2312 0.2905\n", + " 6 StandardScalerWrapper ExtremeRandomTrees 0:00:15 0.1698 0.2905\n", + " 7 MaxAbsScaler DecisionTree 0:00:12 0.1582 0.2905\n", + " 8 MaxAbsScaler ExtremeRandomTrees 0:00:15 0.1959 0.2905\n", + " 9 MaxAbsScaler SGD 0:00:12 0.1433 0.2905\n", + " 10 StandardScalerWrapper RandomForest 0:00:10 0.0757 0.2905\n", + " 11 StandardScalerWrapper DecisionTree 0:00:11 0.1543 
0.2905\n", + " 12 MaxAbsScaler SGD 0:00:10 0.1312 0.2905\n", + " 13 MaxAbsScaler DecisionTree 0:00:12 0.1271 0.2905\n", + " 14 MaxAbsScaler SGD 0:00:10 0.0713 0.2905\n", + " 15 StandardScalerWrapper RandomForest 0:00:20 0.1937 0.2905\n", + " 16 MaxAbsScaler RandomForest 0:00:10 0.0223 0.2905\n", + " 17 MaxAbsScaler ElasticNet 0:00:07 nan 0.2905\n", + "ERROR: Run AutoML_7e7a2c36-8bab-4c25-9050-6f110036e7b3_17 failed with exception \"Primary metric spearman_correlation is not available.\".\n", + " 18 MaxAbsScaler ExtremeRandomTrees 0:00:11 0.1129 0.2905\n", + " 19 MaxAbsScaler DecisionTree 0:00:11 0.1686 0.2905\n", + " 20 StandardScalerWrapper LightGBM 0:00:25 0.6102 0.6102\n", + " 21 MaxAbsScaler RandomForest 0:04:16 0.1617 0.6102\n", + " 22 StandardScalerWrapper LightGBM 0:00:18 0.3608 0.6102\n", + " 23 StandardScalerWrapper RandomForest 0:02:14 0.1981 0.6102\n", + " 24 MaxAbsScaler DecisionTree 0:01:52 0.0829 0.6102\n", + " 25 TruncatedSVDWrapper LightGBM 0:00:25 0.3793 0.6102\n", + " 26 StandardScalerWrapper ExtremeRandomTrees 0:00:19 0.1402 0.6102\n", + " 27 MaxAbsScaler LightGBM 0:00:22 0.4191 0.6102\n", + " 28 MaxAbsScaler LightGBM 0:00:16 0.4395 0.6102\n", + " 29 StandardScalerWrapper LightGBM 0:00:37 0.4394 0.6102\n", + " 30 TruncatedSVDWrapper LightGBM 0:00:44 0.4081 0.6102\n", + " 31 MaxAbsScaler LightGBM 0:00:25 0.5077 0.6102\n", + " 32 MaxAbsScaler LightGBM 0:00:32 0.3352 0.6102\n", + " 33 MaxAbsScaler LightGBM 0:00:35 0.5135 0.6102\n", + " 34 SparseNormalizer LightGBM 0:00:22 0.2888 0.6102\n", + " 35 MaxAbsScaler LightGBM 0:00:40 0.3648 0.6102\n", + " 36 MaxAbsScaler LightGBM 0:00:46 0.4611 0.6102\n", + " 37 MaxAbsScaler LightGBM 0:00:44 0.6012 0.6102\n", + " 38 MaxAbsScaler LightGBM 0:00:36 0.3672 0.6102\n", + " 39 StandardScalerWrapper LightGBM 0:00:36 0.4819 0.6102\n", " 40 MaxAbsScaler LightGBM 0:00:21 0.2507 0.6102\n", - " 41 MaxAbsScaler LightGBM 0:00:37 0.3352 0.6102\n", - " 42 StandardScalerWrapper LightGBM 0:01:05 0.5460 0.6102\n", - " 43 MaxAbsScaler LightGBM 0:00:36 0.5104 0.6102\n", - " 44 SparseNormalizer LightGBM 0:02:38 0.4208 0.6102\n", - " 45 TruncatedSVDWrapper LightGBM 0:00:37 0.2362 0.6102\n", - " 46 StandardScalerWrapper LightGBM 0:00:41 0.4394 0.6102\n", - " 47 MaxAbsScaler LightGBM 0:00:46 0.3982 0.6102\n", - " 48 VotingEnsemble 0:02:21 0.6408 0.6408\n", - " 49 StackEnsemble 0:03:01 0.6409 0.6409\n" + " 41 MaxAbsScaler DecisionTree 0:00:13 0.1288 0.6102\n", + " 42 StandardScalerWrapper LightGBM 0:00:24 0.4605 0.6102\n", + " 43 SparseNormalizer LightGBM 0:02:29 0.4208 0.6102\n", + " 44 SparseNormalizer LightGBM 0:00:58 0.4316 0.6102\n", + " 45 TruncatedSVDWrapper LightGBM 0:00:30 0.3711 0.6102\n", + " 46 MaxAbsScaler LightGBM 0:00:34 0.3880 0.6102\n", + " 47 MaxAbsScaler LightGBM 0:00:31 0.4294 0.6102\n", + " 48 VotingEnsemble 0:02:30 0.6408 0.6408\n", + " 49 StackEnsemble 0:03:06 0.6409 0.6409\n" ] } ], @@ -638,9 +638,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4631a71f61a44001a435d206ed91f1cb", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Inspect the run details using the provided widget\n", "RunDetails(local_run).show()" @@ -658,9 +673,24 @@ "metadata": {}, "source": [ "# 
4. Deploy Sentence Similarity Model\n", + "Deploying an Azure Machine Learning model as a web service creates a REST API. You can send data to this API and receive the prediction returned by the model.\n", + "In general, you create a webservice by deploying a model as an image to a Compute Target.\n", + "\n", + "Some of the Compute Targets are: \n", + "1. Azure Container Instance\n", + "2. Azure Kubernetes Service\n", + "3. Local web service\n", "\n", - "## 4.1 Retrieve the Best Model\n", - "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the get_output method which returns the best run and fitted model across all iterations. Overloads on get_output allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a \"reproducible mechanism for building, evaluating, deploying, and running ML systems\" (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", + "The general workflow for deploying a model is as follows:\n", + "1. Register a model\n", + "2. Prepare to deploy\n", + "3. Deploy the model to the compute target\n", + "4. Test the deployed model (webservice)\n", + "\n", + "In this notebook we walk you through the process of creating a webservice running on Azure Container Instance by deploying an AutoML model as an image. ACI is usually good for low scale, CPU-based workloads. (You can find more information on deploying and serving models [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)\n", + "\n", + "## 4.1 Retrieve the Best Model @Courtney : review section\n", + "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the `get_output` method which returns the best run and fitted model across all iterations. Overloads on `get_output` allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a reproducible mechanism for building, evaluating, deploying, and running ML systems (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", "\n", "The different steps that make up the pipeline can be accessed through `fitted_model.named_steps` and information about data preprocessing is available through `fitted_model.named_steps['datatransformer'].get_featurization_summary()`" ] @@ -679,7 +709,12 @@ "metadata": {}, "source": [ "## 4.2 Register the Fitted Model for Deployment\n", - "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered." + "\n", + "Registering a model means registering a one or more files that make up for a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. The model can be either come from Azure Machine Learning or any other location such as local machine. 
\n", + "Below shows how a model is registered from an experiment run. \n", + "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered.\n", + "\n", + "See other ways to register a model [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)" ] }, { @@ -691,8 +726,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "Registering model AutoMLad20c29f7best\n", - "AutoMLad20c29f7best\n" + "Registering model AutoML7e7a2c368best\n", + "AutoML7e7a2c368best\n" ] } ], @@ -709,7 +744,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 4.3 Create Scoring Script" + "## 4.3 Create an Entry Script\n", + "In this section we show an example of entry script which is called from the deployed webservice. `score.py` is our entry script. The script must contain:\n", + "1. init() - This function loads the model in a global object.\n", + "2. run() - This function is used for model prediction. The inputs and outputs to `run()` typically use JSON for serialization and deserilization. \n" ] }, { @@ -879,6 +917,15 @@ "## 4.5 Create a Container Image" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step.\n", + "\n", + "Below image creation step takes a while sometimes." + ] + }, { "cell_type": "code", "execution_count": 21, @@ -889,9 +936,9 @@ "output_type": "stream", "text": [ "Creating image\n", - "Running..................................................\n", + "Running................................................\n", "Succeeded\n", - "Image creation operation finished for image automl-image:8, operation \"Succeeded\"\n" + "Image creation operation finished for image automl-image:9, operation \"Succeeded\"\n" ] } ], @@ -924,7 +971,7 @@ "metadata": {}, "outputs": [], "source": [ - "print(image.image_build_log_uri) " + "# print(image.image_build_log_uri)" ] }, { @@ -934,6 +981,15 @@ "## 4.6 Deploy the Image as a Web Service to Azure Container Instance" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Azure Container Instances is mostly used for deploying your models as a web service if one or more of the following conditions is true:\n", + "1. You need to quickly deploy and validate your model.\n", + "2. You are testing a model that is under development." + ] + }, { "cell_type": "code", "execution_count": 22, @@ -941,13 +997,20 @@ "outputs": [], "source": [ "#Set the web service configuration\n", - "aci_config = AciWebservice.deploy_configuration(cpu_cores = 1, \n", - " memory_gb = 8)" + "aci_config = AciWebservice.deploy_configuration(cpu_cores = CPU_CORES, \n", + " memory_gb = MEMORY_GB)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Below step sometimes takes time. 
" ] }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 24, "metadata": {}, "outputs": [ { @@ -955,7 +1018,7 @@ "output_type": "stream", "text": [ "Creating service\n", - "Running.......................\n", + "Running.....................\n", "SucceededACI service creation operation finished, operation \"Succeeded\"\n", "Healthy\n" ] @@ -963,9 +1026,8 @@ ], "source": [ "# deploy image as web service\n", - "aci_service_name ='aci-automl-service'\n", "aci_service = Webservice.deploy_from_image(workspace = ws, \n", - " name = aci_service_name,\n", + " name = 'aci-automl-service-1',\n", " image = image,\n", " deployment_config = aci_config)\n", "\n", @@ -986,64 +1048,83 @@ "metadata": {}, "outputs": [], "source": [ - "print(aci_service.get_logs())" + "#print(aci_service.get_logs())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 4.7 Test Deployed Model\n", - "We test the web sevice by passing data. The run method expects input in json format. Run() method retrieves API keys behind the scenes to make sure that call is authenticated. " + "If you are not creating a webservice but want to reuse an existing service call the webservice with the name. You can look up all the deployed webservices under deployment in the Azure Portal. Below is an example to it:" ] }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "test_y = test['score'].values.flatten()\n", - "test_x = test.drop(\"score\", axis=1).values.tolist()\n", + "#aci_service = Webservice(workspace=ws, name='<>')\n", "\n", - "data = {'data': test_x}\n", - "data = json.dumps(data)" + "# to use the webservice\n", + "#aci_service.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4.7 Test Deployed Model @Courtney: can you check this section too for the values of the output & description\n", + "\n", + "Testing the deployed model is nothing but calling the created webservice.
\n", + "The deployed model can be tested by passing list of sentence pairs. The output will be a score between 1-5 with 5 being identical sentences and 1 indicates that the sentences are totally different.\n", + "\n", + "The run method expects input in json format. Run() method retrieves API keys behind the scenes to make sure that call is authenticated. " ] }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 37, "metadata": {}, "outputs": [], "source": [ - "# Set up a Timer to see how long the model takes to predict\n", - "t = Timer()" + "sentences = [['This is sentence1', 'This is sentence1'],\n", + " ['A hungry cat.', 'A sleeping cat'],\n", + " ['Its summer time ', 'Winter is coming']]\n", + "data = {'data': sentences}\n", + "data = json.dumps(data)" ] }, { "cell_type": "code", - "execution_count": 26, + "execution_count": 38, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Time elapsed: 2.7085\n", - "Number of samples predicted: 1379\n" + "Time elapsed: 0.1937\n", + "Number of samples predicted: 3\n", + "[1.844837748433727, 2.072780308418169, 2.0510668030610644]\n" ] } ], "source": [ + "# Set up a Timer to see how long the model takes to predict\n", + "t = Timer()\n", + "\n", "t.start()\n", "score = aci_service.run(input_data = data)\n", "t.stop()\n", - "print(\"Time elapsed: {}\".format(t))\n", + "\n", + "print('Time elapsed: {}'.format(t))\n", "\n", "result = json.loads(score)\n", "try:\n", - " output = result[\"result\"]\n", - " print('Number of samples predicted: {0}'.format(len(output)))\n", + " output = result['result']\n", + " print('Number of samples predicted: {}'.format(len(output)))\n", + " print(output)\n", "except:\n", " print(result['error'])" ] @@ -1060,12 +1141,57 @@ "\n", "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", "\n", - "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." + "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the main metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." 
] }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 31, + "metadata": {}, + "outputs": [], + "source": [ + "test_y = test['score'].values.flatten()\n", + "test_x = test.drop('score', axis=1).values.tolist()\n", + "\n", + "data = {'data': test_x}\n", + "data = json.dumps(data)" + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time elapsed: 1.2612\n", + "Number of samples predicted: 1379\n" + ] + } + ], + "source": [ + "# Set up a Timer to see how long the model takes to predict\n", + "t = Timer()\n", + "\n", + "t.start()\n", + "score = aci_service.run(input_data = data)\n", + "t.stop()\n", + "\n", + "print('Time elapsed: {}'.format(t))\n", + "\n", + "result = json.loads(score)\n", + "try:\n", + " output = result['result']\n", + " print('Number of samples predicted: {}'.format(len(output)))\n", + "except:\n", + " print(result['error'])" + ] + }, + { + "cell_type": "code", + "execution_count": 34, "metadata": {}, "outputs": [ { @@ -1080,6 +1206,13 @@ "print(pearsonr(output, test_y)[0])" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For more examples to deploy Machine Learning models follow [MachineLearningNotebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) github repository." + ] + }, { "cell_type": "code", "execution_count": null, From af1d7a667b86bc1817b43679aa8073b40a9aaeb2 Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Thu, 27 Jun 2019 20:00:50 -0400 Subject: [PATCH 092/108] black formatting and some text addition in deployment section --- .../automl_local_deployment_aci.ipynb | 791 ++++++------------ 1 file changed, 260 insertions(+), 531 deletions(-) diff --git a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb index 6ef86fa02..4d8d6cd79 100644 --- a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb +++ b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb @@ -45,11 +45,14 @@ "4. [Deploy Sentence Similarity Model](#4.-Deploy-Sentence-Similarity-Model) \n", " 4.1 [Retrieve the Best Model](#4.1-Retrieve-the-Best-Model) \n", " 4.2 [Register the Fitted Model for Deployment](#4.2-Register-the-Fitted-Model-for-Deployment) \n", - " 4.3 [Create Scoring Script](#4.3-Create-Scoring-Script) \n", + " 4.3 [Create an Entry Script](#4.3-Create-an-Entry-Script) \n", " 4.4 [Create a YAML File for the Environment](#4.4-Create-a-YAML-File-for-the-Environment) \n", " 4.5 [Create a Container Image](#4.5-Create-a-Container-Image) \n", " 4.6 [Deploy the Image as a Web Service to Azure Container Instance](#4.6-Deploy-the-Image-as-a-Web-Service-to-Azure-Container-Instance) \n", - " 4.7 [Test Deployed Model](#4.7-Test-Deployed-Model) " + " 4.7 [Test Deployed Model](#4.7-Test-Deployed-Model) \n", + " \n", + " \n", + "5. [Clean](#5-Clean)" ] }, { @@ -86,33 +89,24 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext blackcellmagic" + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - Some hub symbols are not available because TensorFlow version is less than 1.14\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Turning diagnostics collection on. 
\n", - "System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]\n", - "Azure ML SDK Version: 1.0.43\n", - "Pandas version: 0.23.4\n", - "Tensorflow Version: 1.13.1\n" - ] - } - ], + "outputs": [], "source": [ "# Set the environment path to find NLP\n", "import sys\n", + "\n", "sys.path.append(\"../../\")\n", "import time\n", "import os\n", @@ -139,12 +133,14 @@ "# Tensorflow dependencies for Google Universal Sentence Encoder\n", "import tensorflow as tf\n", "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", "\n", "# AzureML packages\n", "import azureml as aml\n", "import logging\n", "from azureml.telemetry import set_diagnostics_collection\n", + "\n", "set_diagnostics_collection(send_diagnostics=True)\n", "from azureml.train.automl import AutoMLConfig\n", "from azureml.core.experiment import Experiment\n", @@ -162,11 +158,11 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "BASE_DATA_PATH = '../../data'\n", + "BASE_DATA_PATH = \"../../data\"\n", "CPU_CORES = 1\n", "MEMORY_GB = 8" ] @@ -194,52 +190,9 @@ }, { "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 300KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 265KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 300KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Load in the raw datasets as pandas dataframes\n", "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", @@ -249,7 +202,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -262,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -274,19 +227,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Training set has 5749 sentences\n", - "Development set has 1500 sentences\n", - "Testing set has 1379 sentences\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(\"Training set has {} sentences\".format(len(train)))\n", "print(\"Development set has {} sentences\".format(len(dev)))\n", @@ -295,93 +238,11 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
scoresentence1sentence2
05.00a plane is taking off.an air plane is taking off.
13.80a man is playing a large flute.a man is playing a flute.
23.80a man is spreading shreded cheese on a pizza.a man is spreading shredded cheese on an uncoo...
32.60three men are playing chess.two men are playing chess.
44.25a man is playing the cello.a man seated is playing the cello.
\n", - "
" - ], - "text/plain": [ - " score sentence1 \\\n", - "0 5.00 a plane is taking off. \n", - "1 3.80 a man is playing a large flute. \n", - "2 3.80 a man is spreading shreded cheese on a pizza. \n", - "3 2.60 three men are playing chess. \n", - "4 4.25 a man is playing the cello. \n", - "\n", - " sentence2 \n", - "0 an air plane is taking off. \n", - "1 a man is playing a flute. \n", - "2 a man is spreading shredded cheese on an uncoo... \n", - "3 two men are playing chess. \n", - "4 a man seated is playing the cello. " - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "train.head()" ] @@ -409,38 +270,15 @@ }, { "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "WARNING - You have logged in. Now let us find all the subscriptions to which you have access...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Interactive authentication successfully completed.\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "ws = azureml_utils.get_or_create_workspace(\n", " subscription_id=\"\",\n", " resource_group=\"\",\n", " workspace_name=\"\",\n", - " workspace_region=\"\"\n", + " workspace_region=\"\",\n", ")" ] }, @@ -450,10 +288,13 @@ "metadata": {}, "outputs": [], "source": [ - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep='\\n')" + "print(\n", + " \"Workspace name: \" + ws.name,\n", + " \"Azure region: \" + ws.location,\n", + " \"Subscription id: \" + ws.subscription_id,\n", + " \"Resource group: \" + ws.resource_group,\n", + " sep=\"\\n\",\n", + ")" ] }, { @@ -498,24 +339,25 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "automl_settings = {\n", - " \"task\": 'regression', #type of task: classification, regression or forecasting\n", - " \"debug_log\": 'automated_ml_errors.log',\n", - " \"path\": './automated-ml-regression',\n", - " \"iteration_timeout_minutes\" : 15, #How long each iteration can take before moving on\n", - " \"iterations\" : 50, #Number of algorithm options to try\n", - " \"primary_metric\" : 'spearman_correlation', #Metric to optimize\n", - " \"preprocess\" : True, #Whether dataset preprocessing should be applied\n", - " \"verbosity\":logging.ERROR}" + " \"task\": \"regression\", # type of task: classification, regression or forecasting\n", + " \"debug_log\": \"automated_ml_errors.log\",\n", + " \"path\": \"./automated-ml-regression\",\n", + " \"iteration_timeout_minutes\": 15, # How long each iteration can take before moving on\n", + " \"iterations\": 50, # Number of algorithm options to try\n", + " \"primary_metric\": \"spearman_correlation\", # Metric to optimize\n", + " \"preprocess\": True, # Whether dataset preprocessing should be applied\n", + " \"verbosity\": logging.ERROR,\n", + "}" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -526,11 +368,8 @@ "\n", "# 
local compute\n", "automated_ml_config = AutoMLConfig(\n", - " X = X_train,\n", - " y = y_train,\n", - " X_valid = X_validation,\n", - " y_valid = y_validation,\n", - " **automl_settings)" + " X=X_train, y=y_train, X_valid=X_validation, y_valid=y_validation, **automl_settings\n", + ")" ] }, { @@ -544,88 +383,13 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running on local machine\n", - "Parent Run ID: AutoML_7e7a2c36-8bab-4c25-9050-6f110036e7b3\n", - "Current status: DatasetFeaturization. Beginning to featurize the dataset.\n", - "Current status: DatasetEvaluation. Gathering dataset statistics.\n", - "Current status: FeaturesGeneration. Generating features for the dataset.\n", - "Current status: DatasetFeaturizationCompleted. Completed featurizing the dataset.\n", - "Current status: ModelSelection. Beginning model selection.\n", - "\n", - "****************************************************************************************************\n", - "ITERATION: The iteration being evaluated.\n", - "PIPELINE: A summary description of the pipeline being evaluated.\n", - "DURATION: Time taken for the current iteration.\n", - "METRIC: The result of computing score on the fitted pipeline.\n", - "BEST: The best observed score thus far.\n", - "****************************************************************************************************\n", - "\n", - " ITERATION PIPELINE DURATION METRIC BEST\n", - " 0 StandardScalerWrapper RandomForest 0:00:10 0.0764 0.0764\n", - " 1 MaxAbsScaler RandomForest 0:00:43 0.2217 0.2217\n", - " 2 StandardScalerWrapper ExtremeRandomTrees 0:00:22 0.1523 0.2217\n", - " 3 StandardScalerWrapper LightGBM 0:00:14 0.2905 0.2905\n", - " 4 StandardScalerWrapper RandomForest 0:00:11 0.0820 0.2905\n", - " 5 MaxAbsScaler ExtremeRandomTrees 0:00:27 0.2312 0.2905\n", - " 6 StandardScalerWrapper ExtremeRandomTrees 0:00:15 0.1698 0.2905\n", - " 7 MaxAbsScaler DecisionTree 0:00:12 0.1582 0.2905\n", - " 8 MaxAbsScaler ExtremeRandomTrees 0:00:15 0.1959 0.2905\n", - " 9 MaxAbsScaler SGD 0:00:12 0.1433 0.2905\n", - " 10 StandardScalerWrapper RandomForest 0:00:10 0.0757 0.2905\n", - " 11 StandardScalerWrapper DecisionTree 0:00:11 0.1543 0.2905\n", - " 12 MaxAbsScaler SGD 0:00:10 0.1312 0.2905\n", - " 13 MaxAbsScaler DecisionTree 0:00:12 0.1271 0.2905\n", - " 14 MaxAbsScaler SGD 0:00:10 0.0713 0.2905\n", - " 15 StandardScalerWrapper RandomForest 0:00:20 0.1937 0.2905\n", - " 16 MaxAbsScaler RandomForest 0:00:10 0.0223 0.2905\n", - " 17 MaxAbsScaler ElasticNet 0:00:07 nan 0.2905\n", - "ERROR: Run AutoML_7e7a2c36-8bab-4c25-9050-6f110036e7b3_17 failed with exception \"Primary metric spearman_correlation is not available.\".\n", - " 18 MaxAbsScaler ExtremeRandomTrees 0:00:11 0.1129 0.2905\n", - " 19 MaxAbsScaler DecisionTree 0:00:11 0.1686 0.2905\n", - " 20 StandardScalerWrapper LightGBM 0:00:25 0.6102 0.6102\n", - " 21 MaxAbsScaler RandomForest 0:04:16 0.1617 0.6102\n", - " 22 StandardScalerWrapper LightGBM 0:00:18 0.3608 0.6102\n", - " 23 StandardScalerWrapper RandomForest 0:02:14 0.1981 0.6102\n", - " 24 MaxAbsScaler DecisionTree 0:01:52 0.0829 0.6102\n", - " 25 TruncatedSVDWrapper LightGBM 0:00:25 0.3793 0.6102\n", - " 26 StandardScalerWrapper ExtremeRandomTrees 0:00:19 0.1402 0.6102\n", - " 27 MaxAbsScaler LightGBM 0:00:22 0.4191 0.6102\n", - " 28 MaxAbsScaler LightGBM 0:00:16 0.4395 0.6102\n", - " 29 StandardScalerWrapper LightGBM 0:00:37 
0.4394 0.6102\n", - " 30 TruncatedSVDWrapper LightGBM 0:00:44 0.4081 0.6102\n", - " 31 MaxAbsScaler LightGBM 0:00:25 0.5077 0.6102\n", - " 32 MaxAbsScaler LightGBM 0:00:32 0.3352 0.6102\n", - " 33 MaxAbsScaler LightGBM 0:00:35 0.5135 0.6102\n", - " 34 SparseNormalizer LightGBM 0:00:22 0.2888 0.6102\n", - " 35 MaxAbsScaler LightGBM 0:00:40 0.3648 0.6102\n", - " 36 MaxAbsScaler LightGBM 0:00:46 0.4611 0.6102\n", - " 37 MaxAbsScaler LightGBM 0:00:44 0.6012 0.6102\n", - " 38 MaxAbsScaler LightGBM 0:00:36 0.3672 0.6102\n", - " 39 StandardScalerWrapper LightGBM 0:00:36 0.4819 0.6102\n", - " 40 MaxAbsScaler LightGBM 0:00:21 0.2507 0.6102\n", - " 41 MaxAbsScaler DecisionTree 0:00:13 0.1288 0.6102\n", - " 42 StandardScalerWrapper LightGBM 0:00:24 0.4605 0.6102\n", - " 43 SparseNormalizer LightGBM 0:02:29 0.4208 0.6102\n", - " 44 SparseNormalizer LightGBM 0:00:58 0.4316 0.6102\n", - " 45 TruncatedSVDWrapper LightGBM 0:00:30 0.3711 0.6102\n", - " 46 MaxAbsScaler LightGBM 0:00:34 0.3880 0.6102\n", - " 47 MaxAbsScaler LightGBM 0:00:31 0.4294 0.6102\n", - " 48 VotingEnsemble 0:02:30 0.6408 0.6408\n", - " 49 StackEnsemble 0:03:06 0.6409 0.6409\n" - ] - } - ], - "source": [ - "experiment=Experiment(ws, 'automated-ml-regression')\n", + "outputs": [], + "source": [ + "experiment = Experiment(ws, \"automated-ml-regression\")\n", "local_run = experiment.submit(automated_ml_config, show_output=True)" ] }, @@ -638,24 +402,9 @@ }, { "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "4631a71f61a44001a435d206ed91f1cb", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "_AutoMLWidget(widget_settings={'childWidgetDisplay': 'popup', 'send_telemetry': True, 'log_level': 'INFO', 'sd…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Inspect the run details using the provided widget\n", "RunDetails(local_run).show()" @@ -697,7 +446,7 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -710,7 +459,7 @@ "source": [ "## 4.2 Register the Fitted Model for Deployment\n", "\n", - "Registering a model means registering a one or more files that make up for a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. The model can be either come from Azure Machine Learning or any other location such as local machine. \n", + "Registering a model means registering one or more files that make up for a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. The model can be either come from Azure Machine Learning or any other location such as local machine. \n", "Below shows how a model is registered from an experiment run. 
\n", "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered.\n", "\n", @@ -719,25 +468,16 @@ }, { "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Registering model AutoML7e7a2c368best\n", - "AutoML7e7a2c368best\n" - ] - } - ], - "source": [ - "description = 'AutoML Model'\n", - "tags = {'area': \"nlp\", 'type': \"sentence similarity automl\"}\n", - "name = 'automl'\n", - "model = local_run.register_model(description = description, tags = tags)\n", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "description = \"AutoML Model\"\n", + "tags = {\"area\": \"nlp\", \"type\": \"sentence similarity automl\"}\n", + "name = \"automl\"\n", + "model = local_run.register_model(description=description, tags=tags)\n", "\n", - "print(local_run.model_id) " + "print(local_run.model_id)" ] }, { @@ -752,17 +492,9 @@ }, { "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Overwriting score.py\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%%writefile score.py\n", "import pickle\n", @@ -775,35 +507,38 @@ "\n", "def init():\n", " global model\n", - " model_path = Model.get_model_path(model_name = '<>') # this name is model.id of model that we want to deploy\n", + " model_path = Model.get_model_path(\n", + " model_name=\"<>\"\n", + " ) # this name is model.id of model that we want to deploy\n", " # deserialize the model file back into a sklearn model\n", " model = joblib.load(model_path)\n", "\n", + "\n", "def run(rawdata):\n", " try:\n", - " data = json.loads(rawdata)['data']\n", + " data = json.loads(rawdata)[\"data\"]\n", " data = numpy.array(data)\n", " result = model.predict(data)\n", " except Exception as e:\n", " result = str(e)\n", " return json.dumps({\"error\": result})\n", - " return json.dumps({\"result\":result.tolist()})" + " return json.dumps({\"result\": result.tolist()})" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Substitute the actual model id in the script file.\n", - "script_file_name = 'score.py'\n", + "script_file_name = \"score.py\"\n", "\n", - "with open(script_file_name, 'r') as cefr:\n", + "with open(script_file_name, \"r\") as cefr:\n", " content = cefr.read()\n", "\n", - "with open(script_file_name, 'w') as cefw:\n", - " cefw.write(content.replace('<>', local_run.model_id))" + "with open(script_file_name, \"w\") as cefw:\n", + " cefw.write(content.replace(\"<>\", local_run.model_id))" ] }, { @@ -817,63 +552,31 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "experiment = Experiment(ws, 'automated-ml-regression')\n", - "ml_run = AutoMLRun(experiment = experiment, run_id = local_run.id)" + "experiment = Experiment(ws, \"automated-ml-regression\")\n", + "ml_run = AutoMLRun(experiment=experiment, run_id=local_run.id)" ] }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "No issues found in the SDK package versions.\n" - ] - } - ], + "outputs": [], "source": [ - "best_iteration = int(best_run.id.split(\"_\")[-1]) #get the appended iteration number for the best model\n", - 
"dependencies = ml_run.get_run_sdk_dependencies(iteration = best_iteration)" + "best_iteration = int(\n", + " best_run.id.split(\"_\")[-1]\n", + ") # get the appended iteration number for the best model\n", + "dependencies = ml_run.get_run_sdk_dependencies(iteration=best_iteration)" ] }, { "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'azureml-train-automl': '1.0.43.1',\n", - " 'azureml-automl-core': '1.0.43',\n", - " 'azureml': '0.2.7',\n", - " 'azureml-widgets': '1.0.43.1',\n", - " 'azureml-train': '1.0.43',\n", - " 'azureml-train-restclients-hyperdrive': '1.0.43',\n", - " 'azureml-train-core': '1.0.43',\n", - " 'azureml-telemetry': '1.0.43',\n", - " 'azureml-sdk': '1.0.43',\n", - " 'azureml-pipeline': '1.0.43',\n", - " 'azureml-pipeline-steps': '1.0.43',\n", - " 'azureml-pipeline-core': '1.0.43',\n", - " 'azureml-dataprep': '1.1.5',\n", - " 'azureml-dataprep-native': '13.0.0',\n", - " 'azureml-core': '1.0.43.1',\n", - " 'azureml-contrib-brainwave': '1.0.33'}" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "dependencies" ] @@ -887,27 +590,18 @@ }, { "cell_type": "code", - "execution_count": 20, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'automlenv.yml'" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80'],\n", - " pip_packages=['azureml-sdk[automl]==1.0.43.*'], \n", - " python_version = '3.6.8')\n", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "myenv = CondaDependencies.create(\n", + " conda_packages=[\"numpy\", \"scikit-learn\", \"py-xgboost<=0.80\"],\n", + " pip_packages=[\"azureml-sdk[automl]==1.0.43.*\"],\n", + " python_version=\"3.6.8\",\n", + ")\n", "\n", - "conda_env_file_name = 'automlenv.yml'\n", - "myenv.save_to_file('.', conda_env_file_name)" + "conda_env_file_name = \"automlenv.yml\"\n", + "myenv.save_to_file(\".\", conda_env_file_name)" ] }, { @@ -928,34 +622,27 @@ }, { "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating image\n", - "Running................................................\n", - "Succeeded\n", - "Image creation operation finished for image automl-image:9, operation \"Succeeded\"\n" - ] - } - ], - "source": [ - "image_config = ContainerImage.image_configuration(execution_script = script_file_name,\n", - " runtime = \"python\",\n", - " conda_file = conda_env_file_name,\n", - " description = \"Image with automl model\",\n", - " tags = {'area': \"nlp\", 'type': \"sentencesimilarity automl\"})\n", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "image_config = ContainerImage.image_configuration(\n", + " execution_script=script_file_name,\n", + " runtime=\"python\",\n", + " conda_file=conda_env_file_name,\n", + " description=\"Image with automl model\",\n", + " tags={\"area\": \"nlp\", \"type\": \"sentencesimilarity automl\"},\n", + ")\n", "\n", - "image = ContainerImage.create(name = \"automl-image\",\n", - " # this is the model object\n", - " models = [model],\n", - " image_config = image_config,\n", - " workspace = ws)\n", + "image = ContainerImage.create(\n", + " name=\"automl-image\",\n", + " # this is the model object\n", + " 
models=[model],\n", + " image_config=image_config,\n", + " workspace=ws,\n", + ")\n", "\n", - "image.wait_for_creation(show_output = True)" + "image.wait_for_creation(show_output=True)" ] }, { @@ -987,51 +674,54 @@ "source": [ "Azure Container Instances is mostly used for deploying your models as a web service if one or more of the following conditions is true:\n", "1. You need to quickly deploy and validate your model.\n", - "2. You are testing a model that is under development." + "2. You are testing a model that is under development.\n", + "\n", + "To set them up properly, we need to indicate the number of CPU cores and the amount of memory we want to allocate to our web service." ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "#Set the web service configuration\n", - "aci_config = AciWebservice.deploy_configuration(cpu_cores = CPU_CORES, \n", - " memory_gb = MEMORY_GB)" + "# Set the web service configuration\n", + "aci_config = AciWebservice.deploy_configuration(\n", + " cpu_cores=CPU_CORES, memory_gb=MEMORY_GB\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Below step sometimes takes time. " + "The final step to deploying our web service is to call `WebService.deploy_from_image()`. This function uses the Docker image and the deployment configuration we created above to perform the following:\n", + "1. Deploy the docker image to an Azure Container Instance\n", + "2. Call the init() function in our scoring file\n", + "3. Provide an HTTP endpoint for scoring calls\n", + "\n", + "The deploy_from_image method requires the following parameters:\n", + "\n", + "1. workspace: the workspace containing the service\n", + "2. name: a unique name used to identify the service in the workspace\n", + "3. image: a docker image object that contains the environment needed for scoring/inference\n", + "4. 
deployment_config: a configuration object describing the compute type\n", + "\n", + "**Note:** The web service creation can take a few minutes " ] }, { "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating service\n", - "Running.....................\n", - "SucceededACI service creation operation finished, operation \"Succeeded\"\n", - "Healthy\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# deploy image as web service\n", - "aci_service = Webservice.deploy_from_image(workspace = ws, \n", - " name = 'aci-automl-service-1',\n", - " image = image,\n", - " deployment_config = aci_config)\n", + "aci_service = Webservice.deploy_from_image(\n", + " workspace=ws, name=\"aci-automl-service-1\", image=image, deployment_config=aci_config\n", + ")\n", "\n", - "aci_service.wait_for_deployment(show_output = True)\n", + "aci_service.wait_for_deployment(show_output=True)\n", "print(aci_service.state)" ] }, @@ -1048,7 +738,7 @@ "metadata": {}, "outputs": [], "source": [ - "#print(aci_service.get_logs())" + "# print(aci_service.get_logs())" ] }, { @@ -1064,10 +754,10 @@ "metadata": {}, "outputs": [], "source": [ - "#aci_service = Webservice(workspace=ws, name='<>')\n", + "# aci_service = Webservice(workspace=ws, name='<>')\n", "\n", "# to use the webservice\n", - "#aci_service.run()" + "# aci_service.run()" ] }, { @@ -1076,57 +766,47 @@ "source": [ "## 4.7 Test Deployed Model @Courtney: can you check this section too for the values of the output & description\n", "\n", - "Testing the deployed model is nothing but calling the created webservice.
\n", - "The deployed model can be tested by passing list of sentence pairs. The output will be a score between 1-5 with 5 being identical sentences and 1 indicates that the sentences are totally different.\n", - "\n", - "The run method expects input in json format. Run() method retrieves API keys behind the scenes to make sure that call is authenticated. " + "Testing the deployed model means running the created webservice.
\n", + "The deployed model can be tested by passing list of sentence pairs. The output will be a score between 1-5 with 5 being identical sentences and 1 indicates that the sentences are totally different.\n" ] }, { "cell_type": "code", - "execution_count": 37, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "sentences = [['This is sentence1', 'This is sentence1'],\n", - " ['A hungry cat.', 'A sleeping cat'],\n", - " ['Its summer time ', 'Winter is coming']]\n", - "data = {'data': sentences}\n", + "sentences = [\n", + " [\"This is sentence1\", \"This is sentence1\"],\n", + " [\"A hungry cat.\", \"A sleeping cat\"],\n", + " [\"Its summer time \", \"Winter is coming\"],\n", + "]\n", + "data = {\"data\": sentences}\n", "data = json.dumps(data)" ] }, { "cell_type": "code", - "execution_count": 38, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Time elapsed: 0.1937\n", - "Number of samples predicted: 3\n", - "[1.844837748433727, 2.072780308418169, 2.0510668030610644]\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Set up a Timer to see how long the model takes to predict\n", "t = Timer()\n", "\n", "t.start()\n", - "score = aci_service.run(input_data = data)\n", + "score = aci_service.run(input_data=data)\n", "t.stop()\n", "\n", - "print('Time elapsed: {}'.format(t))\n", + "print(\"Time elapsed: {}\".format(t))\n", "\n", "result = json.loads(score)\n", "try:\n", - " output = result['result']\n", - " print('Number of samples predicted: {}'.format(len(output)))\n", + " output = result[\"result\"]\n", + " print(\"Number of samples predicted: {}\".format(len(output)))\n", " print(output)\n", "except:\n", - " print(result['error'])" + " print(result[\"error\"])" ] }, { @@ -1146,62 +826,45 @@ }, { "cell_type": "code", - "execution_count": 31, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "test_y = test['score'].values.flatten()\n", - "test_x = test.drop('score', axis=1).values.tolist()\n", + "test_y = test[\"score\"].values.flatten()\n", + "test_x = test.drop(\"score\", axis=1).values.tolist()\n", "\n", - "data = {'data': test_x}\n", + "data = {\"data\": test_x}\n", "data = json.dumps(data)" ] }, { "cell_type": "code", - "execution_count": 33, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Time elapsed: 1.2612\n", - "Number of samples predicted: 1379\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Set up a Timer to see how long the model takes to predict\n", "t = Timer()\n", "\n", "t.start()\n", - "score = aci_service.run(input_data = data)\n", + "score = aci_service.run(input_data=data)\n", "t.stop()\n", "\n", - "print('Time elapsed: {}'.format(t))\n", + "print(\"Time elapsed: {}\".format(t))\n", "\n", "result = json.loads(score)\n", "try:\n", - " output = result['result']\n", - " print('Number of samples predicted: {}'.format(len(output)))\n", + " output = result[\"result\"]\n", + " print(\"Number of samples predicted: {}\".format(len(output)))\n", "except:\n", - " print(result['error'])" + " print(result[\"error\"])" ] }, { "cell_type": "code", - "execution_count": 34, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.6038286237427414\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(pearsonr(output, test_y)[0])" ] @@ -1210,7 +873,73 @@ "cell_type": "markdown", 
"metadata": {}, "source": [ - "For more examples to deploy Machine Learning models follow [MachineLearningNotebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) github repository." + "## 5. Clean up\n", + "Throughout the notebook, we used a workspace and Azure container instances. To get a sense of the cost we incurred, we can refer to this [calculator](https://azure.microsoft.com/en-us/pricing/calculator/). We can also navigate to the [Cost Management + Billing](https://ms.portal.azure.com/#blade/Microsoft_Azure_Billing/ModernBillingMenuBlade/Overview) pane on the portal, click on our subscription ID, and click on the Cost Analysis tab to check our credit usage.\n", + "

\n", + "In order not to incur extra costs, let's delete the resources we no longer need.\n", + "

\n", + "Once we have verified that our web service works well on ACI, we can delete it. This helps reduce [costs](https://azure.microsoft.com/en-us/pricing/details/container-instances/), since the container group we were paying for no longer exists, and allows us to keep our workspace clean." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# aci_service.delete()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "At this point, the main resource we are paying for is the Standard Azure Container Registry (ACR), which contains our Docker image. Details on pricing are available [here](https://azure.microsoft.com/en-us/pricing/details/container-registry/).\n", + "\n", + "We may decide to use our Docker image in a separate ACI or even in an AKS deployment. In that case, we should keep it available in our workspace. However, if we no longer have a use for it, we can delete it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# docker_image.delete()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If our goal is to continue using our workspace, we should keep it available. On the contrary, if we plan on no longer using it and its associated resources, we can delete it.\n", + "

\n", + "Note: Deleting the workspace will delete all the experiments, outputs, models, Docker images, deployments, etc. that we created in that workspace" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# ws.delete(delete_dependent_resources=True)\n", + "# This deletes our workspace, the container registry, the account storage, Application Insights and the key vault" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As mentioned above, Azure Container Instances tend to be used to develop and test deployments. They are typically configured with CPUs, which usually suffice when the number of requests per second is not too high. When working with several instances, we can configure them further by specifically allocating CPU resources to each of them.\n", + "\n", + "\n", + "For production requirements, i.e. when > 100 requests per second are expected, we recommend deploying models to Azure Kubernetes Service (AKS). It is a convenient infrastructure as it manages hosted Kubernetes environments, and makes it easy to deploy and manage containerized applications without container orchestration expertise. It also supports deployments with CPU clusters and deployments with GPU clusters.\n", + "\n", + "To see an example with Azure Kubernetes Service example, go to the [this notebook](https://github.com/microsoft/nlp/blob/courtney-janhavi-automl/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb)\n", + "\n", + "\n", + "For more examples on deployment follow [MachineLearningNotebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) github repository." ] }, { From 12043e13d78e114eb8ebaa5cc6bc412cf2772754 Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Thu, 27 Jun 2019 20:07:49 -0400 Subject: [PATCH 093/108] black formatting and some text addition in deployment section --- .../automl_local_deployment_aci.ipynb | 9 --------- 1 file changed, 9 deletions(-) diff --git a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb index 4d8d6cd79..2cecb7ec3 100644 --- a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb +++ b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb @@ -87,15 +87,6 @@ "The regression problem we will demonstrate is predicting sentence similarity scores on the STS Benchmark dataset. The [STS Benchmark dataset](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark#STS_benchmark_dataset_and_companion_dataset) contains a selection of English datasets that were used in Semantic Textual Similarity (STS) tasks 2012-2017. The dataset contains 8,628 sentence pairs with a human-labeled integer representing the sentences' similarity (ranging from 0, for no meaning overlap, to 5, meaning equivalence). The sentence pairs will be embedded using AutoML's built-in preprocessing, so we'll pass the sentences directly into the model." 
] }, - { - "cell_type": "code", - "execution_count": 39, - "metadata": {}, - "outputs": [], - "source": [ - "%load_ext blackcellmagic" - ] - }, { "cell_type": "code", "execution_count": null, From 5de635c42e64c46f334694b634eacb6e5066bac7 Mon Sep 17 00:00:00 2001 From: Janhavi Mahajan Date: Thu, 27 Jun 2019 20:59:26 -0400 Subject: [PATCH 094/108] [WIP]resolve review comments and black formatting --- ...automl_with_pipelines_deployment_aks.ipynb | 1075 +++++++---------- 1 file changed, 454 insertions(+), 621 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb b/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb index b9769a3dc..ee55dba64 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb @@ -13,7 +13,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# AzureML Pipeline, AutoML and AKS Deployment for Sentence Similarity" + "# AzureML Pipeline, AutoML, AKS Deployment for Sentence Similarity" ] }, { @@ -22,13 +22,13 @@ "source": [ "This notebook demonstrates how to use AzureML pipelines and AutoML to streamline the creation of a machine learning workflow for predicting sentence similarity. The pipeline contains two steps: \n", "1. PythonScriptStep: embeds sentences using a popular sentence embedding model, Google Universal Sentence Encoder\n", - "2. AutoMLStep: demonstrates how to use AutoML to automate model selection for predicting sentence similarity (regression)\n", + "2. AutoMLStep: demonstrates how to use Automated Machine Learning(AutoML) to automate model selection for predicting sentence similarity (regression)\n", "\n", "After creating the pipeline, the notebook demonstrates the deployment of our sentence similarity model using Azure Kubernetes Service (AKS).\n", "\n", "This notebook showcases how to use the following AzureML features: \n", "- AzureML Pipelines (PythonScriptStep and AutoMLStep)\n", - "- AutoML\n", + "- Automated Machine Learning\n", "- AmlCompute\n", "- Datastore\n", "- Logging" @@ -46,7 +46,7 @@ "2. [Data Preparation](#2.-Data-Preparation) \n", "3. [AzureML Setup](#3.-AzureML-Setup) \n", " * 3.1 [Link to or create a `Workspace`](#3.1-Link-to-or-create-a-Workspace) \n", - " * 3.2 [Set up an `Experiment` and logging](#3.2-Set-up-an-Experiment-and-logging) \n", + " * 3.2 [Set up an `Experiment` and Logging](#3.2-Set-up-an-Experiment-and-Logging) \n", " * 3.3 [Link `AmlCompute` compute target](#3.3-Link-AmlCompute-compute-target) \n", " * 3.4 [Upload data to `Datastore`](#3.4-Upload-data-to-Datastore) \n", "4. 
[Create AzureML Pipeline](#4.-Create-AzureML-Pipeline) \n", @@ -67,7 +67,7 @@ " * 6.4 [Image Creation](#6.4-Image-Creation) \n", " * 6.5 [Provision the AKS Cluster](#6.5-Provision-the-AKS-Cluster) \n", " * 6.6 [Deploy the image as a Web Service to Azure Kubernetes Service](#6.6-Deploy-the-image-as-a-Web-Service-to-Azure-Kubernetes-Service) \n", - " * 6.7 [Test Deployed Model](#6.7-Test-Deployed-Webservice)\n", + " * 6.7 [Test Deployed Model](#6.7-Test-Deployed-Webservice) \n", " \n" ] }, @@ -132,33 +132,15 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "scrolled": false }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - Some hub symbols are not available because TensorFlow version is less than 1.14\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Turning diagnostics collection on. \n", - "System version: 3.6.8 |Anaconda, Inc.| (default, Feb 21 2019, 18:30:04) [MSC v.1916 64 bit (AMD64)]\n", - "Azure ML SDK Version: 1.0.43\n", - "Pandas version: 0.23.4\n", - "Tensorflow Version: 1.13.1\n" - ] - } - ], + "outputs": [], "source": [ "# Set the environment path to find NLP\n", "import sys\n", + "\n", "sys.path.append(\"../../\")\n", "import time\n", "import logging\n", @@ -187,12 +169,14 @@ "# Tensorflow dependencies for Google Universal Sentence Encoder\n", "import tensorflow as tf\n", "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", "\n", "# AzureML packages\n", "import azureml as aml\n", "import logging\n", "from azureml.telemetry import set_diagnostics_collection\n", + "\n", "set_diagnostics_collection(send_diagnostics=True)\n", "from azureml.core import Datastore, Experiment\n", "from azureml.core.compute import ComputeTarget, AmlCompute\n", @@ -205,7 +189,7 @@ "from azureml.train.automl import AutoMLStep, AutoMLStepRun, AutoMLConfig\n", "from azureml.pipeline.core import Pipeline, PipelineData, TrainingOutput\n", "from azureml.pipeline.steps import PythonScriptStep\n", - "from azureml.data.data_reference import DataReference \n", + "from azureml.data.data_reference import DataReference\n", "from azureml.widgets import RunDetails\n", "\n", "print(\"System version: {}\".format(sys.version))\n", @@ -216,11 +200,12 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "BASE_DATA_PATH = '../../data'" + "BASE_DATA_PATH = \"../../data\"\n", + "EMBEDDED_DATA_REF = os.environ[\"AZUREML_DATAREFERENCE_embedded_data\"]" ] }, { @@ -241,52 +226,9 @@ }, { "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 175KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:02<00:00, 186KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - 
"100%|██████████████████████████████████████████████████████████████████████████████████| 401/401 [00:01<00:00, 220KB/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Data downloaded to ../../data\\raw\\stsbenchmark\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Load in the raw datasets as pandas dataframes\n", "train_raw = stsbenchmark.load_pandas_df(BASE_DATA_PATH, file_split=\"train\")\n", @@ -296,7 +238,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -309,7 +251,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -321,19 +263,9 @@ }, { "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Training set has 5749 sentences\n", - "Development set has 1500 sentences\n", - "Testing set has 1379 sentences\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(\"Training set has {} sentences\".format(len(train)))\n", "print(\"Development set has {} sentences\".format(len(dev)))\n", @@ -342,105 +274,23 @@ }, { "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
scoresentence1sentence2
05.00a plane is taking off.an air plane is taking off.
13.80a man is playing a large flute.a man is playing a flute.
23.80a man is spreading shreded cheese on a pizza.a man is spreading shredded cheese on an uncoo...
32.60three men are playing chess.two men are playing chess.
44.25a man is playing the cello.a man seated is playing the cello.
\n", - "
" - ], - "text/plain": [ - " score sentence1 \\\n", - "0 5.00 a plane is taking off. \n", - "1 3.80 a man is playing a large flute. \n", - "2 3.80 a man is spreading shreded cheese on a pizza. \n", - "3 2.60 three men are playing chess. \n", - "4 4.25 a man is playing the cello. \n", - "\n", - " sentence2 \n", - "0 an air plane is taking off. \n", - "1 a man is playing a flute. \n", - "2 a man is spreading shredded cheese on an uncoo... \n", - "3 two men are playing chess. \n", - "4 a man seated is playing the cello. " - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "train.head(5)" + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "train.head()" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "#Save the cleaned data\n", - "if not os.path.isdir('data'):\n", - " os.mkdir('data')\n", - " \n", + "# Save the cleaned data\n", + "if not os.path.isdir(\"data\"):\n", + " os.mkdir(\"data\")\n", + "\n", "train.to_csv(\"data/train.csv\", index=False)\n", "test.to_csv(\"data/test.csv\", index=False)\n", "dev.to_csv(\"data/dev.csv\", index=False)" @@ -473,40 +323,17 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Performing interactive authentication. Please follow the instructions on the terminal.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", - "WARNING - You have logged in. Now let us find all the subscriptions to which you have access...\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Interactive authentication successfully completed.\n" - ] - } - ], + "outputs": [], "source": [ "ws = azureml_utils.get_or_create_workspace(\n", " subscription_id=\"\",\n", " resource_group=\"\",\n", " workspace_name=\"\",\n", - " workspace_region=\"\"\n", + " workspace_region=\"\",\n", ")" ] }, @@ -516,35 +343,37 @@ "metadata": {}, "outputs": [], "source": [ - "print('Workspace name: ' + ws.name, \n", - " 'Azure region: ' + ws.location, \n", - " 'Subscription id: ' + ws.subscription_id, \n", - " 'Resource group: ' + ws.resource_group, sep='\\n')" + "print(\n", + " \"Workspace name: \" + ws.name,\n", + " \"Azure region: \" + ws.location,\n", + " \"Subscription id: \" + ws.subscription_id,\n", + " \"Resource group: \" + ws.resource_group,\n", + " sep=\"\\n\",\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 3.2 Set up an Experiment and logging" + "## 3.2 Set up an Experiment and Logging" ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Make a folder for the project\n", - "project_folder = './automl-sentence-similarity'\n", - "if not os.path.exists(project_folder):\n", - " os.makedirs(project_folder)\n", + "project_folder = \"./automl-sentence-similarity\"\n", + "os.makedirs(project_folder, exist_ok=True)\n", "\n", "# Set up an experiment\n", - "experiment_name = 'automl-sentence-similarity'\n", + "experiment_name = \"automl-sentence-similarity\"\n", "experiment = Experiment(ws, experiment_name)\n", "\n", - "#Add logging to our experiment\n", + "# Add logging to our experiment\n", "run = experiment.start_logging()" ] 
}, @@ -552,7 +381,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 3.3 Link AmlCompute compute target" + "## 3.3 Link AmlCompute Compute Target" ] }, { @@ -564,37 +393,28 @@ }, { "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Found existing compute target.\n", - "{'currentNodeCount': 0, 'targetNodeCount': 0, 'nodeStateCounts': {'preparingNodeCount': 0, 'runningNodeCount': 0, 'idleNodeCount': 0, 'unusableNodeCount': 0, 'leavingNodeCount': 0, 'preemptedNodeCount': 0}, 'allocationState': 'Steady', 'allocationStateTransitionTime': '2019-06-25T23:36:16.901000+00:00', 'errors': None, 'creationTime': '2019-06-20T23:37:54.322324+00:00', 'modifiedTime': '2019-06-20T23:38:26.084645+00:00', 'provisioningState': 'Succeeded', 'provisioningStateTransitionTime': None, 'scaleSettings': {'minNodeCount': 0, 'maxNodeCount': 4, 'nodeIdleTimeBeforeScaleDown': 'PT120S'}, 'vmPriority': 'Dedicated', 'vmSize': 'STANDARD_NC6'}\n" - ] - } - ], - "source": [ - "# choose a name for your cluster\n", - "#cluster_name = \"<>\"\n", - "cluster_name = \"gpu-scoring-jm\"\n", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# choose your cluster\n", + "cluster_name = \"gpucluster\"\n", "\n", "try:\n", " compute_target = ComputeTarget(workspace=ws, name=cluster_name)\n", - " print('Found existing compute target.')\n", + " print(\"Found existing compute target.\")\n", "except ComputeTargetException:\n", - " print('Creating a new compute target...')\n", - " compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_NC6',\n", - " max_nodes=4)\n", + " print(\"Creating a new compute target...\")\n", + " compute_config = AmlCompute.provisioning_configuration(\n", + " vm_size=\"STANDARD_NC6\", max_nodes=4\n", + " )\n", "\n", " # create the cluster\n", " compute_target = ComputeTarget.create(ws, cluster_name, compute_config)\n", "\n", " compute_target.wait_for_completion(show_output=True)\n", "\n", - "# use get_status() to get a detailed status for the current AmlCompute. 
\n", + "# use get_status() to get a detailed status for the current AmlCompute.\n", "print(compute_target.get_status().serialize())" ] }, @@ -614,39 +434,21 @@ }, { "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Uploading ./data\\dev.csv\n", - "Uploading ./data\\test.csv\n", - "Uploading ./data\\train.csv\n", - "Uploaded ./data\\dev.csv, 1 files out of an estimated total of 3\n", - "Uploaded ./data\\test.csv, 2 files out of an estimated total of 3\n", - "Uploaded ./data\\train.csv, 3 files out of an estimated total of 3\n" - ] - }, - { - "data": { - "text/plain": [ - "$AZUREML_DATAREFERENCE_fe1e1b2408a441cb9aba61c4686a85c2" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# Select a specific datastore or you can call ws.get_default_datastore()\n", - "datastore_name = 'workspacefilestore'\n", - "ds = ws.datastores[datastore_name] \n", + "datastore_name = \"workspacefilestore\"\n", + "ds = ws.datastores[datastore_name]\n", "\n", "# Upload files in data folder to the datastore\n", - "ds.upload(src_dir='./data', target_path='stsbenchmark_data', overwrite=True, show_progress=True)" + "ds.upload(\n", + " src_dir=\"./data\",\n", + " target_path=\"stsbenchmark_data\",\n", + " overwrite=True,\n", + " show_progress=True,\n", + ")" ] }, { @@ -658,14 +460,16 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "input_data = DataReference(datastore=ds, \n", - " data_reference_name=\"stsbenchmark\",\n", - " path_on_datastore='stsbenchmark_data/',\n", - " overwrite=False)" + "input_data = DataReference(\n", + " datastore=ds,\n", + " data_reference_name=\"stsbenchmark\",\n", + " path_on_datastore=\"stsbenchmark_data/\",\n", + " overwrite=False,\n", + ")" ] }, { @@ -695,22 +499,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "First we set up a `RunConguration` object which configures the execution environment for an experiment (sets up the conda dependencies, etc.)" + "First we set up a `RunConfiguration` object which configures the execution environment for an experiment (sets up the conda dependencies, etc.)" ] }, { "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "run config is ready\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# create a new RunConfig object\n", "conda_run_config = RunConfiguration(framework=\"python\")\n", @@ -724,11 +520,25 @@ "# Use conda_dependencies.yml to create a conda environment in the Docker image for execution\n", "conda_run_config.environment.python.user_managed_dependencies = False\n", "\n", - "conda_run_config.environment.python.conda_dependencies = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'azureml-sdk', 'azureml-dataprep', 'azureml-train-automl==1.0.33'], \n", - " conda_packages=['numpy', 'py-xgboost<=0.80', 'pandas', 'tensorflow', 'tensorflow-hub', 'scikit-learn'], \n", - " pin_sdk_version=False)\n", + "conda_run_config.environment.python.conda_dependencies = CondaDependencies.create(\n", + " pip_packages=[\n", + " \"azureml-sdk[automl]\",\n", + " \"azureml-sdk\",\n", + " \"azureml-dataprep\",\n", + " \"azureml-train-automl==1.0.33\",\n", + " ],\n", + " conda_packages=[\n", + " \"numpy\",\n", + " 
\"py-xgboost<=0.80\",\n", + " \"pandas\",\n", + " \"tensorflow\",\n", + " \"tensorflow-hub\",\n", + " \"scikit-learn\",\n", + " ],\n", + " pin_sdk_version=False,\n", + ")\n", "\n", - "print('run config is ready')" + "print(\"run config is ready\")" ] }, { @@ -759,17 +569,9 @@ }, { "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Writing ./automl-sentence-similarity/embed.py\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%%writefile $project_folder/embed.py\n", "import argparse\n", @@ -779,7 +581,9 @@ "import numpy as np\n", "import tensorflow as tf\n", "import tensorflow_hub as hub\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", "\n", "def google_encoder(dataset):\n", " \"\"\" Function that embeds sentences using the Google Universal\n", @@ -800,18 +604,20 @@ " # Apply embedding model and normalize the input\n", " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", - " \n", + "\n", " with tf.Session() as session:\n", " session.run(tf.global_variables_initializer())\n", " session.run(tf.tables_initializer())\n", " emb1, emb2 = session.run(\n", - " [sts_encode1, sts_encode2],\n", - " feed_dict={\n", - " sts_input1: dataset['sentence1'],\n", - " sts_input2: dataset['sentence2']\n", - " })\n", + " [sts_encode1, sts_encode2],\n", + " feed_dict={\n", + " sts_input1: dataset[\"sentence1\"],\n", + " sts_input2: dataset[\"sentence2\"],\n", + " },\n", + " )\n", " return emb1, emb2\n", "\n", + "\n", "def feature_engineering(dataset):\n", " \"\"\"Extracts embedding features from the dataset and returns\n", " features and target in a dataframe\n", @@ -826,11 +632,14 @@ " scores: list of target variables\n", " \"\"\"\n", " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", - " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", + " n_google = google_USE_emb1.shape[1] # length of the embeddings\n", " df = np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", - " names = ['USEEmb1_'+str(i) for i in range(n_google)]+['USEEmb2_'+str(i) for i in range(n_google)]\n", + " names = [\"USEEmb1_\" + str(i) for i in range(n_google)] + [\n", + " \"USEEmb2_\" + str(i) for i in range(n_google)\n", + " ]\n", " df = pd.DataFrame(df, columns=names)\n", - " return df, dataset['score']\n", + " return df, dataset[\"score\"]\n", + "\n", "\n", "def write_output(df, path, name):\n", " \"\"\"Write dataframes to correct path\"\"\"\n", @@ -838,7 +647,8 @@ " print(\"%s created\" % path)\n", " df.to_csv(path + \"/\" + name, index=False)\n", "\n", - "#Parse arguments\n", + "\n", + "# Parse arguments\n", "parser = argparse.ArgumentParser()\n", "parser.add_argument(\"--sentence_data\", type=str)\n", "parser.add_argument(\"--embedded_data\", type=str)\n", @@ -848,21 +658,25 @@ "module_url = \"https://tfhub.dev/google/universal-sentence-encoder-large/3\"\n", "embedding_model = hub.Module(module_url)\n", "\n", - "#Read data \n", + "# Read data\n", "train = pd.read_csv(args.sentence_data + \"/train.csv\")\n", "dev = pd.read_csv(args.sentence_data + \"/dev.csv\")\n", "\n", - "#Get Google USE features\n", + "# Get Google USE features\n", "training_data, training_scores = feature_engineering(train)\n", "validation_data, validation_scores = 
feature_engineering(dev)\n", "\n", - "#Write out training data to Datastore\n", + "# Write out training data to Datastore\n", "write_output(training_data, args.embedded_data, \"X_train.csv\")\n", - "write_output(pd.DataFrame(training_scores, columns=['score']), args.embedded_data, \"y_train.csv\")\n", + "write_output(\n", + " pd.DataFrame(training_scores, columns=[\"score\"]), args.embedded_data, \"y_train.csv\"\n", + ")\n", "\n", - "#Write out validation data to Datastore\n", + "# Write out validation data to Datastore\n", "write_output(validation_data, args.embedded_data, \"X_dev.csv\")\n", - "write_output(pd.DataFrame(validation_scores, columns=['score']), args.embedded_data, \"y_dev.csv\")" + "write_output(\n", + " pd.DataFrame(validation_scores, columns=[\"score\"]), args.embedded_data, \"y_dev.csv\"\n", + ")" ] }, { @@ -881,7 +695,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -906,30 +720,21 @@ }, { "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "WARNING - Parameter 'hash_paths' will be deprecated. All files under source_directory will be hashed except files listed in .amlignore or .gitignore\n" - ] - } - ], - "source": [ - "embedStep = PythonScriptStep(\n", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "embed_step = PythonScriptStep(\n", " name=\"Embed\",\n", - " script_name=\"embed.py\", \n", - " arguments=[\"--embedded_data\", embedded_data,\n", - " \"--sentence_data\", input_data],\n", + " script_name=\"embed.py\",\n", + " arguments=[\"--embedded_data\", embedded_data, \"--sentence_data\", input_data],\n", " inputs=[input_data],\n", " outputs=[embedded_data],\n", " compute_target=compute_target,\n", - " runconfig = conda_run_config,\n", + " runconfig=conda_run_config,\n", " hash_paths=[\"embed.py\"],\n", " source_directory=project_folder,\n", - " allow_reuse=True\n", + " allow_reuse=True,\n", ")" ] }, @@ -964,30 +769,28 @@ }, { "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Writing ./automl-sentence-similarity/get_data.py\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%%writefile $project_folder/get_data.py\n", "\n", "import os\n", "import pandas as pd\n", "\n", + "\n", "def get_data():\n", " \"\"\"Function needed to load data for use on remote AutoML experiments\"\"\"\n", - " X_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_train.csv\")\n", - " y_train = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_train.csv\")\n", - " X_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/X_dev.csv\")\n", - " y_dev = pd.read_csv(os.environ['AZUREML_DATAREFERENCE_embedded_data'] + \"/y_dev.csv\")\n", - " return { \"X\" : X_train.values, \"y\" : y_train.values.flatten(), \"X_valid\": X_dev.values, \"y_valid\": y_dev.values.flatten()}" + " X_train = pd.read_csv(EMBEDDED_DATA_REF + \"/X_train.csv\")\n", + " y_train = pd.read_csv(EMBEDDED_DATA_REF + \"/y_train.csv\")\n", + " X_dev = pd.read_csv(EMBEDDED_DATA_REF + \"/X_dev.csv\")\n", + " y_dev = pd.read_csv(EMBEDDED_DATA_REF + \"/y_dev.csv\")\n", + " return {\n", + " \"X\": X_train.values,\n", + " \"y\": y_train.values.flatten(),\n", + " \"X_valid\": X_dev.values,\n", + " \"y_valid\": y_dev.values.flatten(),\n", + " }" ] }, { @@ 
-1038,24 +841,27 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "automl_settings = {\n", - " \"iteration_timeout_minutes\": 15, #How long each iteration can take before moving on\n", - " \"iterations\": 50, #Number of algorithm options to try\n", - " \"primary_metric\": 'spearman_correlation', #Metric to optimize\n", - " \"preprocess\": True, #Whether dataset preprocessing should be applied\n", - " \"verbosity\": logging.INFO\n", + " \"iteration_timeout_minutes\": 15, # How long each iteration can take before moving on\n", + " \"iterations\": 50, # Number of algorithm options to try\n", + " \"primary_metric\": \"spearman_correlation\", # Metric to optimize\n", + " \"preprocess\": True, # Whether dataset preprocessing should be applied\n", + " \"verbosity\": logging.INFO,\n", "}\n", - "automl_config = AutoMLConfig(task = 'regression', #type of task: classification, regression or forecasting\n", - " debug_log = 'automl_errors.log',\n", - " path = project_folder,\n", - " compute_target=compute_target,\n", - " run_configuration=conda_run_config,\n", - " data_script = project_folder + \"/get_data.py\", #local path to script with get_data() function\n", - " **automl_settings)" + "automl_config = AutoMLConfig(\n", + " task=\"regression\", # type of task: classification, regression or forecasting\n", + " debug_log=\"automl_errors.log\",\n", + " path=project_folder,\n", + " compute_target=compute_target,\n", + " run_configuration=conda_run_config,\n", + " data_script=project_folder\n", + " + \"/get_data.py\", # local path to script with get_data() function\n", + " **automl_settings\n", + ")" ] }, { @@ -1074,37 +880,45 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# Create PipelineData objects for tracking AutoML metrics \n", - "metrics_output_name = 'metrics_output'\n", - "best_model_output_name = 'best_model_output'\n", + "# Create PipelineData objects for tracking AutoML metrics\n", "\n", - "metrics_data = PipelineData(name='metrics_data',\n", - " datastore=ds,\n", - " pipeline_output_name=metrics_output_name,\n", - " training_output=TrainingOutput(type='Metrics'))\n", - "model_data = PipelineData(name='model_data',\n", - " datastore=ds,\n", - " pipeline_output_name=best_model_output_name,\n", - " training_output=TrainingOutput(type='Model'))" + "metrics_data = PipelineData(\n", + " name=\"metrics_data\",\n", + " datastore=ds,\n", + " pipeline_output_name=\"metrics_output\",\n", + " training_output=TrainingOutput(type=\"Metrics\"),\n", + ")\n", + "model_data = PipelineData(\n", + " name=\"model_data\",\n", + " datastore=ds,\n", + " pipeline_output_name=\"best_model_output\",\n", + " training_output=TrainingOutput(type=\"Model\"),\n", + ")" ] }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "automl_step = AutoMLStep(\n", - " name='AutoML',\n", - " automl_config=automl_config, #the AutoMLConfig object created previously\n", - " inputs=[embedded_data], #inputs is the PipelineData that was the output of the previous pipeline step\n", - " outputs=[metrics_data, model_data], #PipelineData objects to reference metric and model information\n", + " name=\"AutoML\",\n", + " automl_config=automl_config, # the AutoMLConfig object created previously\n", + " inputs=[\n", + " embedded_data\n", + " ], # inputs is the PipelineData that was the output of the previous pipeline step\n", + " 
outputs=[\n", + " metrics_data,\n", + " model_data,\n", + " ], # PipelineData objects to reference metric and model information\n", " hash_paths=[\"get_data.py\"],\n", - " allow_reuse=True)" + " allow_reuse=True,\n", + ")" ] }, { @@ -1123,33 +937,22 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "#automl_step.run_after(embedStep)\n", "pipeline = Pipeline(\n", - " description=\"pipeline_embed_automl\", #give a name for the pipeline\n", - " workspace=ws, \n", - " steps=[embedStep, automl_step])" + " description=\"pipeline_embed_automl\", # give a name for the pipeline\n", + " workspace=ws,\n", + " steps=[embed_step, automl_step],\n", + ")" ] }, { "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Created step Embed [42ea8959][75c6d120-9cb7-418c-b54b-4eef214eae5d], (This step will run and generate new outputs)\n", - "Created step AutoML [0c896fa5][eb49a173-bec7-4812-b873-7d5a0d2a051e], (This step is eligible to reuse a previous run's output)\n", - "Using data reference stsbenchmark for StepId [9b655bdc][e3340790-c54f-4147-8dd0-bcb80a9b7b46], (Consumers of this data are eligible to reuse prior runs.)\n", - "Submitted pipeline run: 318167e4-994b-43a0-96a1-54fc159043a8\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "pipeline_run = experiment.submit(pipeline)" ] @@ -1184,7 +987,9 @@ "metadata": {}, "outputs": [], "source": [ - "pipeline_run.wait_for_completion(show_output=True) #show console output while run is in progress" + "pipeline_run.wait_for_completion(\n", + " show_output=True\n", + ") # show console output while run is in progress" ] }, { @@ -1202,7 +1007,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# 6. Deploy Sentence Similarity Model" + "# 6. Deploy Sentence Similarity Model\n", + "\n", + "Deploying an Azure Machine Learning model as a web service creates a REST API. You can send data to this API and receive the prediction returned by the model.\n", + "In general, you create a webservice by deploying a model as an image to a Compute Target.\n", + "\n", + "Some of the Compute Targets are: \n", + "1. Azure Container Instance\n", + "2. Azure Kubernetes Service\n", + "3. Local web service\n", + "\n", + "The general workflow for deploying a model is as follows:\n", + "1. Register a model\n", + "2. Prepare to deploy\n", + "3. Deploy the model to the compute target\n", + "4. Test the deployed model (webservice)\n", + "\n", + "In this notebook we walk you through the process of creating a webservice running on Azure Kubernetes Service(AKS) by deploying the model as an image. AKS is good for high-scale production deployments.It provides fast response time and autoscaling of the deployed service. Cluster autoscaling is not supported through the Azure Machine Learning SDK. \n", + "\n", + "You can find more information on deploying and serving models [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)\n" ] }, { @@ -1211,35 +1034,35 @@ "source": [ "## 6.1 Register/Retrieve AutoML and Google Universal Sentence Encoder Models for Deployment\n", "\n", + "Registering a model means registering one or more files that make up for a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. 
The model can either come from Azure Machine Learning or any other location, such as your local machine.\n",
+    "\n",
+    "\n",
+    "See other ways to register a model [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)\n",
+    "\n",
+    "Below we show how to register a new model and also how to retrieve and register an existing model.\n",
+    "\n",
    "### Register a new automl model\n",
    "Register the best AutoML model based on the pipeline results or load the saved model"
   ]
  },
  {
   "cell_type": "code",
-   "execution_count": 60,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Registering model 76a6169d7f364bdbest\n",
-      "76a6169d7f364bdbest\n"
-     ]
-    }
-   ],
-   "source": [
-    "automl_step_run = AutoMLStepRun(step_run=pipeline_run.find_step_run('AutoML')[0])\n",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "automl_step_run = AutoMLStepRun(step_run=pipeline_run.find_step_run(\"AutoML\")[0])\n",
    "# to get the outputs\n",
    "best_run, fitted_model = automl_step_run.get_output()\n",
    "\n",
    "# to register the fitted_mode\n",
-    "description = 'Pipeline Model'\n",
-    "tags = {'area': \"nlp\", 'type': \"sentencesimilarity pipelines\"}\n",
-    "model = automl_step_run.register_model(description = description, tags = tags)\n",
+    "description = \"Pipeline Model\"\n",
+    "tags = {\"area\": \"nlp\", \"type\": \"sentencesimilarity pipelines\"}\n",
+    "model = automl_step_run.register_model(description=description, tags=tags)\n",
    "automl_model_name = automl_step_run.model_id\n",
-    "print(automl_step_run.model_id) # Use this id to deploy the model as a web service in Azure."
+    "print(\n",
+    "    automl_step_run.model_id\n",
+    ") # Use this id to deploy the model as a web service in Azure."
]
  },
  {
@@ -1247,7 +1070,7 @@
   "metadata": {},
   "source": [
    "### Retrieve existing model from Azure\n",
-    "If you already have a best model then you can skip registering the model by just retrieving the latest version of model by providing it's name"
+    "If you already have a best model then you can skip registering the model by just retrieving the latest version of the model by providing its name"
   ]
  },
  {
@@ -1256,8 +1079,8 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "automl_model_name = 'f775e327caee4f7best' # best fit model registered in the workspace\n",
-    "model = Model(ws, name= automl_model_name)\n",
+    "automl_model_name = \"76a6169d7f364bdbest\"  # best fit model registered in the workspace\n",
+    "model = Model(ws, name=automl_model_name)\n",
    "print(\"Found model with name\", automl_model_name)"
   ]
  },
@@ -1271,32 +1094,23 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 54,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Registering model googleUSEmodel\n",
-      "Registered googleUSEembeddings model\n"
-     ]
-    }
-   ],
-   "source": [
-    "#set location for where to download google tensorflow model\n",
-    "os.environ['TFHUB_CACHE_DIR'] = './googleUSE' \n",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# set location for where to download google tensorflow model\n",
+    "os.environ[\"TFHUB_CACHE_DIR\"] = \"./googleUSE\"\n",
    "# download model\n",
    "hub.Module(\"https://tfhub.dev/google/universal-sentence-encoder-large/3\")\n",
    "# register model\n",
    "embedding_model = Model.register(\n",
-    "    model_path = \"googleUSE\",\n",
-    "    model_name = \"googleUSEmodel\",\n",
-    "    tags = {\"Model\": \"GoogleUSE\"},\n",
-    "    description = \"Google Universal Sentence Embedding pretrained model\",\n",
-    "    workspace = ws\n",
-    "    )\n",
-    "print('Registered googleUSEembeddings model')"
+    "    model_path=\"googleUSE\",\n",
+    "    model_name=\"googleUSEmodel\",\n",
+    "    tags={\"Model\": \"GoogleUSE\"},\n",
+    "    description=\"Google Universal Sentence Embedding pretrained model\",\n",
+    "    workspace=ws,\n",
+    ")\n",
+    "print(\"Registered googleUSEembeddings model\")"
   ]
  },
  {
@@ -1312,7 +1126,7 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "embedding_model = Model(ws, name= 'googleUSEmodel')\n",
+    "embedding_model = Model(ws, name=\"googleUSEmodel\")\n",
    "print(\"Found model with name googleUSEembeddings\")"
   ]
  },
  {
@@ -1320,22 +1134,18 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-    "## 6.2 Create Scoring Script\n"
+    "## 6.2 Create Scoring Script\n",
+    "\n",
+    "In this section we show an example of an entry script, which is called from the deployed webservice. `score.py` is our entry script. The script must contain:\n",
+    "1. init() - This function loads the model into a global object.\n",
+    "2. run() - This function is used for model prediction. The inputs and outputs to `run()` typically use JSON for serialization and deserialization. 
" ] }, { "cell_type": "code", - "execution_count": 73, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Overwriting score.py\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%%writefile score.py\n", "import pickle\n", @@ -1348,7 +1158,9 @@ "import tensorflow as tf\n", "import tensorflow_hub as hub\n", "import os\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", + "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", + "\n", "\n", "def google_encoder(dataset):\n", " \"\"\" Function that embeds sentences using the Google Universal\n", @@ -1370,17 +1182,16 @@ " # Apply embedding model and normalize the input\n", " sts_encode1 = tf.nn.l2_normalize(embedding_model(sts_input1), axis=1)\n", " sts_encode2 = tf.nn.l2_normalize(embedding_model(sts_input2), axis=1)\n", - " \n", + "\n", " sess.run(tf.global_variables_initializer())\n", " sess.run(tf.tables_initializer())\n", " emb1, emb2 = sess.run(\n", - " [sts_encode1, sts_encode2],\n", - " feed_dict={\n", - " sts_input1: dataset['sentence1'],\n", - " sts_input2: dataset['sentence2']\n", - " })\n", + " [sts_encode1, sts_encode2],\n", + " feed_dict={sts_input1: dataset[\"sentence1\"], sts_input2: dataset[\"sentence2\"]},\n", + " )\n", " return emb1, emb2\n", - " \n", + "\n", + "\n", "def feature_engineering(dataset):\n", " \"\"\"Extracts embedding features from the dataset and returns\n", " features and target in a dataframe\n", @@ -1395,58 +1206,64 @@ " scores: list of target variables\n", " \"\"\"\n", " google_USE_emb1, google_USE_emb2 = google_encoder(dataset)\n", - " n_google = google_USE_emb1.shape[1] #length of the embeddings \n", + " n_google = google_USE_emb1.shape[1] # length of the embeddings\n", " return np.concatenate((google_USE_emb1, google_USE_emb2), axis=1)\n", "\n", + "\n", "def init():\n", " global model, googleUSE_dir_path\n", - " model_path = Model.get_model_path(model_name = '<>') # this name is model.id of model that we want to deploy\n", + " model_path = Model.get_model_path(\n", + " model_name=\"<>\"\n", + " ) # this name is model.id of model that we want to deploy\n", " # deserialize the model file back into a sklearn model\n", " model = joblib.load(model_path)\n", - " \n", - " #load the path for google USE embedding model\n", - " googleUSE_dir_path = Model.get_model_path(model_name = 'googleUSEmodel')\n", - " os.environ['TFHUB_CACHE_DIR'] = googleUSE_dir_path\n", + "\n", + " # load the path for google USE embedding model\n", + " googleUSE_dir_path = Model.get_model_path(model_name=\"googleUSEmodel\")\n", + " os.environ[\"TFHUB_CACHE_DIR\"] = googleUSE_dir_path\n", + "\n", "\n", "def run(rawdata):\n", " global embedding_model, sess, googleUSE_dir_path, model\n", " try:\n", - " #load data and convert to dataframe\n", - " data = json.loads(rawdata)['data']\n", - " data_df = pd.DataFrame(data, columns=['sentence1','sentence2'])\n", - " \n", - " #begin a tensorflow session and load tensorhub module\n", + " # load data and convert to dataframe\n", + " data = json.loads(rawdata)[\"data\"]\n", + " data_df = pd.DataFrame(data, columns=[\"sentence1\", \"sentence2\"])\n", + "\n", + " # begin a tensorflow session and load tensorhub module\n", " sess = tf.Session()\n", - " embedding_model = hub.Module(googleUSE_dir_path+\"/96e8f1d3d4d90ce86b2db128249eb8143a91db73\")\n", - " \n", - " #Embed sentences using Google USE model\n", + " embedding_model = hub.Module(\n", + " googleUSE_dir_path + 
\"/96e8f1d3d4d90ce86b2db128249eb8143a91db73\"\n", + " )\n", + "\n", + " # Embed sentences using Google USE model\n", " embedded_data = feature_engineering(data_df)\n", - " #Predict using AutoML saved model\n", + " # Predict using AutoML saved model\n", " result = model.predict(embedded_data)\n", - " \n", + "\n", " except Exception as e:\n", " result = str(e)\n", " sess.close()\n", " return json.dumps({\"error\": result})\n", - " \n", + "\n", " sess.close()\n", - " return json.dumps({\"result\":result.tolist()})" + " return json.dumps({\"result\": result.tolist()})" ] }, { "cell_type": "code", - "execution_count": 74, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Substitute the actual model id in the script file.\n", - "script_file_name = 'score.py'\n", + "script_file_name = \"score.py\"\n", "\n", - "with open(script_file_name, 'r') as cefr:\n", + "with open(script_file_name, \"r\") as cefr:\n", " content = cefr.read()\n", "\n", - "with open(script_file_name, 'w') as cefw:\n", - " cefw.write(content.replace('<>', automl_model_name))" + "with open(script_file_name, \"w\") as cefw:\n", + " cefw.write(content.replace(\"<>\", automl_model_name))" ] }, { @@ -1460,68 +1277,64 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'pipeline_env.yml'" - ] - }, - "execution_count": 32, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ - "myenv = CondaDependencies.create(conda_packages=['numpy','scikit-learn','py-xgboost<=0.80', 'pandas', 'tensorflow', 'tensorflow-hub'],\n", - " pip_packages=['azureml-sdk[automl]==1.0.43.*'], python_version = '3.6.8')\n", + "myenv = CondaDependencies.create(\n", + " conda_packages=[\n", + " \"numpy\",\n", + " \"scikit-learn\",\n", + " \"py-xgboost<=0.80\",\n", + " \"pandas\",\n", + " \"tensorflow\",\n", + " \"tensorflow-hub\",\n", + " ],\n", + " pip_packages=[\"azureml-sdk[automl]==1.0.43.*\"],\n", + " python_version=\"3.6.8\",\n", + ")\n", "\n", - "conda_env_file_name = 'pipeline_env.yml'\n", - "myenv.save_to_file('.', conda_env_file_name)\n" + "conda_env_file_name = \"pipeline_env.yml\"\n", + "myenv.save_to_file(\".\", conda_env_file_name)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## 6.4 Image Creation" + "## 6.4 Image Creation\n", + "\n", + "In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step.\n", + "\n", + "Below image creation step takes a while sometimes." 
]
  },
  {
   "cell_type": "code",
-   "execution_count": 75,
+   "execution_count": null,
    "metadata": {
     "scrolled": false
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Creating image\n",
-      "Running......................................................................................\n",
-      "Succeeded\n",
-      "Image creation operation finished for image pipeline-image:58, operation \"Succeeded\"\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "#trying to add dependencies\n",
-    "image_config = ContainerImage.image_configuration(execution_script = script_file_name,\n",
-    "                                  runtime = \"python\",\n",
-    "                                  conda_file = conda_env_file_name,\n",
-    "                                  description = \"Image with aml pipeline model\",\n",
-    "                                  tags = {'area': \"nlp\", 'type': \"sentencesimilarity pipeline\"})\n",
+    "# trying to add dependencies\n",
+    "image_config = ContainerImage.image_configuration(\n",
+    "    execution_script=script_file_name,\n",
+    "    runtime=\"python\",\n",
+    "    conda_file=conda_env_file_name,\n",
+    "    description=\"Image with aml pipeline model\",\n",
+    "    tags={\"area\": \"nlp\", \"type\": \"sentencesimilarity pipeline\"},\n",
+    ")\n",
     "\n",
-    "image = ContainerImage.create(name = \"pipeline-image\",\n",
-    "                              # this is the model object\n",
-    "                              models = [model, embedding_model], #add both embedding and autoML models\n",
-    "                              image_config = image_config,\n",
-    "                              workspace = ws)\n",
+    "image = ContainerImage.create(\n",
+    "    name=\"pipeline-image\",\n",
+    "    # this is the model object\n",
+    "    models=[model, embedding_model],  # add both embedding and autoML models\n",
+    "    image_config=image_config,\n",
+    "    workspace=ws,\n",
+    ")\n",
     "\n",
-    "image.wait_for_creation(show_output = True)"
+    "image.wait_for_creation(show_output=True)"
    ]
   },
   {
@@ -1537,7 +1350,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "image.get_logs()"
+    "# image.get_logs()"
    ]
   },
   {
@@ -1546,12 +1359,16 @@
   "source": [
    "## 6.5 Provision the AKS Cluster\n",
    "\n",
-    "This is a one time setup. You can reuse this cluster for multiple deployments after it has been created. If you delete the cluster or the resource group that contains it, then you would have to recreate it."
+    "**Time estimate:** Approximately 20 minutes.\n",
+    "\n",
+    "Creating or attaching an AKS cluster is a one time process for your workspace. You can reuse this cluster for multiple deployments. If you delete the cluster or the resource group that contains it, you must create a new cluster the next time you need to deploy. You can have multiple AKS clusters attached to your workspace." 
] }, { "cell_type": "code", - "execution_count": 81, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -1560,11 +1377,10 @@ "# Use the default configuration (can also provide parameters to customize)\n", "prov_config = AksCompute.provisioning_configuration()\n", "\n", - "aks_name = 'nlp-aks' \n", "# Create the cluster\n", - "aks_target = ComputeTarget.create(workspace = ws, \n", - " name = aks_name, \n", - " provisioning_configuration = prov_config)" + "aks_target = ComputeTarget.create(\n", + " workspace=ws, name=\"nlp-aks\", provisioning_configuration=prov_config\n", + ")" ] }, { @@ -1572,45 +1388,49 @@ "metadata": {}, "source": [ "\n", - "## 6.6 Deploy the Image as a Web Service on Azure Kubernetes Service\n" + "## 6.6 Deploy the Image as a Web Service on Azure Kubernetes Service\n", + "\n", + "In the case of deployment on AKS, in addition to the Docker image, we need to define computational resources. This is typically a cluster of CPUs or a cluster of GPUs. If we already have a Kubernetes-managed cluster in our workspace, we can use it, otherwise, we can create a new one.\n", + "\n", + "In this notebook we will use the cluster in the above cell." ] }, { "cell_type": "code", - "execution_count": 83, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "#Set the web service configuration\n", + "# Set the web service configuration\n", "aks_config = AksWebservice.deploy_configuration()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We are now ready to deploy our web service. We will deploy from the Docker image. It indeed contains our autoML model as well as the Google Universal Sentence Encoder model and the conda environment needed for the scoring script to work properly. The parameters to pass to the Webservice.deploy_from_image() command are similar to those used for the deployment on ACI. The only major difference is the compute target (aks_target), i.e. the CPU cluster we just spun up.\n", + "\n", + "**Note:** This deployment takes a few minutes to complete." 
+ ] + }, { "cell_type": "code", - "execution_count": 85, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Creating service\n", - "Running........................\n", - "SucceededAKS service creation operation finished, operation \"Succeeded\"\n", - "Healthy\n" - ] - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# deploy image as web service\n", - "aks_service_name ='aks-with-pipelines-service-5'\n", - "\n", - "aks_service = Webservice.deploy_from_image(workspace = ws, \n", - " name = aks_service_name,\n", - " image = image,\n", - " deployment_config = aks_config,\n", - " deployment_target = aks_target)\n", - "aks_service.wait_for_deployment(show_output = True)\n", + "aks_service_name = \"aks-with-pipelines-service-1\"\n", + "\n", + "aks_service = Webservice.deploy_from_image(\n", + " workspace=ws,\n", + " name=aks_service_name,\n", + " image=image,\n", + " deployment_config=aks_config,\n", + " deployment_target=aks_target,\n", + ")\n", + "aks_service.wait_for_deployment(show_output=True)\n", "print(aks_service.state)" ] }, @@ -1627,7 +1447,7 @@ "metadata": {}, "outputs": [], "source": [ - "aks_service.get_logs()" + "# aks_service.get_logs()" ] }, { @@ -1635,65 +1455,65 @@ "metadata": {}, "source": [ "## 6.7 Test Deployed Webservice\n", - "We test the web sevice by passing data.The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated. The service has a timeout which does not allow passing the large test dataset. Timeout is based off a few things.It is set to a default of ~30 seconds. To overcome this you can batch data and send it to the service." + "\n", + "Testing the deployed model means running the created webservice.
\n", + "The deployed model can be tested by passing list of sentence pairs. The output will be a score between 1-5 with 5 being identical sentences and 1 indicates that the sentences are totally different.\n", + "\n", + "The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated. The service has a timeout which does not allow passing the large test dataset. Timeout is based off a few things.It is set to a default of ~30 seconds. To overcome this you can batch data and send it to the service.\n" ] }, { "cell_type": "code", - "execution_count": 89, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# load test set sentences\n", - "data = pd.read_csv(\"data/test.csv\")\n", - "train_y = data['score'].values.flatten()\n", - "train_x = data.drop(\"score\", axis=1).values.tolist()\n", - "data = {'data': train_x[:500]}\n", + "sentences = [\n", + " [\"This is sentence1\", \"This is sentence1\"],\n", + " [\"A hungry cat.\", \"A sleeping cat\"],\n", + " [\"Its summer time \", \"Winter is coming\"],\n", + "]\n", + "data = {\"data\": sentences}\n", "data = json.dumps(data)" ] }, { "cell_type": "code", - "execution_count": 90, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Set up a Timer to see how long the model takes to predict\n", - "t = Timer()" - ] - }, - { - "cell_type": "code", - "execution_count": 91, - "metadata": { - "scrolled": true - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Time elapsed: 18.0523\n", - "Number of sample predicted : 500\n" - ] - } - ], - "source": [ - "#print time here\n", + "t = Timer()\n", + "\n", "t.start()\n", - "score = aks_service.run(input_data = data)\n", + "score = aci_service.run(input_data=data)\n", "t.stop()\n", + "\n", "print(\"Time elapsed: {}\".format(t))\n", "\n", "result = json.loads(score)\n", - "\n", "try:\n", " output = result[\"result\"]\n", + " print(\"Number of samples predicted: {}\".format(len(output)))\n", + " print(output)\n", "except:\n", - " output = result[\"error\"]\n", - " \n", - "# output will print the error code incase error occurs.\n", - "print('Number of sample predicted : {0}'.format(len(output)))" + " print(result[\"error\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we'll calculate the Pearson Correlation on the test set.\n", + "\n", + "**What is Pearson Correlation?**\n", + "\n", + "Our evaluation metric is Pearson correlation ($\\rho$) which is a measure of the linear correlation between two variables. The formula for calculating Pearson correlation is as follows: \n", + "\n", + "$$\\rho_{X,Y} = \\frac{E[(X-\\mu_X)(Y-\\mu_Y)]}{\\sigma_X \\sigma_Y}$$\n", + "\n", + "This metric takes a value in [-1,1] where -1 represents a perfect negative correlation, 1 represents a perfect positive correlation, and 0 represents no correlation. We utilize the Pearson correlation metric as this is the main metric that [SentEval](http://nlpprogress.com/english/semantic_textual_similarity.html), a widely-used evaluation toolkit for evaluation sentence representations, uses for the STS Benchmark dataset." 
] }, { @@ -1702,25 +1522,35 @@ "metadata": {}, "outputs": [], "source": [ - "print(output)" + "# load test set sentences\n", + "data = pd.read_csv(\"data/test.csv\")\n", + "train_y = data[\"score\"].values.flatten()\n", + "train_x = data.drop(\"score\", axis=1).values.tolist()\n", + "data = {\"data\": train_x[:500]}\n", + "data = json.dumps(data)" ] }, { "cell_type": "code", - "execution_count": 93, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.8200285709168692\n" - ] - } - ], - "source": [ - "#get Pearson Correlation\n", - "print(pearsonr(output, train_y[:500])[0])" + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# Set up a Timer to see how long the model takes to predict\n", + "with Timer() as t:\n", + " score = aks_service.run(input_data=data)\n", + " print(\"Time elapsed: {}\".format(t))\n", + "\n", + "result = json.loads(score)\n", + "\n", + "try:\n", + " output = result[\"result\"]\n", + " print(\"Number of sample predicted : \".format(len(output)))\n", + " print(output)\n", + "except:\n", + " print(result[\"error\"])" ] }, { @@ -1728,7 +1558,10 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "# get Pearson Correlation\n", + "print(pearsonr(output, train_y[:500])[0])" + ] } ], "metadata": { From ec2c827e75fac26db2dfb3e228fcad4c234e9fcf Mon Sep 17 00:00:00 2001 From: miguelgfierro Date: Fri, 28 Jun 2019 16:15:46 +0100 Subject: [PATCH 095/108] azure devops tests --- .ci/azure-pipelines.yml | 46 --------------------- tests/ci/cpu_unit_tests_linux.yml | 44 ++++++++++++++++++++ tests/ci/gpu_unit_tests_linux.yml | 44 ++++++++++++++++++++ tests/ci/notebooks_cpu_unit_tests_linux.yml | 44 ++++++++++++++++++++ tests/ci/notebooks_gpu_unit_tests_linux.yml | 44 ++++++++++++++++++++ tests/ci/unit-tests.yml | 46 --------------------- 6 files changed, 176 insertions(+), 92 deletions(-) delete mode 100644 .ci/azure-pipelines.yml create mode 100644 tests/ci/cpu_unit_tests_linux.yml create mode 100644 tests/ci/gpu_unit_tests_linux.yml create mode 100644 tests/ci/notebooks_cpu_unit_tests_linux.yml create mode 100644 tests/ci/notebooks_gpu_unit_tests_linux.yml delete mode 100644 tests/ci/unit-tests.yml diff --git a/.ci/azure-pipelines.yml b/.ci/azure-pipelines.yml deleted file mode 100644 index 3c83dadcc..000000000 --- a/.ci/azure-pipelines.yml +++ /dev/null @@ -1,46 +0,0 @@ - -# Pull request against these branches will trigger this build -pr: -- master -- staging - -#Any commit to this branch will trigger the build. -trigger: -- staging -- master - -pool: - vmImage: 'ubuntu-16.04' - -steps: - -- bash: | - echo "##vso[task.prependpath]/usr/share/miniconda/bin" - displayName: Add Conda to PATH - -- bash: | - conda remove -q -n nlp --all -y - python tools/generate_conda_file.py --gpu - conda env create -n nlp_gpu -f nlp_gpu.yaml - conda env list - source activate nlp_gpu - displayName: 'Creating Conda Environment with dependencies' - -- bash: | - source activate nlp_gpu - python -m ipykernel install --user --name nlp_gpu --display-name "nlp_gpu" - # Commenting out pytest since it contains bunch of tests from other project which are not applicable. 
- # But keeping the line here to show how to run it once tests relevant to this project are added - # pytest --junitxml=junit/test-unitttest.xml #not running any tests for now - displayName: 'Run Unit tests' - -- task: PublishTestResults@2 - inputs: - testResultsFiles: '**/test-unitttest.xml' - testRunTitle: 'Test results for PyTest' - -- task: ComponentGovernanceComponentDetection@0 - inputs: - scanType: 'Register' - verbosity: 'Verbose' - alertWarningLevel: 'High' diff --git a/tests/ci/cpu_unit_tests_linux.yml b/tests/ci/cpu_unit_tests_linux.yml new file mode 100644 index 000000000..f6a50b74f --- /dev/null +++ b/tests/ci/cpu_unit_tests_linux.yml @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# Pull request against these branches will trigger this build +pr: +- master +- staging + +pool: + name: "nlptestmachine" + +steps: + +- bash: | + echo "##vso[task.prependpath]/data/anaconda/bin" + displayName: Add Conda to PATH + +- bash: | + python tools/generate_conda_file.py + conda env create -n nlp_cpu -f nlp_cpu.yaml + displayName: 'Creating Conda Environment with dependencies' + +- bash: | + source activate nlp_cpu + pytest tests/unit -m "not notebooks and not gpu" --junitxml=junit/test-unitttest.xml + displayName: 'Run Unit tests' + +- bash: | + echo Remove Conda Environment + conda remove -n nlp_cpu --all -q --force -y + echo Done Cleanup + displayName: 'Cleanup Task' + condition: always() + +- task: PublishTestResults@2 + inputs: + testResultsFiles: '**/test-unitttest.xml' + testRunTitle: 'Test results for PyTest' + +- task: ComponentGovernanceComponentDetection@0 + inputs: + scanType: 'Register' + verbosity: 'Verbose' + alertWarningLevel: 'High' \ No newline at end of file diff --git a/tests/ci/gpu_unit_tests_linux.yml b/tests/ci/gpu_unit_tests_linux.yml new file mode 100644 index 000000000..3cf91441a --- /dev/null +++ b/tests/ci/gpu_unit_tests_linux.yml @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# Pull request against these branches will trigger this build +pr: +- master +- staging + +pool: + name: "nlptestmachine" + +steps: + +- bash: | + echo "##vso[task.prependpath]/data/anaconda/bin" + displayName: Add Conda to PATH + +- bash: | + python tools/generate_conda_file.py --gpu + conda env create -n nlp_gpu -f nlp_gpu.yaml + displayName: 'Creating Conda Environment with dependencies' + +- bash: | + source activate nlp_gpu + pytest tests/unit -m "not notebooks and gpu" --junitxml=junit/test-unitttest.xml + displayName: 'Run Unit tests' + +- bash: | + echo Remove Conda Environment + conda remove -n nlp_gpu --all -q --force -y + echo Done Cleanup + displayName: 'Cleanup Task' + condition: always() + +- task: PublishTestResults@2 + inputs: + testResultsFiles: '**/test-unitttest.xml' + testRunTitle: 'Test results for PyTest' + +- task: ComponentGovernanceComponentDetection@0 + inputs: + scanType: 'Register' + verbosity: 'Verbose' + alertWarningLevel: 'High' \ No newline at end of file diff --git a/tests/ci/notebooks_cpu_unit_tests_linux.yml b/tests/ci/notebooks_cpu_unit_tests_linux.yml new file mode 100644 index 000000000..ed66fbd3c --- /dev/null +++ b/tests/ci/notebooks_cpu_unit_tests_linux.yml @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
+ +# Pull request against these branches will trigger this build +pr: +- master +- staging + +pool: + name: "nlptestmachine" + +steps: + +- bash: | + echo "##vso[task.prependpath]/data/anaconda/bin" + displayName: Add Conda to PATH + +- bash: | + python tools/generate_conda_file.py + conda env create -n nlp_cpu -f nlp_cpu.yaml + displayName: 'Creating Conda Environment with dependencies' + +- bash: | + source activate nlp_cpu + pytest tests/unit -m "notebooks and not gpu" --junitxml=junit/test-unitttest.xml + displayName: 'Run Unit tests' + +- bash: | + echo Remove Conda Environment + conda remove -n nlp_cpu --all -q --force -y + echo Done Cleanup + displayName: 'Cleanup Task' + condition: always() + +- task: PublishTestResults@2 + inputs: + testResultsFiles: '**/test-unitttest.xml' + testRunTitle: 'Test results for PyTest' + +- task: ComponentGovernanceComponentDetection@0 + inputs: + scanType: 'Register' + verbosity: 'Verbose' + alertWarningLevel: 'High' \ No newline at end of file diff --git a/tests/ci/notebooks_gpu_unit_tests_linux.yml b/tests/ci/notebooks_gpu_unit_tests_linux.yml new file mode 100644 index 000000000..619fd45cb --- /dev/null +++ b/tests/ci/notebooks_gpu_unit_tests_linux.yml @@ -0,0 +1,44 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. + +# Pull request against these branches will trigger this build +pr: +- master +- staging + +pool: + name: "nlptestmachine" + +steps: + +- bash: | + echo "##vso[task.prependpath]/data/anaconda/bin" + displayName: Add Conda to PATH + +- bash: | + python tools/generate_conda_file.py --gpu + conda env create -n nlp_gpu -f nlp_gpu.yaml + displayName: 'Creating Conda Environment with dependencies' + +- bash: | + source activate nlp_gpu + pytest tests/unit -m "notebooks and gpu" --junitxml=junit/test-unitttest.xml + displayName: 'Run Unit tests' + +- bash: | + echo Remove Conda Environment + conda remove -n nlp_gpu --all -q --force -y + echo Done Cleanup + displayName: 'Cleanup Task' + condition: always() + +- task: PublishTestResults@2 + inputs: + testResultsFiles: '**/test-unitttest.xml' + testRunTitle: 'Test results for PyTest' + +- task: ComponentGovernanceComponentDetection@0 + inputs: + scanType: 'Register' + verbosity: 'Verbose' + alertWarningLevel: 'High' \ No newline at end of file diff --git a/tests/ci/unit-tests.yml b/tests/ci/unit-tests.yml deleted file mode 100644 index 3c83dadcc..000000000 --- a/tests/ci/unit-tests.yml +++ /dev/null @@ -1,46 +0,0 @@ - -# Pull request against these branches will trigger this build -pr: -- master -- staging - -#Any commit to this branch will trigger the build. -trigger: -- staging -- master - -pool: - vmImage: 'ubuntu-16.04' - -steps: - -- bash: | - echo "##vso[task.prependpath]/usr/share/miniconda/bin" - displayName: Add Conda to PATH - -- bash: | - conda remove -q -n nlp --all -y - python tools/generate_conda_file.py --gpu - conda env create -n nlp_gpu -f nlp_gpu.yaml - conda env list - source activate nlp_gpu - displayName: 'Creating Conda Environment with dependencies' - -- bash: | - source activate nlp_gpu - python -m ipykernel install --user --name nlp_gpu --display-name "nlp_gpu" - # Commenting out pytest since it contains bunch of tests from other project which are not applicable. 
- # But keeping the line here to show how to run it once tests relevant to this project are added - # pytest --junitxml=junit/test-unitttest.xml #not running any tests for now - displayName: 'Run Unit tests' - -- task: PublishTestResults@2 - inputs: - testResultsFiles: '**/test-unitttest.xml' - testRunTitle: 'Test results for PyTest' - -- task: ComponentGovernanceComponentDetection@0 - inputs: - scanType: 'Register' - verbosity: 'Verbose' - alertWarningLevel: 'High' From ce9c06639135e37da35dd54710af1787db43e43e Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Fri, 28 Jun 2019 12:29:10 -0400 Subject: [PATCH 096/108] Address PR comments for local automl aci deployment --- .../automl_local_deployment_aci.ipynb | 93 ++++++++++++------- 1 file changed, 57 insertions(+), 36 deletions(-) diff --git a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb index 2cecb7ec3..ff76830cb 100644 --- a/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb +++ b/scenarios/sentence_similarity/automl_local_deployment_aci.ipynb @@ -20,7 +20,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook demonstrates how to use Azure Automated Machine Learning(AutoML) locally to automate machine learning model selection and tuning and how to use Azure Container Instance (ACI) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and utilize AutoML's text preprocessing features." + "This notebook demonstrates how to use [Azure Machine Learning Service's](https://azure.microsoft.com/en-us/services/machine-learning-service/\n", + ") Automated Machine Learning ([AutoML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml\n", + ")) locally to automate machine learning model selection and tuning and how to use Azure Container Instance ([ACI](https://azure.microsoft.com/en-us/services/container-instances/\n", + ")) for deployment. We utilize the STS Benchmark dataset to predict sentence similarity and utilize AutoML's text preprocessing features." ] }, { @@ -61,7 +64,8 @@ "source": [ "### 1.1 What is Azure AutoML?\n", "\n", - "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to improve the productivity of data scientists and democratize AI by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning." + "Automated machine learning ([AutoML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml)) is a capability of Microsoft's [Azure Machine Learning service](https://azure.microsoft.com/en-us/services/machine-learning-service/\n", + "). The goal of AutoML is to improve the productivity of data scientists and democratize AI by allowing for the rapid development and deployment of machine learning models. To acheive this goal, AutoML automates the process of selecting a ML model and tuning the model. 
All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning." ] }, { @@ -158,6 +162,24 @@ "MEMORY_GB = 8" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define the settings for AutoML\n", + "automl_settings = {\n", + " \"task\": \"regression\", # type of task: classification, regression or forecasting\n", + " \"debug_log\": \"automated_ml_errors.log\",\n", + " \"path\": \"./automated-ml-regression\",\n", + " \"iteration_timeout_minutes\": 15, # How long each iteration can take before moving on\n", + " \"iterations\": 50, # Number of algorithm options to try\n", + " \"primary_metric\": \"spearman_correlation\", # Metric to optimize\n", + " \"preprocess\": True, # Whether dataset preprocessing should be applied\n", + "}" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -259,6 +281,15 @@ "## 3.1 Link to or create a Workspace" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, go through the [Configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`. This will create a config.json file containing the values needed below to create a workspace.\n", + "\n", + "**Note**: you do not need to fill in these values if you have a config.json in the same folder as this notebook" + ] + }, { "cell_type": "code", "execution_count": null, @@ -329,21 +360,10 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "automl_settings = {\n", - " \"task\": \"regression\", # type of task: classification, regression or forecasting\n", - " \"debug_log\": \"automated_ml_errors.log\",\n", - " \"path\": \"./automated-ml-regression\",\n", - " \"iteration_timeout_minutes\": 15, # How long each iteration can take before moving on\n", - " \"iterations\": 50, # Number of algorithm options to try\n", - " \"primary_metric\": \"spearman_correlation\", # Metric to optimize\n", - " \"preprocess\": True, # Whether dataset preprocessing should be applied\n", - " \"verbosity\": logging.ERROR,\n", - "}" + "**Note**: we are directly passing in sentence pairs as data because we are relying upon AutoML's built-in preprocessing (by setting preprocess = True in the AutoMLConfig parameters) to perform the embedding step." ] }, { @@ -359,7 +379,12 @@ "\n", "# local compute\n", "automated_ml_config = AutoMLConfig(\n", - " X=X_train, y=y_train, X_valid=X_validation, y_valid=y_validation, **automl_settings\n", + " X=X_train,\n", + " y=y_train,\n", + " X_valid=X_validation,\n", + " y_valid=y_validation,\n", + " verbosity=logging.ERROR,\n", + " **automl_settings # where the autoML main settings are defined\n", ")" ] }, @@ -427,10 +452,10 @@ "3. Deploy the model to the compute target\n", "4. Test the deployed model (webservice)\n", "\n", - "In this notebook we walk you through the process of creating a webservice running on Azure Container Instance by deploying an AutoML model as an image. ACI is usually good for low scale, CPU-based workloads. 
(You can find more information on deploying and serving models [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)\n", + "In this notebook, we walk you through the process of creating a webservice running on Azure Container Instance by deploying an AutoML model as an image. ACI is typically used for low scale, CPU-based workloads. (You can find more information on deploying and serving models [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where))\n", "\n", - "## 4.1 Retrieve the Best Model @Courtney : review section\n", - "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the `get_output` method which returns the best run and fitted model across all iterations. Overloads on `get_output` allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. The object returned by AutoML is a Pipeline class which chains together multiple steps in a machine learning workflow in order to provide a reproducible mechanism for building, evaluating, deploying, and running ML systems (see [here](https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb) for additional information about Pipelines). \n", + "## 4.1 Retrieve the Best Model\n", + "Now we can identify the model that maximized performance on a given metric (spearman correlation in our case) using the `get_output` method which returns the best_run (AutoMLRun object with information about the experiment) and fitted_model ([Pipeline]((https://github.com/Azure/MachineLearningNotebooks/blob/master/how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-getting-started.ipynb)) object) across all iterations. Overloads on `get_output` allow you to retrieve the best run and fitted model for any logged metric or for a particular iteration. \n", "\n", "The different steps that make up the pipeline can be accessed through `fitted_model.named_steps` and information about data preprocessing is available through `fitted_model.named_steps['datatransformer'].get_featurization_summary()`" ] @@ -450,11 +475,10 @@ "source": [ "## 4.2 Register the Fitted Model for Deployment\n", "\n", - "Registering a model means registering one or more files that make up for a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. The model can be either come from Azure Machine Learning or any other location such as local machine. \n", - "Below shows how a model is registered from an experiment run. \n", - "If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered.\n", + "Registering a model means registering one or more files that make up a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. The model can either come from Azure Machine Learning or another location, such as your local machine. \n", + "Below we show how a model is registered from the results of an experiment run. 
If neither metric nor iteration are specified in the register_model call, the iteration with the best primary metric is registered.\n", "\n", - "See other ways to register a model [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)" + "See other ways to register a model [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)." ] }, { @@ -476,9 +500,9 @@ "metadata": {}, "source": [ "## 4.3 Create an Entry Script\n", - "In this section we show an example of entry script which is called from the deployed webservice. `score.py` is our entry script. The script must contain:\n", + "In this section we show an example of an entry script, which is called from the deployed webservice. `score.py` is our entry script. The script must contain:\n", "1. init() - This function loads the model in a global object.\n", - "2. run() - This function is used for model prediction. The inputs and outputs to `run()` typically use JSON for serialization and deserilization. \n" + "2. run() - This function is used for model prediction. The inputs and outputs to `run()` typically use JSON for serialization and deserilization. " ] }, { @@ -606,9 +630,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step.\n", - "\n", - "Below image creation step takes a while sometimes." + "In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step. This step can take up to 10 minutes and even longer if the model is large." ] }, { @@ -640,7 +662,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If the above step fails then use below command to see logs" + "If the above step fails, then use the below command to see logs" ] }, { @@ -663,7 +685,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Azure Container Instances is mostly used for deploying your models as a web service if one or more of the following conditions is true:\n", + "Azure Container Instances are mostly used for deploying your models as a web service if one or more of the following conditions are true:\n", "1. You need to quickly deploy and validate your model.\n", "2. You are testing a model that is under development.\n", "\n", @@ -720,7 +742,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Fetch logs to debug incase of failures." + "Fetch logs to debug in case of failures." ] }, { @@ -736,7 +758,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If you are not creating a webservice but want to reuse an existing service call the webservice with the name. You can look up all the deployed webservices under deployment in the Azure Portal. Below is an example to it:" + "If you want to reuse an existing service versus creating a new one, call the webservice with the name. You can look up all the deployed webservices under deployment in the Azure Portal. Below is an example:" ] }, { @@ -755,10 +777,10 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## 4.7 Test Deployed Model @Courtney: can you check this section too for the values of the output & description\n", + "## 4.7 Test Deployed Model\n", "\n", "Testing the deployed model means running the created webservice.
\n", - "The deployed model can be tested by passing list of sentence pairs. The output will be a score between 1-5 with 5 being identical sentences and 1 indicates that the sentences are totally different.\n" + "The deployed model can be tested by passing a list of sentence pairs. The output will be a score between 0 and 5, with 0 indicating no meaning overlap between the sentences and 5 meaning equivalence." ] }, { @@ -927,8 +949,7 @@ "\n", "For production requirements, i.e. when > 100 requests per second are expected, we recommend deploying models to Azure Kubernetes Service (AKS). It is a convenient infrastructure as it manages hosted Kubernetes environments, and makes it easy to deploy and manage containerized applications without container orchestration expertise. It also supports deployments with CPU clusters and deployments with GPU clusters.\n", "\n", - "To see an example with Azure Kubernetes Service example, go to the [this notebook](https://github.com/microsoft/nlp/blob/courtney-janhavi-automl/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb)\n", - "\n", + "To see an example with Azure Kubernetes Service example, go to [this notebook](https://github.com/microsoft/nlp/blob/courtney-janhavi-automl/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb)\n", "\n", "For more examples on deployment follow [MachineLearningNotebooks](https://github.com/Azure/MachineLearningNotebooks/tree/master/how-to-use-azureml/deployment) github repository." ] From 9b563159ce99d1cc867950a1bef0a09be2bc088a Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Fri, 28 Jun 2019 12:59:00 -0400 Subject: [PATCH 097/108] Resolve PR comment on pipelines notebook --- ...automl_with_pipelines_deployment_aks.ipynb | 73 ++++++++++++------- 1 file changed, 45 insertions(+), 28 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb b/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb index ee55dba64..348336514 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb @@ -20,11 +20,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook demonstrates how to use AzureML pipelines and AutoML to streamline the creation of a machine learning workflow for predicting sentence similarity. The pipeline contains two steps: \n", + "This notebook demonstrates how to use [Azure Machine Learning](https://azure.microsoft.com/en-us/services/machine-learning-service/\n", + ") pipelines and Automated Machine Learning ([AutoML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml\n", + ")) to streamline the creation of a machine learning workflow for predicting sentence similarity. The pipeline contains two steps: \n", "1. PythonScriptStep: embeds sentences using a popular sentence embedding model, Google Universal Sentence Encoder\n", - "2. AutoMLStep: demonstrates how to use Automated Machine Learning(AutoML) to automate model selection for predicting sentence similarity (regression)\n", + "2. 
AutoMLStep: demonstrates how to use Automated Machine Learning (AutoML) to automate model selection for predicting sentence similarity (regression)\n", "\n", - "After creating the pipeline, the notebook demonstrates the deployment of our sentence similarity model using Azure Kubernetes Service (AKS).\n", + "After creating the pipeline, the notebook demonstrates the deployment of our sentence similarity model using Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/aks/intro-kubernetes\n", + ")).\n", "\n", "This notebook showcases how to use the following AzureML features: \n", "- AzureML Pipelines (PythonScriptStep and AutoMLStep)\n", @@ -84,7 +87,7 @@ "source": [ "### 1.1 What are AzureML Pipelines?\n", "\n", - "AzureML Pipelines \"define reusable machine learning workflows that can be used as a template for your machine learning scenarios\" ([pipeline information](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines)). Pipelines allow you to optimize your workflow and spend time on machine learning rather than infrastructure. A Pipeline is defined by a series of steps; the following steps are available: AdlaStep, AutoMLStep, AzureBatchStep, DataTransferStep, DatabricksStep, EstimatorStep, HyperDriveStep, ModuleStep, MpiStep, and PythonScriptStep (see [here](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/?view=azure-ml-py) for details of each step). When the pipeline is run, cached results are used for all steps that have not changed, optimizing the run time. Data sources and intermediate data can be used across multiple steps in a pipeline, saving time and resources. Below we see an example of an AzureML pipeline." + "[AzureML Pipelines](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-ml-pipelines) define reusable machine learning workflows that can be used as a template for your machine learning scenarios. Pipelines allow you to optimize your workflow and spend time on machine learning rather than infrastructure. A Pipeline is defined by a series of steps; the following steps are available: AdlaStep, AutoMLStep, AzureBatchStep, DataTransferStep, DatabricksStep, EstimatorStep, HyperDriveStep, ModuleStep, MpiStep, and PythonScriptStep (see [here](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-steps/?view=azure-ml-py) for details of each step). When the pipeline is run, cached results are used for all steps that have not changed, optimizing the run time. Data sources and intermediate data can be used across multiple steps in a pipeline, saving time and resources. Below we see an example of an AzureML pipeline." ] }, { @@ -100,9 +103,8 @@ "source": [ "### 1.2 What is Azure AutoML?\n", "\n", - "Automated machine learning (AutoML) is a capability of Microsoft's Azure Machine Learning service. The goal of AutoML is to \"improve the productivity of data scientists and democratize AI\" [1] by allowing for the rapid development and deployment of machine learning models. To achieve this goal, AutoML automates the process of selecting a ML model and tuning the model. AutoML even has preprocessing capabilities to engineer features from raw data. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. 
The user is also given the ability to set time and cost constraints for the model selection and tuning.\n",
-    "\n",
-    "[1]https://azure.microsoft.com/en-us/blog/new-automated-machine-learning-capabilities-in-azure-machine-learning-service/"
+    "Automated machine learning ([AutoML](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-automated-ml)) is a capability of Microsoft's [Azure Machine Learning service](https://azure.microsoft.com/en-us/services/machine-learning-service/\n",
+    "). The goal of AutoML is to improve the productivity of data scientists and democratize AI by allowing for the rapid development and deployment of machine learning models. To achieve this goal, AutoML automates the process of selecting an ML model and tuning the model. All the user is required to provide is a dataset (suitable for a classification, regression, or time-series forecasting problem) and a metric to optimize in choosing the model and hyperparameters. The user is also given the ability to set time and cost constraints for the model selection and tuning."
    ]
   },
   {
@@ -208,6 +210,22 @@
     "EMBEDDED_DATA_REF = os.environ[\"AZUREML_DATAREFERENCE_embedded_data\"]"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "automl_settings = {\n",
+    "    \"task\": \"regression\", # type of task: classification, regression or forecasting\n",
+    "    \"iteration_timeout_minutes\": 15, # How long each iteration can take before moving on\n",
+    "    \"iterations\": 50, # Number of algorithm options to try\n",
+    "    \"primary_metric\": \"spearman_correlation\", # Metric to optimize\n",
+    "    \"preprocess\": True, # Whether dataset preprocessing should be applied\n",
+    "    \"verbosity\": logging.INFO,\n",
+    "}"
+   ]
+  },
  {
   "cell_type": "markdown",
   "metadata": {},
@@ -321,6 +339,15 @@
    "## 3.1 Link to or create a Workspace"
   ]
  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, go through the [Configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML `Workspace`. 
This will create a config.json file containing the values needed below to create a workspace.\n",
+    "\n",
+    "**Note**: you do not need to fill in these values if you have a config.json in the same folder as this notebook"
+   ]
+  },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -845,22 +872,14 @@
   "metadata": {},
   "outputs": [],
   "source": [
-    "automl_settings = {\n",
-    "    \"iteration_timeout_minutes\": 15, # How long each iteration can take before moving on\n",
-    "    \"iterations\": 50, # Number of algorithm options to try\n",
-    "    \"primary_metric\": \"spearman_correlation\", # Metric to optimize\n",
-    "    \"preprocess\": True, # Whether dataset preprocessing should be applied\n",
-    "    \"verbosity\": logging.INFO,\n",
-    "}\n",
     "automl_config = AutoMLConfig(\n",
-    "    task=\"regression\", # type of task: classification, regression or forecasting\n",
     "    debug_log=\"automl_errors.log\",\n",
     "    path=project_folder,\n",
     "    compute_target=compute_target,\n",
     "    run_configuration=conda_run_config,\n",
     "    data_script=project_folder\n",
     "    + \"/get_data.py\", # local path to script with get_data() function\n",
-    "    **automl_settings\n",
+    "    **automl_settings # where the AutoML main settings are defined\n",
    ")"
   ]
  },
@@ -998,7 +1017,7 @@
   "source": [
    "**Cancel the Run**\n",
    "\n",
-    "Interrupting/Restarting the jupyter kernel will not properly cancel the run, which can lead to wasting compute resources. To avoid this, we recommend explicitly canceling a run with the following code:\n",
+    "Interrupting/Restarting the jupyter kernel will not properly cancel the run, which can lead to wasted compute resources. To avoid this, we recommend explicitly canceling a run with the following code:\n",
    "\n",
    "`pipeline_run.cancel()`"
   ]
@@ -1023,7 +1042,8 @@
   "3. Deploy the model to the compute target\n",
   "4. Test the deployed model (webservice)\n",
   "\n",
-   "In this notebook we walk you through the process of creating a webservice running on Azure Kubernetes Service(AKS) by deploying the model as an image. AKS is good for high-scale production deployments.It provides fast response time and autoscaling of the deployed service. Cluster autoscaling is not supported through the Azure Machine Learning SDK. \n",
+   "In this notebook we walk you through the process of creating a webservice running on Azure Kubernetes Service ([AKS](https://docs.microsoft.com/en-us/azure/aks/intro-kubernetes\n",
+   ")) by deploying the model as an image. AKS is good for high-scale production deployments. It provides fast response time and autoscaling of the deployed service. Cluster autoscaling is not supported through the Azure Machine Learning SDK. \n",
   "\n",
   "You can find more information on deploying and serving models [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)\n"
  ]
 },
@@ -1034,8 +1054,7 @@
  "source": [
   "## 6.1 Register/Retrieve AutoML and Google Universal Sentence Encoder Models for Deployment\n",
   "\n",
-  "Registering a model means registering one or more files that make up for a model. The Machine Learning models are registered in your current Aure Machine Learning Workspace. The model can be either come from Azure Machine Learning or any other location such as local machine.\n",
-  "\n",
+  "Registering a model means registering one or more files that make up a model. The Machine Learning models are registered in your current Azure Machine Learning Workspace. 
The model can either come from Azure Machine Learning or another location, such as your local machine.\n", "\n", "See other ways to register a model [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where)\n", "\n", @@ -1136,7 +1155,7 @@ "source": [ "## 6.2 Create Scoring Script\n", "\n", - "In this section we show an example of entry script which is called from the deployed webservice. `score.py` is our entry script. The script must contain:\n", + "In this section we show an example of an entry script, which is called from the deployed webservice. `score.py` is our entry script. The script must contain:\n", "1. init() - This function loads the model in a global object.\n", "2. run() - This function is used for model prediction. The inputs and outputs to `run()` typically use JSON for serialization and deserilization. " ] @@ -1304,9 +1323,7 @@ "source": [ "## 6.4 Image Creation\n", "\n", - "In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step.\n", - "\n", - "Below image creation step takes a while sometimes." + "In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step. This step can take up to 10 minutes and even longer if the model is large." ] }, { @@ -1341,7 +1358,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "If the above step fails then use below command to see logs" + "If the above step fails, then use below command to see logs." ] }, { @@ -1409,7 +1426,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We are now ready to deploy our web service. We will deploy from the Docker image. It indeed contains our autoML model as well as the Google Universal Sentence Encoder model and the conda environment needed for the scoring script to work properly. The parameters to pass to the Webservice.deploy_from_image() command are similar to those used for the deployment on ACI. The only major difference is the compute target (aks_target), i.e. the CPU cluster we just spun up.\n", + "We are now ready to deploy our web service. We will deploy from the Docker image. It contains our AutoML model as well as the Google Universal Sentence Encoder model and the conda environment needed for the scoring script to work properly. The parameters to pass to the Webservice.deploy_from_image() command are similar to those used for the deployment on ACI. The only major difference is the compute target (aks_target), i.e. the CPU cluster we just spun up.\n", "\n", "**Note:** This deployment takes a few minutes to complete." ] @@ -1457,9 +1474,9 @@ "## 6.7 Test Deployed Webservice\n", "\n", "Testing the deployed model means running the created webservice.
\n", - "The deployed model can be tested by passing list of sentence pairs. The output will be a score between 1-5 with 5 being identical sentences and 1 indicates that the sentences are totally different.\n", + "The deployed model can be tested by passing a list of sentence pairs. The output will be a score between 0 and 5, with 0 indicating no meaning overlap between the sentences and 5 meaning equivalence.\n", "\n", - "The run method expects input in json format.Run() method retrieves API keys behind the scenes to make sure that call is authenticated. The service has a timeout which does not allow passing the large test dataset. Timeout is based off a few things.It is set to a default of ~30 seconds. To overcome this you can batch data and send it to the service.\n" + "The run method expects input in json format. The Run() method retrieves API keys behind the scenes to make sure that the call is authenticated. The service has a timeout (default of ~30 seconds) which does not allow passing the large test dataset. To overcome this, you can batch data and send it to the service." ] }, { From c5a4b054076764d244a04c515cea11c9196cdbac Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Thu, 27 Jun 2019 22:10:46 -0400 Subject: [PATCH 098/108] BiDAF quickstart deployment notebook --- ...on_answering_system_bidaf_quickstart.ipynb | 398 ++++++++++++++++++ 1 file changed, 398 insertions(+) create mode 100644 scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb diff --git a/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb b/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb new file mode 100644 index 000000000..2c1ca430f --- /dev/null +++ b/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb @@ -0,0 +1,398 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "sys.path.append(\"../../\")\n", + "from allennlp.predictors import Predictor#, BidafPredictor\n", + "# from allennlp.models.archival import load_archive\n", + "# from allennlp.data import DatasetReader, Instance\n", + "# from allennlp.common import Params\n", + "from azureml.core.webservice import AciWebservice, Webservice\n", + "from azureml.core.image import ContainerImage\n", + "from azureml.core.conda_dependencies import CondaDependencies\n", + "from utils_nlp.azureml import azureml_utils\n", + "from azureml.core.model import Model" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "_jsonnet not loaded, treating C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpywtpft2j\\config.json as json\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\torch\\nn\\modules\\rnn.py:46: UserWarning: dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1, but got dropout=0.2 and num_layers=1\n", + " \"num_layers={}\".format(dropout, num_layers))\n", + "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\allennlp\\data\\token_indexers\\token_characters_indexer.py:55: UserWarning: You are using the default value (0) of `min_padding_length`, which can cause some subtle bugs (more info see https://github.com/allenai/allennlp/issues/1954). 
Strongly recommend to set a value, usually the maximum size of the convolutional layer size when using CnnEncoder.\n", + " UserWarning)\n" + ] + } + ], + "source": [ + "archive_file = 'https://s3-us-west-2.amazonaws.com/allennlp/models/bidaf-model-2017.09.15-charpad.tar.gz'\n", + "model = Predictor.from_path(archive_file)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Predict on One Sample**" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "passage = \"The history of the penny of Great Britain and the United Kingdom from 1714 to 1901, \\\n", + "the period in which the House of Hanover reigned, saw its transformation from a small \\\n", + "silver coin to a larger bronze piece. All bear the portrait of the monarch on the obverse; \\\n", + "copper and bronze pennies have a depiction of Britannia on the reverse. During most of the 18th century, \\\n", + "the penny was a small silver coin rarely seen in circulation. Beginning in 1787, \\\n", + "the chronic shortage of good money resulted in the wide circulation of private tokens, \\\n", + "including ones valued at one penny. In 1797 Matthew Boulton gained a government contract \\\n", + "and struck millions of pennies. The copper penny continued to be issued until 1860, \\\n", + "when they were replaced by lighter bronze coins; the Bun penny, \\\n", + "named for the hairstyle of Queen Victoria on it, was issued from then until 1894. \\\n", + "The final years of her reign saw the Old head pennies, coined from 1895 until her death in 1901\\\n", + "\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "question = \"How was the penny called due to Queen Victoria hairstyle?\"\n", + "question2 = \"When did Matthew boulton gain the government contract?\"" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "ans = model.predict(question, passage)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Bun penny'" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ans['best_span_str']" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Deploy stuff" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Performing interactive authentication. Please follow the instructions on the terminal.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", + "You have logged in. 
Now let us find all the subscriptions to which you have access...\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Interactive authentication successfully completed.\n" + ] + } + ], + "source": [ + "ws = azureml_utils.get_or_create_workspace(\n", + " subscription_id=\"\",\n", + " resource_group=\"\",\n", + " workspace_name=\"\",\n", + " workspace_region=\"\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Workspace name: MAIDAPTest\n", + "Azure region: eastus2\n", + "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", + "Resource group: nlprg\n" + ] + } + ], + "source": [ + "print('Workspace name: ' + ws.name, \n", + " 'Azure region: ' + ws.location, \n", + " 'Subscription id: ' + ws.subscription_id, \n", + " 'Resource group: ' + ws.resource_group, sep='\\n')" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "import urllib" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "x config.json\n", + "x vocabulary/\n", + "x vocabulary/non_padded_namespaces.txt\n", + "x vocabulary/tokens.txt\n", + "x weights.th\n" + ] + } + ], + "source": [ + "bidaf_model_url = 'https://s3-us-west-2.amazonaws.com/allennlp/models/bidaf-model-2017.09.15-charpad.tar.gz'\n", + "urllib.request.urlretrieve(bidaf_model_url, filename=\"bidaf.tar.gz\")\n", + "!tar xvzf bidaf.tar.gz" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Registering model bidaf\n" + ] + } + ], + "source": [ + "bidaf_model = Model.register(workspace = ws,\n", + " model_path =\"bidaf.tar.gz\",\n", + " model_name = \"bidaf\",\n", + " tags = {\"bidaf\": \"demo\"},\n", + " description = \"BiDAF Pretrained Model\")" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Overwriting score.py\n" + ] + } + ], + "source": [ + "%%writefile score.py\n", + "import json\n", + "from allennlp.predictors import Predictor\n", + "from azureml.core.model import Model\n", + "\n", + "def init():\n", + " global model\n", + " bidaf_dir_path = Model.get_model_path('bidaf')\n", + " model = Predictor.from_path(bidaf_dir_path)\n", + "\n", + "def run(rawdata):\n", + " try:\n", + " data = json.loads(rawdata)\n", + " passage = data['passage']\n", + " question = data['question']\n", + " result = model.predict(question, passage)[\"best_span_str\"]\n", + " except Exception as e:\n", + " result = str(e)\n", + " return json.dumps({\"error\": result})\n", + " return json.dumps({\"result\":result.tolist()})" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'automlenv.yml'" + ] + }, + "execution_count": 46, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "myenv = CondaDependencies.create(conda_packages=['pytorch==0.4.1','torchvision==0.2.1'],\n", + " pip_packages=['allennlp==0.7.2','azureml-sdk[automl]==1.0.43.*'], \n", + " python_version = '3.7')\n", + "myenv.add_channel('conda-forge')\n", + "myenv.add_channel('pytorch')\n", + "\n", + "conda_env_file_name = 'automlenv.yml'\n", + "myenv.save_to_file('.', 
conda_env_file_name)" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Creating image\n", + "Running.........................................................................\n", + "FailedImage creation operation finished for image bidaf-image:11, operation \"Failed\"\n", + "Image creation failed with\n", + "StatusCode: 400\n", + "Message: Docker image build failed.\n" + ] + } + ], + "source": [ + "image_config = ContainerImage.image_configuration(execution_script = \"score.py\",\n", + " runtime = \"python\",\n", + " conda_file = conda_env_file_name,\n", + " description = \"Image with BiDAF model\",\n", + " tags = {'area': \"nlp\", 'type': \"question-answering BiDAF\"})\n", + "\n", + "image = ContainerImage.create(name = \"bidaf-image\",\n", + " models = [bidaf_model],\n", + " image_config = image_config,\n", + " workspace = ws)\n", + "\n", + "image.wait_for_creation(show_output = True)" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "https://maidaptest3334372853.blob.core.windows.net/azureml/ImageLogs/a2d41a70-c4d5-4cef-87ec-b7ada8888aa8/build.log?sv=2018-03-28&sr=b&sig=6HvEyf10RZjvUUdgJvSDZXmzNPkx0nKngRXKronyQOk%3D&st=2019-06-28T01%3A52%3A07Z&se=2019-07-28T01%3A57%3A07Z&sp=rl\n" + ] + } + ], + "source": [ + "print(image.image_build_log_uri)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#Set the web service configuration\n", + "aci_config = AciWebservice.deploy_configuration(cpu_cores = CPU_CORES, \n", + " memory_gb = MEMORY_GB)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# deploy image as web service\n", + "aci_service = Webservice.deploy_from_image(workspace = ws, \n", + " name = 'aci-automl-service-1',\n", + " image = image,\n", + " deployment_config = aci_config)\n", + "\n", + "aci_service.wait_for_deployment(show_output = True)\n", + "print(aci_service.state)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From db104db301133c0bafd2f9748b7413498fded452 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sat, 29 Jun 2019 09:17:22 -0400 Subject: [PATCH 099/108] Add text to notebook and solve deployment error --- ...on_answering_system_bidaf_quickstart.ipynb | 542 ++++++++++++++---- 1 file changed, 421 insertions(+), 121 deletions(-) diff --git a/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb b/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb index 2c1ca430f..7ea6c4c20 100644 --- a/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb +++ b/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb @@ -1,126 +1,111 @@ { "cells": [ { - "cell_type": "code", - "execution_count": 13, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "import sys\n", - "sys.path.append(\"../../\")\n", - "from allennlp.predictors 
import Predictor#, BidafPredictor\n", - "# from allennlp.models.archival import load_archive\n", - "# from allennlp.data import DatasetReader, Instance\n", - "# from allennlp.common import Params\n", - "from azureml.core.webservice import AciWebservice, Webservice\n", - "from azureml.core.image import ContainerImage\n", - "from azureml.core.conda_dependencies import CondaDependencies\n", - "from utils_nlp.azureml import azureml_utils\n", - "from azureml.core.model import Model" + "# Create a Question Answering (QA) System in Under 20 Minutes" ] }, { - "cell_type": "code", - "execution_count": 3, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "_jsonnet not loaded, treating C:\\Users\\cocochra\\AppData\\Local\\Temp\\tmpywtpft2j\\config.json as json\n", - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\torch\\nn\\modules\\rnn.py:46: UserWarning: dropout option adds dropout after all but last recurrent layer, so non-zero dropout expects num_layers greater than 1, but got dropout=0.2 and num_layers=1\n", - " \"num_layers={}\".format(dropout, num_layers))\n", - "C:\\Users\\cocochra\\AppData\\Local\\Continuum\\anaconda3\\envs\\nlp_gpu\\lib\\site-packages\\allennlp\\data\\token_indexers\\token_characters_indexer.py:55: UserWarning: You are using the default value (0) of `min_padding_length`, which can cause some subtle bugs (more info see https://github.com/allenai/allennlp/issues/1954). Strongly recommend to set a value, usually the maximum size of the convolutional layer size when using CnnEncoder.\n", - " UserWarning)\n" - ] - } - ], "source": [ - "archive_file = 'https://s3-us-west-2.amazonaws.com/allennlp/models/bidaf-model-2017.09.15-charpad.tar.gz'\n", - "model = Predictor.from_path(archive_file)" + "This notebook demonstrates how to create a deployed Question Answering (QA) system in under 20 minutes. We use Azure Machine Learning ([AzureML](https://azure.microsoft.com/en-us/services/machine-learning-service/)) Service to deploy a pre-trained [AllenNLP model](https://allennlp.org/models\n", + "), BiDAF, using Azure Container Instances ([ACI](https://azure.microsoft.com/en-us/services/container-instances/))." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Predict on One Sample**" + "## Table of Contents\n", + "\n", + "1. [Deploy Model](#1.-Deploy-Model)\n", + " - 1.1 [Link to or Create a Workspace](#1.1-Link-to-or-Create-a-Workspace)\n", + " - 1.2 [Register BiDAF model for Deployment](#1.2-Register-BiDAF-model-for-Deployment) \n", + " - 1.3 [Create Scoring Script](#1.3-Create-Scoring-Script) \n", + " - 1.4 [Create a YAML File for the Environment](#1.4-Create-a-YAML-File-for-the-Environment) \n", + " - 1.5 [Image Creation](#1.5-Image-Creation)\n", + " - 1.6 [Deploy the Image as a Web Service to Azure Container Instance](#1.6-Deploy-the-Image-as-a-Web-Service-to-Azure-Container-Instance)\n", + "\n", + "\n", + "2. [Test Deployed Webservice](#2.-Test-Deployed-Webservice)\n", + " - 2.1 [Real-time Scoring](#2.1-Real-time-Scoring)\n", + " - 2.2 [Batch Scoring](#2.2-Batch-Scoring)\n", + " \n", + " \n", + "3. 
[Conclusion](#Conclusion)" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ - "passage = \"The history of the penny of Great Britain and the United Kingdom from 1714 to 1901, \\\n", - "the period in which the House of Hanover reigned, saw its transformation from a small \\\n", - "silver coin to a larger bronze piece. All bear the portrait of the monarch on the obverse; \\\n", - "copper and bronze pennies have a depiction of Britannia on the reverse. During most of the 18th century, \\\n", - "the penny was a small silver coin rarely seen in circulation. Beginning in 1787, \\\n", - "the chronic shortage of good money resulted in the wide circulation of private tokens, \\\n", - "including ones valued at one penny. In 1797 Matthew Boulton gained a government contract \\\n", - "and struck millions of pennies. The copper penny continued to be issued until 1860, \\\n", - "when they were replaced by lighter bronze coins; the Bun penny, \\\n", - "named for the hairstyle of Queen Victoria on it, was issued from then until 1894. \\\n", - "The final years of her reign saw the Old head pennies, coined from 1895 until her death in 1901\\\n", - "\"" + "import sys\n", + "sys.path.append(\"../../\")\n", + "import json\n", + "import urllib\n", + "\n", + "#import utils\n", + "from utils_nlp.common.timer import Timer\n", + "from utils_nlp.azureml import azureml_utils\n", + "\n", + "from azureml.core.webservice import AciWebservice, Webservice\n", + "from azureml.core.image import ContainerImage\n", + "from azureml.core.conda_dependencies import CondaDependencies\n", + "from azureml.core.model import Model" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ - "question = \"How was the penny called due to Queen Victoria hairstyle?\"\n", - "question2 = \"When did Matthew boulton gain the government contract?\"" + "CPU_CORES = 1\n", + "MEMORY_GB = 8" ] }, { - "cell_type": "code", - "execution_count": 6, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "ans = model.predict(question, passage)" + "## 1. Deploy Model" ] }, { - "cell_type": "code", - "execution_count": 7, + "cell_type": "markdown", "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Bun penny'" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ - "ans['best_span_str']" + "### 1.1 Link to or Create a Workspace" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Deploy stuff" + "First, go through the [Configuration](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) notebook to install the Azure Machine Learning Python SDK and create an Azure ML Workspace. This will create a config.json file containing the values needed below to create a workspace.\n", + "\n", + "**Note**: you do not need to fill in these values if you have a config.json in the same folder as this notebook" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 19, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n" + ] + }, { "name": "stdout", "output_type": "stream", @@ -132,7 +117,6 @@ "name": "stderr", "output_type": "stream", "text": [ - "Note, we have launched a browser for you to login. 
For old experience with device code, use \"az login --use-device-code\"\n", "You have logged in. Now let us find all the subscriptions to which you have access...\n" ] }, @@ -155,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 20, "metadata": {}, "outputs": [ { @@ -177,18 +161,27 @@ ] }, { - "cell_type": "code", - "execution_count": 9, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "import urllib" + "### 1.2 Register BiDAF model for Deployment" ] }, { - "cell_type": "code", - "execution_count": 11, + "cell_type": "markdown", "metadata": {}, + "source": [ + "This step downloads the pre-trained [AllenNLP](https://allennlp.org/models) pretrained model and registers the model in our Workspace. The pre-trained AllenNLP model we use is called Bidirectional Attention Flow for Machine Comprehension ([BiDAF](https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/007ab5528b3bd310a80d553cccad4b78dc496b02\n", + ")) It achieved state-of-the-art performance on the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) dataset in 2017 and is a well-respected, performant baseline for QA. AllenNLP's pre-trained BIDAF model is trained on the SQuAD training set and achieves an EM score of 68.3 on the SQuAD development set. See the [BIDAF deep dive notebook](https://github.com/microsoft/nlp/blob/courtney-bidaf/scenarios/question_answering/bidaf_deep_dive.ipynb\n", + ") for more information on this algorithm and AllenNLP implementation." + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "scrolled": true + }, "outputs": [ { "name": "stderr", @@ -208,9 +201,20 @@ "!tar xvzf bidaf.tar.gz" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Registering a model means registering one or more files that make up a model (in our case, we register all the files contained in the downloaded .tar.gz file). Here we demonstrate how to register a model using the AzureML SDK, but see the [model registration](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#registermodel\n", + ") documentation for other registration methods.\n", + "\n", + "\n", + "**Note**: If you have already registered the model, you need not re-register it. Rather, just retrieve the pre-existing model in your Workspace with `bidaf_model = Model(ws, name='bidaf')`" + ] + }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 22, "metadata": {}, "outputs": [ { @@ -229,9 +233,28 @@ " description = \"BiDAF Pretrained Model\")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.3 Create Scoring Script" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this section we show an example of an entry script, score.py, which is called from the deployed webservice. The script must contain:\n", + "\n", + "1. init() - This function loads the model in a global object. \n", + "2. run() - This function is used for model prediction. The inputs and outputs to run() typically use JSON for serialization and deserilization. \n", + "\n", + "Our scoring script allows for both real-time and batch prediction. Each observation is a dictionary with two keys: _question_ and _passage_. With batch prediction we pass in a list of observations and use AllenNLPs `predict_batch_json()` method. For real-time prediction we pass in a single observation and use AllenNLPs `predict()` method." 
+ ] + }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 23, "metadata": { "scrolled": true }, @@ -258,45 +281,82 @@ "def run(rawdata):\n", " try:\n", " data = json.loads(rawdata)\n", - " passage = data['passage']\n", - " question = data['question']\n", - " result = model.predict(question, passage)[\"best_span_str\"]\n", + " \n", + " # if one question-passage pair was passed\n", + " if type(data) == dict:\n", + " passage = data['passage']\n", + " question = data['question']\n", + " result = model.predict(question, passage)[\"best_span_str\"]\n", + " \n", + " # if multiple question-passage pairs were passed\n", + " elif type(data) == list:\n", + " result = model.predict_batch_json(data)\n", + " result = [i[\"best_span_str\"] for i in result]\n", + "\n", " except Exception as e:\n", " result = str(e)\n", " return json.dumps({\"error\": result})\n", - " return json.dumps({\"result\":result.tolist()})" + " return json.dumps({\"result\":result})" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.4 Create a YAML File for the Environment " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To ensure the fit results are consistent with the training results, the SDK dependency versions need to be the same as the environment that trains the model. The following cells create a file, bidafenv.yml, which specifies the dependencies from the run." ] }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 24, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'automlenv.yml'" + "'bidafenv.yml'" ] }, - "execution_count": 46, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "myenv = CondaDependencies.create(conda_packages=['pytorch==0.4.1','torchvision==0.2.1'],\n", - " pip_packages=['allennlp==0.7.2','azureml-sdk[automl]==1.0.43.*'], \n", - " python_version = '3.7')\n", + "myenv = CondaDependencies.create(conda_packages=['jsonnet','cmake','regex','pytorch','torchvision'],\n", + " pip_packages=['allennlp==0.8.4','azureml-sdk==1.0.43.*'], \n", + " python_version = '3.6.8')\n", "myenv.add_channel('conda-forge')\n", "myenv.add_channel('pytorch')\n", "\n", - "conda_env_file_name = 'automlenv.yml'\n", + "conda_env_file_name = 'bidafenv.yml'\n", "myenv.save_to_file('.', conda_env_file_name)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.5 Image Creation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this step we create a container image which is wrapper containing the entry script, yaml file with package dependencies and the model. The created image is then deployed as a webservice in the next step. This step can take up to 10 minutes and even longer if the model is large." 
+ ] + }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 25, "metadata": { "scrolled": true }, @@ -306,11 +366,8 @@ "output_type": "stream", "text": [ "Creating image\n", - "Running.........................................................................\n", - "FailedImage creation operation finished for image bidaf-image:11, operation \"Failed\"\n", - "Image creation failed with\n", - "StatusCode: 400\n", - "Message: Docker image build failed.\n" + "Running.........................................................................................................................................................\n", + "SucceededImage creation operation finished for image bidaf-image:33, operation \"Succeeded\"\n" ] } ], @@ -329,21 +386,118 @@ "image.wait_for_creation(show_output = True)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If the above step fails, then use the below command to see logs" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# print(image.image_build_log_uri)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1.6 Deploy the Image as a Web Service to Azure Container Instance" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Azure Container Instances are mostly used for deploying your models as a web service if one or more of the following conditions are true: \n", + "1. You need to quickly deploy and validate your model.\n", + "2. You are testing a model that is under development. \n", + "\n", + "\n", + "To set them up properly, we need to indicate the number of CPU cores and the amount of memory we want to allocate to our web service." + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [], + "source": [ + "#Set the web service configuration\n", + "aci_config = AciWebservice.deploy_configuration(cpu_cores = CPU_CORES, \n", + " memory_gb = MEMORY_GB)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The final step to deploying our web service is to call WebService.deploy_from_image(). This function uses the Docker image and the deployment configuration we created above to perform the following: \n", + "1. Deploy the docker image to an Azure Container Instance\n", + "2. Call the init() function in our scoring file\n", + "3. Provide an HTTP endpoint for scoring calls \n", + "\n", + "The deploy_from_image method requires the following parameters:\n", + "1. workspace: the workspace containing the service\n", + "2. name: a unique name used to identify the service in the workspace\n", + "3. image: a docker image object that contains the environment needed for scoring/inference\n", + "4. 
deployment_config: a configuration object describing the compute type\n", + "\n", + "**Note**: The web service creation can take a few minutes" + ] + }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 28, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "https://maidaptest3334372853.blob.core.windows.net/azureml/ImageLogs/a2d41a70-c4d5-4cef-87ec-b7ada8888aa8/build.log?sv=2018-03-28&sr=b&sig=6HvEyf10RZjvUUdgJvSDZXmzNPkx0nKngRXKronyQOk%3D&st=2019-06-28T01%3A52%3A07Z&se=2019-07-28T01%3A57%3A07Z&sp=rl\n" + "Creating service\n", + "Running.....................................................\n", + "SucceededACI service creation operation finished, operation \"Succeeded\"\n", + "Healthy\n" ] } ], "source": [ - "print(image.image_build_log_uri)" + "# deploy image as web service\n", + "aci_service = Webservice.deploy_from_image(workspace = ws, \n", + " name = 'bidaf-aci-service',\n", + " image = image,\n", + " deployment_config = aci_config)\n", + "\n", + "aci_service.wait_for_deployment(show_output = True)\n", + "print(aci_service.state)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Fetch logs to debug in case of failures." + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "# print(aci_service.get_logs())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "If you want to reuse an existing service versus creating a new one, call the webservice with the name of the service. You can look up all the deployed webservices under deployment in the Azure Portal. Below is an example:" ] }, { @@ -352,9 +506,31 @@ "metadata": {}, "outputs": [], "source": [ - "#Set the web service configuration\n", - "aci_config = AciWebservice.deploy_configuration(cpu_cores = CPU_CORES, \n", - " memory_gb = MEMORY_GB)" + "# aci_service = Webservice(workspace=ws, name='<>')\n", + "\n", + "# to use the webservice\n", + "# aci_service.run()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "**Conclusion**: Now we have a deployed webservice and deploying the model took less than 20 minutes!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Test Deployed Webservice" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Depending on the needs of our QA system, we can either do real-time or batch scoring. We show an example of both types of scoring below using the following example [passage](https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/007ab5528b3bd310a80d553cccad4b78dc496b02) and questions:" ] }, { @@ -363,14 +539,150 @@ "metadata": {}, "outputs": [], "source": [ - "# deploy image as web service\n", - "aci_service = Webservice.deploy_from_image(workspace = ws, \n", - " name = 'aci-automl-service-1',\n", - " image = image,\n", - " deployment_config = aci_config)\n", + "passage = \"Machine Comprehension (MC), answering questions about a given context, \\\n", + "requires modeling complex interactions between the context and the query. Recently,\\\n", + "attention mechanisms have been successfully extended to MC. Typically these mechanisms\\\n", + "use attention to summarize the query and context into a single vector, couple \\\n", + "attentions temporally, and often form a uni-directional attention. 
In this paper \\\n", + "we introduce the Bi-Directional Attention Flow (BIDAF) network, a multi-stage \\\n", + "hierarchical process that represents the context at different levels of granularity \\\n", + "and uses a bi-directional attention flow mechanism to achieve a query-aware context \\\n", + "representation without early summarization. Our experimental evaluations show that \\\n", + "our model achieves the state-of-the-art results in Stanford QA (SQuAD) and\\\n", + "CNN/DailyMail Cloze Test datasets.\"\n", "\n", - "aci_service.wait_for_deployment(show_output = True)\n", - "print(aci_service.state)" + "question1 = \"What is BIDAF?\"\n", + "question2 = \"What datasets does BIDAF achieve state-of-the-art results on?\"\n", + "question3 = \"What do attention mechanisms do?\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.1 Real-time Scoring" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We prepare data for predicting answers for one passage-question pair by creating a dictionary with _question_ and _passage_ keys" + ] + }, + { + "cell_type": "code", + "execution_count": 71, + "metadata": {}, + "outputs": [], + "source": [ + "data = {\"passage\": passage, \"question\":question1}\n", + "data = json.dumps(data)" + ] + }, + { + "cell_type": "code", + "execution_count": 72, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time elapsed: 0.3625\n", + "Bi-Directional Attention Flow\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " score = aci_service.run(input_data=data)\n", + " t.stop()\n", + " print(\"Time elapsed: {}\".format(t))\n", + " \n", + "result = json.loads(score)\n", + "try:\n", + " output = result[\"result\"]\n", + " print(\"Answer:\", output)\n", + "except:\n", + " print(result[\"error\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We see that the model responded to the question \"What is BiDAF?\" with \"Bi-Directional Attention Flow\"." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2.2 Batch Scoring\n", + "\n", + "We prepare the data for batch scoring by creating a list of dictionaries with _passage_ and _question_ keys." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 73, + "metadata": {}, + "outputs": [], + "source": [ + "data_multiple = [{\"passage\": passage, \"question\":i} for i in [question1, question2, question3]]\n", + "data_multiple = json.dumps(data_multiple)" + ] + }, + { + "cell_type": "code", + "execution_count": 74, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Time elapsed: 0.5340\n", + "['Bi-Directional Attention Flow', 'Stanford QA (SQuAD) and CNN/DailyMail Cloze Test', 'summarize the query and context into a single vector, couple attentions temporally, and often form a uni-directional attention']\n" + ] + } + ], + "source": [ + "with Timer() as t:\n", + " score = aci_service.run(input_data=data_multiple)\n", + " t.stop()\n", + " print(\"Time elapsed: {}\".format(t))\n", + " \n", + "result = json.loads(score)\n", + "try:\n", + " output = result[\"result\"]\n", + " print(output)\n", + "except:\n", + " print(result[\"error\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We see that the model responded to the question \"What is BiDAF?\" with \"Bi-Directional Attention Flow\", the question \"What datasets does BIDAF achieve state-of-the-art results on?\" with \"Stanford QA (SQuAD) and CNN/DailyMail Cloze Test\", and the question \"What do attention mechanisms do?\" with \"summarize the query and context into a single vector, couple attentions temporally, and often form a uni-directional attention\". All these answers make sense given the passage and demonstrate that the AllenNLP pre-trained model is a good model for a deployed QA system. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Conclusion" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook demonstrated how to produce a fast QA service in under 20 minutes using Azure Container Instances (ACI). We deployed a popular pre-trained model, BiDAF, provided by AllenNLP, which was state-of-the-art in 2017 and performs well on our example queries. 
" ] } ], @@ -379,18 +691,6 @@ "display_name": "Python 3", "language": "python", "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" } }, "nbformat": 4, From 9b3ce07c7e948befddb29f1800e839c1cd315462 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sat, 29 Jun 2019 09:21:03 -0400 Subject: [PATCH 100/108] Empty deep dive notebook --- .../question_answering/bidaf_deep_dive.ipynb | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 scenarios/question_answering/bidaf_deep_dive.ipynb diff --git a/scenarios/question_answering/bidaf_deep_dive.ipynb b/scenarios/question_answering/bidaf_deep_dive.ipynb new file mode 100644 index 000000000..0e30a7f7d --- /dev/null +++ b/scenarios/question_answering/bidaf_deep_dive.ipynb @@ -0,0 +1,32 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 685e7bac8a30f2a76e5b74254af7c8456e7dfbf1 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sat, 29 Jun 2019 21:36:13 -0400 Subject: [PATCH 101/108] Update READMEs --- scenarios/question_answering/README.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 scenarios/question_answering/README.md diff --git a/scenarios/question_answering/README.md b/scenarios/question_answering/README.md new file mode 100644 index 000000000..6df2e730f --- /dev/null +++ b/scenarios/question_answering/README.md @@ -0,0 +1,16 @@ +# Question Answering (QA) + +This folder contains examples and best practices, written in Jupyter notebooks, for building question answering models. These models can be used in a wide variety of applications, such as search engines, and virtual assistants. + +## What is Question Answering? + +Question Answering is a classical NLP task which consists of determining the relevant "answer" (snippet of text out of a provided passage) that answers a user's "question". This task is a subset of Machine Comprehension, or measuring how well a machine comprehends a passage of text. The Stanford Question Answering Dataset ([SQuAD](https://rajpurkar.github.io/SQuAD-explorer/)) leaderboard displays the state-of-the-art models in this space. Traditional QA models are varients of Bidirectional Recurrent Neural (BRNN) Networks. + +## Summary + +The following summarizes each notebook for Question Answering. Each notebook provides more details and guiding in principles on building state of the art models. + +|Notebook|Runs Local|Description| +|---|---|---| +|[Deployed QA System in Under 20 minutes](question_answering_system_bidaf_quickstart.ipynb)| No| Learn how to deploy a QA system in under 20 minutes using Azure Container Instances (ACI) and a popular AllenNLP pre-trained model called BiDAF. 
+ From 950761fbd32391841d40920c972d104f9a86e3bb Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sat, 29 Jun 2019 21:38:42 -0400 Subject: [PATCH 102/108] Update readmes --- scenarios/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/scenarios/README.md b/scenarios/README.md index b86aa9ff0..e5c98539d 100644 --- a/scenarios/README.md +++ b/scenarios/README.md @@ -10,6 +10,7 @@ The following summarizes each scenario of the best practice notebooks. Each scen |---| ------------------------ | -------------------------------------------- | ------------------- | |[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| |[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| +|[Question Answering](scenarios/question_answering) |SQuAD | en |BiDAF| |[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance| |[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec
fastText
GloVe| From 2a5e902336520c7d591c6212781f9be1f88261d0 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sat, 29 Jun 2019 21:44:04 -0400 Subject: [PATCH 103/108] Clean folder --- .../question_answering/bidaf_deep_dive.ipynb | 32 ------------------- 1 file changed, 32 deletions(-) delete mode 100644 scenarios/question_answering/bidaf_deep_dive.ipynb diff --git a/scenarios/question_answering/bidaf_deep_dive.ipynb b/scenarios/question_answering/bidaf_deep_dive.ipynb deleted file mode 100644 index 0e30a7f7d..000000000 --- a/scenarios/question_answering/bidaf_deep_dive.ipynb +++ /dev/null @@ -1,32 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} From f02d39fc7e05d3d5f52dcce4213e7a98d860a754 Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Sat, 29 Jun 2019 22:06:51 -0400 Subject: [PATCH 104/108] Remove tensorflow import statement for notebook --- .../automl_with_pipelines_deployment_aks.ipynb | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb b/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb index 348336514..00673a288 100644 --- a/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb +++ b/scenarios/sentence_similarity/automl_with_pipelines_deployment_aks.ipynb @@ -168,12 +168,9 @@ ")\n", "from utils_nlp.common.timer import Timer\n", "\n", - "# Tensorflow dependencies for Google Universal Sentence Encoder\n", - "import tensorflow as tf\n", + "# Google Universal Sentence Encoder loader\n", "import tensorflow_hub as hub\n", "\n", - "tf.logging.set_verbosity(tf.logging.ERROR) # reduce logging output\n", - "\n", "# AzureML packages\n", "import azureml as aml\n", "import logging\n", From e2235b679a613feb8fd2d2b36c0d028b719c1e6d Mon Sep 17 00:00:00 2001 From: Courtney Cochrane Date: Tue, 2 Jul 2019 23:58:33 -0400 Subject: [PATCH 105/108] PR edits --- ...on_answering_system_bidaf_quickstart.ipynb | 111 +++++++++--------- 1 file changed, 56 insertions(+), 55 deletions(-) diff --git a/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb b/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb index 7ea6c4c20..3500d92e6 100644 --- a/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb +++ b/scenarios/question_answering/question_answering_system_bidaf_quickstart.ipynb @@ -11,8 +11,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook demonstrates how to create a deployed Question Answering (QA) system in under 20 minutes. We use Azure Machine Learning ([AzureML](https://azure.microsoft.com/en-us/services/machine-learning-service/)) Service to deploy a pre-trained [AllenNLP model](https://allennlp.org/models\n", - "), BiDAF, using Azure Container Instances ([ACI](https://azure.microsoft.com/en-us/services/container-instances/))." + "This notebook demonstrates how to create a Question Answering (QA) webservice in under 20 minutes. 
We use Azure Machine Learning ([AzureML](https://azure.microsoft.com/en-us/services/machine-learning-service/)) Service to deploy a pre-trained [AllenNLP model](https://allennlp.org/models\n", + "), [BiDAF](https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Seo-Kembhavi/007ab5528b3bd310a80d553cccad4b78dc496b02\n", + "), using Azure Container Instances ([ACI](https://azure.microsoft.com/en-us/services/container-instances/))." ] }, { @@ -40,7 +41,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -61,12 +62,17 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "CPU_CORES = 1\n", - "MEMORY_GB = 8" + "MEMORY_GB = 8\n", + "DEPLOYMENT_PYTHON_VERSION = '3.6.8'\n", + "DEPLOYMENT_CONDA_PACKAGES = ['jsonnet','cmake','regex','pytorch','torchvision']\n", + "DEPLOYMENT_PIP_PACKAGES = ['allennlp==0.8.4','azureml-sdk==1.0.43.*']\n", + "CONTAINER_TAGS = {'area': \"nlp\", 'type': \"question-answering BiDAF\"}\n", + "MODEL_TAGS = {\"bidaf\": \"demo\"}" ] }, { @@ -94,18 +100,11 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 3, "metadata": { "scrolled": true }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n" - ] - }, { "name": "stdout", "output_type": "stream", @@ -117,6 +116,7 @@ "name": "stderr", "output_type": "stream", "text": [ + "Note, we have launched a browser for you to login. For old experience with device code, use \"az login --use-device-code\"\n", "You have logged in. Now let us find all the subscriptions to which you have access...\n" ] }, @@ -139,20 +139,9 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Workspace name: MAIDAPTest\n", - "Azure region: eastus2\n", - "Subscription id: 15ae9cb6-95c1-483d-a0e3-b1a1a3b06324\n", - "Resource group: nlprg\n" - ] - } - ], + "outputs": [], "source": [ "print('Workspace name: ' + ws.name, \n", " 'Azure region: ' + ws.location, \n", @@ -178,9 +167,9 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 5, "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [ { @@ -214,7 +203,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -229,7 +218,7 @@ "bidaf_model = Model.register(workspace = ws,\n", " model_path =\"bidaf.tar.gz\",\n", " model_name = \"bidaf\",\n", - " tags = {\"bidaf\": \"demo\"},\n", + " tags = MODEL_TAGS,\n", " description = \"BiDAF Pretrained Model\")" ] }, @@ -254,7 +243,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": 7, "metadata": { "scrolled": true }, @@ -263,7 +252,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Overwriting score.py\n" + "Writing score.py\n" ] } ], @@ -315,7 +304,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 8, "metadata": {}, "outputs": [ { @@ -324,15 +313,15 @@ "'bidafenv.yml'" ] }, - "execution_count": 24, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "myenv = CondaDependencies.create(conda_packages=['jsonnet','cmake','regex','pytorch','torchvision'],\n", - " pip_packages=['allennlp==0.8.4','azureml-sdk==1.0.43.*'], \n", - " 
python_version = '3.6.8')\n", + "myenv = CondaDependencies.create(conda_packages= DEPLOYMENT_CONDA_PACKAGES,\n", + " pip_packages= DEPLOYMENT_PIP_PACKAGES, \n", + " python_version = DEPLOYMENT_PYTHON_VERSION)\n", "myenv.add_channel('conda-forge')\n", "myenv.add_channel('pytorch')\n", "\n", @@ -356,7 +345,7 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 9, "metadata": { "scrolled": true }, @@ -366,8 +355,8 @@ "output_type": "stream", "text": [ "Creating image\n", - "Running.........................................................................................................................................................\n", - "SucceededImage creation operation finished for image bidaf-image:33, operation \"Succeeded\"\n" + "Running..............................................................................................................................................................\n", + "SucceededImage creation operation finished for image bidaf-image:34, operation \"Succeeded\"\n" ] } ], @@ -376,7 +365,7 @@ " runtime = \"python\",\n", " conda_file = conda_env_file_name,\n", " description = \"Image with BiDAF model\",\n", - " tags = {'area': \"nlp\", 'type': \"question-answering BiDAF\"})\n", + " tags = CONTAINER_TAGS)\n", "\n", "image = ContainerImage.create(name = \"bidaf-image\",\n", " models = [bidaf_model],\n", @@ -395,7 +384,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -423,7 +412,7 @@ }, { "cell_type": "code", - "execution_count": 27, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -452,7 +441,7 @@ }, { "cell_type": "code", - "execution_count": 28, + "execution_count": 13, "metadata": {}, "outputs": [ { @@ -460,7 +449,7 @@ "output_type": "stream", "text": [ "Creating service\n", - "Running.....................................................\n", + "Running....................................................\n", "SucceededACI service creation operation finished, operation \"Succeeded\"\n", "Healthy\n" ] @@ -486,7 +475,7 @@ }, { "cell_type": "code", - "execution_count": 35, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -535,7 +524,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ @@ -572,7 +561,7 @@ }, { "cell_type": "code", - "execution_count": 71, + "execution_count": 15, "metadata": {}, "outputs": [], "source": [ @@ -582,15 +571,15 @@ }, { "cell_type": "code", - "execution_count": 72, + "execution_count": 16, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Time elapsed: 0.3625\n", - "Bi-Directional Attention Flow\n" + "Time elapsed: 0.8884\n", + "Answer: Bi-Directional Attention Flow\n" ] } ], @@ -626,7 +615,7 @@ }, { "cell_type": "code", - "execution_count": 73, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -636,7 +625,7 @@ }, { "cell_type": "code", - "execution_count": 74, + "execution_count": 18, "metadata": { "scrolled": true }, @@ -645,8 +634,8 @@ "name": "stdout", "output_type": "stream", "text": [ - "Time elapsed: 0.5340\n", - "['Bi-Directional Attention Flow', 'Stanford QA (SQuAD) and CNN/DailyMail Cloze Test', 'summarize the query and context into a single vector, couple attentions temporally, and often form a uni-directional attention']\n" + "Time elapsed: 0.9046\n", + "['Bi-Directional Attention Flow', 'Stanford QA (SQuAD) andCNN/DailyMail Cloze Test', 'have been 
successfully extended to MC']\n" ] } ], @@ -691,6 +680,18 @@ "display_name": "Python 3", "language": "python", "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" } }, "nbformat": 4, From b4405cebf0a7b8fa5e790c1f645580b35b1b9419 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20Gonz=C3=A1lez-Fierro?= <3491412+miguelgfierro@users.noreply.github.com> Date: Wed, 3 Jul 2019 17:14:21 +0100 Subject: [PATCH 106/108] :bug: in Readme --- tools/repo_metrics/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/repo_metrics/README.md b/tools/repo_metrics/README.md index 884cc3eb1..8d14448c2 100755 --- a/tools/repo_metrics/README.md +++ b/tools/repo_metrics/README.md @@ -2,7 +2,7 @@ [![Build Status](https://dev.azure.com/best-practices/nlp/_apis/build/status/repo_metrics?branchName=master)](https://dev.azure.com/best-practices/nlp/_build/latest?definitionId=36&branchName=master) -We developed a script that allows us to track the metrics of the Recommenders repo. Some of the metrics we can track are listed here: +We developed a script that allows us to track the repo metrics. Some of the metrics we can track are listed here: * Number of stars * Number of forks @@ -18,7 +18,7 @@ The first step is to set up the credentials, copy the configuration file and fil To track the current state of the repository and save it to CosmosDB: - python scripts/repo_metrics/track_metrics.py --github_repo "https://github.com/Microsoft/Recommenders" --save_to_database + python tools/repo_metrics/track_metrics.py --github_repo "https://github.com/Microsoft/NLP" --save_to_database To track an event related to this repository and save it to CosmosDB: From 5e2130238dfb478ce1df0bed27857f3615a4440b Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Wed, 3 Jul 2019 14:51:58 -0400 Subject: [PATCH 107/108] remove yahoo_answers utils --- utils_nlp/dataset/yahoo_answers.py | 35 ------------------------------ 1 file changed, 35 deletions(-) delete mode 100644 utils_nlp/dataset/yahoo_answers.py diff --git a/utils_nlp/dataset/yahoo_answers.py b/utils_nlp/dataset/yahoo_answers.py deleted file mode 100644 index a4c4a8923..000000000 --- a/utils_nlp/dataset/yahoo_answers.py +++ /dev/null @@ -1,35 +0,0 @@ -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. - -"""Yahoo! 
Answers dataset utils""" - -import os -import pandas as pd -from utils_nlp.dataset.url_utils import maybe_download, extract_tar - - -URL = "https://s3.amazonaws.com/fast-ai-nlp/yahoo_answers_csv.tgz" - - -def download(dir_path): - """Downloads and extracts the dataset files""" - file_name = URL.split("/")[-1] - maybe_download(URL, file_name, dir_path) - extract_tar(os.path.join(dir_path, file_name), dir_path) - - -def read_data(data_file, nrows=None): - return pd.read_csv(data_file, header=None, nrows=nrows) - - -def get_text(df): - df.fillna("", inplace=True) - text = df.iloc[:, 1] + " " + df.iloc[:, 2] + " " + df.iloc[:, 3] - text = text.str.replace(r"[^A-Za-z ]", "").str.lower() - text = text.str.replace(r"\\s+", " ") - text = text.astype(str) - return text - - -def get_labels(df): - return list(df[0] - 1) From 88c724303b524add20da12267781d86ad131de4d Mon Sep 17 00:00:00 2001 From: Said Bleik Date: Wed, 3 Jul 2019 15:46:11 -0400 Subject: [PATCH 108/108] readme edits --- README.md | 5 +---- scenarios/README.md | 18 +++++++++--------- scenarios/named_entity_recognition/README.md | 2 +- scenarios/sentence_similarity/README.md | 7 ++++--- 4 files changed, 15 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index 4ad19b9a3..9c89107f8 100755 --- a/README.md +++ b/README.md @@ -5,10 +5,7 @@ # NLP Best Practices -This repository contains examples and best practices for building NLP systems, provided as Jupyter notebooks and utility functions. The focus of the repository is on state-of-the-art methods and common scenarios that are popular among researchers and practitioners working on problems involving text and language. - -## Planning -All feature planning is done via projects, milestones, and issues in this repository. +This repository contains examples and best practices for building NLP systems, provided as [Jupyter notebooks](scenarios) and [utility functions](utils_nlp). The focus of the repository is on state-of-the-art methods and common scenarios that are popular among researchers and practitioners working on problems involving text and language. ## Getting Started To get started, navigate to the [Setup Guide](SETUP.md), where you'll find instructions on how to setup your environment and dependencies. diff --git a/scenarios/README.md b/scenarios/README.md index e5c98539d..21b222acd 100644 --- a/scenarios/README.md +++ b/scenarios/README.md @@ -4,15 +4,15 @@ This folder contains examples and best practices, written in Jupyter notebooks, ## Summary -The following summarizes each scenario of the best practice notebooks. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities. - -| Scenario | Applications | Languages | Models | -|---| ------------------------ | -------------------------------------------- | ------------------- | -|[Text Classification](scenarios/text_classification) |Topic Classification|en, zh, ar|BERT| -|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER | en, zh |BERT| -|[Question Answering](scenarios/question_answering) |SQuAD | en |BiDAF| -|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |en|Representation: TF-IDF, Word Embeddings, Doc Embeddings
Metrics: Cosine Similarity, Word Mover's Distance|
-|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|en|Word2Vec<br>fastText<br>GloVe|
+The following is a summary of the scenarios covered in the best practice notebooks. Each scenario is demonstrated in one or more Jupyter notebook examples that make use of the core code base of models and utilities.
+
+| Scenario | Applications | Models |
+|---| ------------------------ | ------------------- |
+|[Text Classification](scenarios/text_classification) |Topic Classification|BERT|
+|[Named Entity Recognition](scenarios/named_entity_recognition) |Wikipedia NER |BERT|
+|[Question Answering](scenarios/question_answering) |SQuAD | BiDAF|
+|[Sentence Similarity](scenarios/sentence_similarity) |STS Benchmark |Representation: TF-IDF, Word Embeddings, Doc Embeddings<br>Metrics: Cosine Similarity, Word Mover's Distance|
+|[Embeddings](scenarios/embeddings)| Custom Embeddings Training|Word2Vec<br>fastText<br>GloVe|
 
 ## Azure-enhanced notebooks
 
diff --git a/scenarios/named_entity_recognition/README.md b/scenarios/named_entity_recognition/README.md
index cbcc3c07e..2c80ba53e 100644
--- a/scenarios/named_entity_recognition/README.md
+++ b/scenarios/named_entity_recognition/README.md
@@ -5,4 +5,4 @@ names, locations, organizations, etc. The state-of-the art NER methods include
 combining Long Short-Term Memory neural network with Conditional Random Field
 (LSTM-CRF) and pretrained language models like BERT. NER can be used for
 information extraction and filtering. It also plays an important role in other
-NLP tasks like question answering and texts summarization.
+NLP tasks like question answering and text summarization.
diff --git a/scenarios/sentence_similarity/README.md b/scenarios/sentence_similarity/README.md
index fcd5e27bc..d2f5f3af5 100644
--- a/scenarios/sentence_similarity/README.md
+++ b/scenarios/sentence_similarity/README.md
@@ -1,16 +1,17 @@
 # Sentence Similarity
 
-This folder contains examples and best practices, written in Jupyter notebooks, for building sentence similarity models. The scores can be used in a wide variety of applications, such as search/retrieval, nearest-neighbor or kernel-based classification methods, recommendation, and ranking tasks.
+This folder contains examples and best practices, written in Jupyter notebooks, for building sentence similarity models. The scores can be used in a wide variety of applications, such as search/retrieval, nearest-neighbor or kernel-based classification methods, recommendations, and ranking tasks.
 
 ## What is sentence similarity
 
-Sentence similarity or semantic textual similarity is to determine how similar two pieces of texts are and a measure of the degree to which two pieces of text express the same meaning. This can take the form of assigning a score from 1 to 5. Related tasks are paraphrase or duplicate identification. The common methods used for text similarity range from simple word-vector dot products to pairwise classification, and more recently, Siamese recurrent/convolutional neural networks with triplet loss functions.
+Sentence similarity or semantic textual similarity is a measure of how similar two pieces of text are, or to what degree they express the same meaning. Related tasks include paraphrase or duplicate identification, search, and matching applications. The common methods used for text similarity range from simple word-vector dot products to pairwise classification, and more recently, deep neural networks.
 
 Sentence similarity is normally calculated by the following two steps:
 
 1. obtaining the embeddings of the sentences
 
-2. taking the cosine similarity between them as shown in the following figure([Source](https://tfhub.dev/google/universal-sentence-encoder/1)):
+2. taking the cosine similarity between them as shown in the following figure([source](https://tfhub.dev/google/universal-sentence-encoder/1)):
+
 ![Sentence Similarity](https://nlpbp.blob.core.windows.net/images/example-similarity.png)
 
 ## Summary
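
The sentence similarity README edited in the final patch describes a two-step recipe: obtain a vector representation for each sentence, then take the cosine similarity between the vectors. As an illustrative aside, the sketch below follows that recipe with TF-IDF vectors standing in for the embeddings; it is not part of the repository's utils_nlp code, and it assumes scikit-learn is available.

```python
# Minimal sentence-similarity sketch (illustrative only, not from utils_nlp).
# Assumes scikit-learn is installed; TF-IDF stands in for the sentence embedding.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

sentences = [
    "BiDAF is a question answering model.",
    "Bi-Directional Attention Flow answers questions about a passage.",
    "GloVe provides pre-trained word vectors.",
]

# Step 1: obtain a vector representation for each sentence.
vectors = TfidfVectorizer().fit_transform(sentences)

# Step 2: take the pairwise cosine similarity between the sentence vectors.
scores = cosine_similarity(vectors)
print(scores.round(2))  # symmetric matrix of similarity scores
```

Swapping the TF-IDF step for Word2Vec, fastText, GloVe, or Universal Sentence Encoder vectors leaves the second step unchanged, which is why the summary table lists representations and metrics as separate choices.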